Example #1
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-input")
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))

    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               mnist.train.num_examples/BATCH_SIZE,
                                               LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate)\
        .minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("%d 训练后,损失值为 %g" % (i, loss_value))
                saver.save(sess, MODEL_SAVE_PATH, global_step=global_step)
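
A minimal driver for train() (not part of the original listing) might look like the sketch below; it assumes the standard tensorflow.examples.tutorials.mnist helper and a hypothetical local data path.

# --- usage sketch (assumed), pairing with train() above ---
from tensorflow.examples.tutorials.mnist import input_data

def main(argv=None):
    # "/tmp/mnist_data" is an assumed path for this sketch.
    mnist = input_data.read_data_sets("/tmp/mnist_data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
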
def build_model(x, y_, is_chief):
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Compute the forward-propagation result of the network with the mnist_inference.py code from Section 5.5.
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.contrib.framework.get_or_create_global_step()

    # Compute the loss function and define the backpropagation step.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        60000 / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Define the operations to run in every training iteration.
    if is_chief:
        # Maintain the moving averages of the variables.
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(
            tf.trainable_variables())
        with tf.control_dependencies([variables_averages_op, train_op]):
            train_op = tf.no_op()
    return global_step, loss, train_op
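
In the distributed setting this build_model() is typically driven by tf.train.MonitoredTrainingSession; the sketch below is one assumed wiring (server, is_chief, the placeholders, TRAINING_STEPS, and mnist all come from the surrounding program).

# --- usage sketch (assumed), pairing build_model() with a managed session ---
global_step, loss, train_op = build_model(x, y_, is_chief)
hooks = [tf.train.StopAtStepHook(last_step=TRAINING_STEPS)]
with tf.train.MonitoredTrainingSession(master=server.target,
                                       is_chief=is_chief,
                                       checkpoint_dir=MODEL_SAVE_PATH,
                                       hooks=hooks) as sess:
    while not sess.should_stop():
        xs, ys = mnist.train.next_batch(BATCH_SIZE)
        _, loss_value, step = sess.run([train_op, loss, global_step],
                                       feed_dict={x: xs, y_: ys})
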
def get_loss(x, y_, regularizer, scope, reuse_variables=None):
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        y = mnist_inference.inference(x, regularizer)
    cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=y_))
    regularization_loss = tf.add_n(tf.get_collection('losses', scope))
    loss = cross_entropy + regularization_loss
    return loss
def build_model(x, y_, n_workers, is_chief):
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, 60000 / BATCH_SIZE, LEARNING_RATE_DECAY)
   
    # Use tf.train.SyncReplicasOptimizer to implement synchronous updates.
    opt = tf.train.SyncReplicasOptimizer(
        tf.train.GradientDescentOptimizer(learning_rate),
        replicas_to_aggregate=n_workers,
        total_num_replicas=n_workers)

    train_op = opt.minimize(loss, global_step=global_step)     
    if is_chief:
        variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())
        with tf.control_dependencies([variables_averages_op, train_op]):
            train_op = tf.no_op()

    return global_step, loss, train_op, opt
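
With SyncReplicasOptimizer, the optimizer's own session hook should also run so the gradient-aggregation queues get initialized; the sketch below shows the pairing, under the same assumed distributed setup as above.

# --- usage sketch (assumed): sync-replicas training loop ---
global_step, loss, train_op, opt = build_model(x, y_, n_workers, is_chief)
sync_replicas_hook = opt.make_session_run_hook(is_chief)
with tf.train.MonitoredTrainingSession(master=server.target,
                                       is_chief=is_chief,
                                       hooks=[sync_replicas_hook]) as sess:
    while not sess.should_stop():
        xs, ys = mnist.train.next_batch(BATCH_SIZE)
        sess.run(train_op, feed_dict={x: xs, y_: ys})
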
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        xs = mnist.validation.images
        num = len(mnist.validation.images)
        # x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        x = tf.placeholder(tf.float32, [num, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS], name='x-input')
        y_ = tf.placeholder(tf.float32, [num, mnist_inference.OUTPUT_NODE], name='y-input')
        reshaped_xs = np.reshape(xs, (num, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
        validate_feed = {x: reshaped_xs, y_: mnist.validation.labels}

        y = mnist_inference.inference(x, False, None)
        # Compute the accuracy
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model through variable renaming so the moving-average values are restored
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variable_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variable_to_restore)

        # while True:
        with tf.Session() as sess:
            # tf.train.get_checkpoint_state automatically finds the newest model file in the directory via the checkpoint file
            ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                # Load the model
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Parse the number of training steps at save time from the file name
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print("After %s training step(s), validateion accuracy = %g " % (global_step, accuracy_score))
            else:
                print("No checkpoint file found")
                return
Example #6
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        y = mnist_inference.inference(x, None)
        # Classification: compare the predicted class with the label
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                # Load the model
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Parse the training-step count from the file name
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)
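
A matching driver (assumed, not from the listing) starts this evaluation loop in a separate process alongside training:

# --- usage sketch (assumed), pairing with evaluate() above ---
from tensorflow.examples.tutorials.mnist import input_data

def main(argv=None):
    # The data path must match the one used by the training program (assumed here).
    mnist = input_data.read_data_sets("/tmp/mnist_data", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
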
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Moving-average operation
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # Loss function
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples/BATCH_SIZE, LEARNING_RATE_DECAY)
    # Training step
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')
    
    # Initialize the TensorFlow persistence class
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training "
                    "batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        
        # Define the input and output format:
        x = tf.placeholder(tf.float32,
                           [mnist.validation.num_examples,
                            mnist_inference.IMAGE_SIZE,
                            mnist_inference.IMAGE_SIZE,
                            mnist_inference.NUM_CHANNELS
                            ],
                           name='x_input')
        y_ = tf.placeholder(tf.float32,
                            [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')
        
        validate_feed = {x: np.reshape(mnist.validation.images,
                                       (mnist.validation.num_examples,
                                        mnist_inference.IMAGE_SIZE,
                                        mnist_inference.IMAGE_SIZE,
                                        mnist_inference.NUM_CHANNELS)),
                         y_: mnist.validation.labels}
        
        # Compute the forward-propagation result by calling the encapsulated function.
        # The test does not care about the regularization loss, so the regularizer argument is set to None
        y = mnist_inference.inference(x, False, None)
        
        # Use the forward-propagation result to compute the accuracy.
        # To classify unknown samples, tf.argmax(y, 1) gives the predicted class
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        
        # Load the model through variable renaming, so the forward propagation does not
        # need to call the moving-average function to obtain the averaged values. This way
        # the forward-propagation code defined in mnist_inference.py can be reused as-is
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variable_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variable_to_restore)
        
        
        # Every EVAL_INTERVAL_SECS seconds, run the accuracy computation to track how accuracy evolves during training
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state automatically finds the newest
                # model file in the directory via the checkpoint file
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # load model
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Parse the number of training steps at save time from the file name
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %f" %
                             (global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return
                time.sleep(EVAL_INTERVAL_SECS)
Example #9
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(
            tf.float32,
            [
                mnist.validation.num_examples,
                mnist_inference.IMAGE_SIZE,
                mnist_inference.IMAGE_SIZE,
                mnist_inference.NUM_CHANNELS
            ],
            name='x'
        )
        y_ = tf.placeholder(
            tf.float32,
            [None, mnist_inference.OUTPUT_NODE],
            name='y'
        )

        reshaped_x = np.reshape(
            mnist.validation.images, 
            [
                mnist.validation.num_examples,
                mnist_inference.IMAGE_SIZE,
                mnist_inference.IMAGE_SIZE,
                mnist_inference.NUM_CHANNELS
            ])
        validate_feed = {
            x: reshaped_x,
            y_: mnist.validation.labels
        }

        y = mnist_inference.inference(x, None, None)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY
        )
        variable_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variable_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(
                mnist_train.MODEL_SAVE_PATH
            )
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print('After %s training step(s), validation accuracy is %g' % (global_step, accuracy_score))
            else:
                print('No checkpoint file found')
                return
Example #10
def train(mnist):
    # Input layer and data labels
#    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    x = tf.placeholder(tf.float32, [None, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    # Forward-propagation result y
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)  
 
    # Moving-average model
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)  
    variables_averages_op = variable_averages.apply(tf.trainable_variables())   
 
    # Compute the cross entropy; adding the regularization terms would give the full loss
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
#    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    loss = cross_entropy_mean
    # Learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples/BATCH_SIZE, LEARNING_RATE_DECAY)
    # train_step: gradient descent (learning rate, loss function, global step)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Graph control dependencies: group both update ops under train_op
    with tf.control_dependencies([train_step, variables_averages_op]):  
        train_op = tf.no_op(name='train')  

    # Persistence
    saver = tf.train.Saver()
    # tf.equal(A, B) compares the two tensors element-wise and returns True where the
    # elements are equal, False otherwise
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    
    with tf.Session() as sess:  
        tf.global_variables_initializer().run()
        x1 = mnist.validation.images
        x1 = np.reshape(x1, (-1, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
        value_feed = {x: x1, y_: mnist.validation.labels}
        
        for i in range(TRAINING_STEPS): 
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs, (BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
#             reshaped_xs = tf.reshape(xs, [BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS])
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
            validate_acc = sess.run(accuracy, feed_dict=value_feed)
            print("After %d training step(s), validation accuracy using the average model is %g" % (i, validate_acc))
            print("After %d training step(s), loss on training batch is %g " % (step, loss_value))
            if i % 200 == 0:
                # Add y to a collection so it can be fetched when the model is loaded;
                # this must happen here so the final y is saved
                tf.add_to_collection("pred_network", y)
                saver.save(sess, "./model/model.ckpt")
Example #11
def get_loss(x, y_, regularizer, scope, reuse_variables=None):
    # Reuse the function from Section 5.5 to compute the forward-propagation result.
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        y = mnist_inference.inference(x, regularizer)
    # Compute the cross-entropy loss.
    cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=y_))
    # Collect the regularization loss computed on the current GPU.
    regularization_loss = tf.add_n(tf.get_collection('losses', scope))
    # Compute the final total loss.
    loss = cross_entropy + regularization_loss
    return loss
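
In the book's multi-GPU pattern, get_loss() is called once per GPU tower, with variables reused after the first tower; the loop below is a sketch under that assumption (N_GPU and the batched input tensors are assumed names).

# --- tower-loop sketch (assumed) around get_loss() ---
tower_losses = []
reuse_variables = False
for i in range(N_GPU):
    with tf.device('/gpu:%d' % i):
        with tf.name_scope('GPU_%d' % i) as scope:
            cur_loss = get_loss(x, y_, regularizer, scope, reuse_variables)
            # Reuse the variables for every tower after the first one.
            reuse_variables = True
            tower_losses.append(cur_loss)
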
Example #13
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output format
        x = tf.placeholder(
            tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input'
        )
        y_ = tf.placeholder(
            tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input'
        )
        validate_feed = {x: mnist.validation.images,
                        y_: mnist.validation.labels}

        # Compute the forward-propagation result with the encapsulated function. The test
        # does not care about the regularization loss, so the regularizer is set to None
        y = mnist_inference.inference(x, None)

        # Use the forward-propagation result to compute the accuracy. To classify
        # unknown samples, tf.argmax(y, 1) gives the predicted class of an input sample
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model through variable renaming, so the forward propagation does not need
        # to call the moving-average function; the code in mnist_inference.py is reused as-is.
        variable_averages = tf.train.ExponentialMovingAverage(
                        mnist_train.MOVING_AVERAGE_DECAY
        )
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Every EVAL_INTERVAL_SECS seconds, run the accuracy computation to track accuracy during training.
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state automatically finds the newest
                # model file in the directory via the checkpoint file
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)

                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Parse the number of training steps at save time from the file name
                    global_step = ckpt.model_checkpoint_path.split('/')[-1]\
                                .split('-')[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print("After {} training steps, validation "
                          "accuracy = {}".format(global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return

            time.sleep(EVAL_INTERVAL_SECS)
Example #14
def train(mnist):
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # moving average
    with tf.name_scope('moving_average'):
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variable_averages_op = variable_averages.apply(
            tf.trainable_variables())

    # loss function
    with tf.name_scope('loss_function'):
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y, labels=tf.argmax(y_, 1))
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # train step
    with tf.name_scope('train_step'):
        learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                                   global_step,
                                                   mnist.train.num_examples /
                                                   BATCH_SIZE,
                                                   LEARNING_RATE_DECAY,
                                                   staircase=True)
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
            loss, global_step=global_step)
        with tf.control_dependencies([train_step, variable_averages_op]):
            train_op = tf.no_op(name='train')

    # train model
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })
            if i % 1000 == 0:
                print(
                    "After %d training step(s), loss on training batch is %g. " %
                    (i, loss_value))
        final_result = sess.run(y, feed_dict={x: mnist.test.images})
    return final_result
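
Because this variant wraps each stage in tf.name_scope, the graph is easy to inspect in TensorBoard; a short sketch (log directory assumed) of exporting it:

# --- TensorBoard sketch (assumed log path) ---
writer = tf.summary.FileWriter("/tmp/mnist_log", tf.get_default_graph())
writer.close()
# Then inspect the graph with: tensorboard --logdir=/tmp/mnist_log
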
Example #15
def train(mnist):
    # Define the input and output placeholders
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                        name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    # Use the forward-propagation process defined in mnist_inference.py directly
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the moving-average operation, learning rate, loss function, and training step
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        # The training process no longer uses the validation data
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })

            # Save the model every 1000 steps
            if i % 1000 == 0:
                # Report the current training status
                print(
                    "After %d training step(s), loss on training batch is %g."
                    % (step, loss_value))

                # Save the current model
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
def train(mnist):
    # Define the input and output placeholders
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                        name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Use the forward-propagation process defined in mnist_inference.py directly
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # As in the Section 5.2.1 example, define the loss function, learning rate, moving-average operation, and training step
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.argmax(y_, 1), logits=y)  # note in particular labels=tf.argmax(y_, 1)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        # decay_steps: the number of iterations needed to go through the training data once,
        # i.e. the total number of training samples divided by the samples per batch
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=False)
    train_step = tf.train.GradientDescentOptimizer(learning_rate)\
                 .minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')
    # Initialize the TensorFlow persistence class
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        # The model is no longer tested on the validation data during training;
        # validation and testing are handled by a separate program
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })
            # Save the model every 1000 steps
            if i % 1000 == 0:
                print("After %d train step(s), loss on training "
                      "batch is %g." % (step, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
Example #17
def train(mnist):
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,                             
        mnist_inference.IMAGE_SIZE,            
        mnist_inference.IMAGE_SIZE,
        mnist_inference.NUM_CHANNELS],          
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples/BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs, (BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
            if i % 100 == 0:
                with tf.variable_scope('layer6-fc2', reuse=True):
                    fc2_weights = tf.get_variable("weight")
                y_max = tf.argmax(y, 1)
                ys_max = tf.argmax(ys, 1)
                data_dict = {x: reshaped_xs, y_: ys}
                w_store = sess.run(fc2_weights)
                print('y :', sess.run(y, feed_dict=data_dict))
                print('y :', sess.run(y, feed_dict=data_dict))
                # ----------- Why are the results different? -----------
                # (Most likely because inference() was built with train=True,
                # so dropout re-samples a new mask on every sess.run call.)
                print('y_max :', sess.run(y_max, feed_dict=data_dict))
                print('y_max :', sess.run(y_max, feed_dict=data_dict))
                w_store2 = sess.run(fc2_weights)
                print('w is same:', np.array(w_store == w_store2).all())
                # -------------- the parameters don't change --------------
                correct_prediction = tf.equal(y_max, ys_max)
                print("After %d training step(s), loss on training batch is %f." % (i, loss_value))
                print('CP:', sess.run(correct_prediction, feed_dict=data_dict))
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                train_accuracy = sess.run(accuracy, feed_dict=data_dict)
                print("step %d, training accuracy %g" % (i, train_accuracy))
                break  # For this test, the code only needs to run once.
        saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
def train(mnist):
    x = tf.placeholder(
        tf.float32, [
        BATCH_SIZE,
        mnist_inference.IMAGE_SIZE,
        mnist_inference.IMAGE_SIZE,
        mnist_inference.NUM_CHANNELS],
        name='x_input')

    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y_input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    y = mnist_inference.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_average_op = variable_average.apply(tf.trainable_variables())

    # In sparse_softmax_cross_entropy_with_logits() the first keyword argument is
    # labels and the second is logits, which differs from the order shown in the book
    # -------------------------------------------------------------------------------------
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(y_, 1), logits=y)
    # -------------------------------------------------------------------------------------
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_average_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs, [BATCH_SIZE,
                                            mnist_inference.IMAGE_SIZE,
                                            mnist_inference.IMAGE_SIZE,
                                            mnist_inference.NUM_CHANNELS])

            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})

            if i % 10 == 0:
                print("After %d training steps, loss on training batch is %g." % (step, loss_value))

                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step)
Example #19
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-input")
    
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    
    global_step = tf.Variable(0, trainable=False)
    
    variables_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    
    variables_averages_op = variables_averages.apply(tf.trainable_variables())
    
#    average_y = inference(x, variables_averages, reuse=True)
    
    # argmax: Returns the index with the largest value across axes of a tensor. 
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    
    loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))
    
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, 
        global_step, 
        mnist.train.num_examples / BATCH_SIZE, 
        LEARNING_RATE_DECAY)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).\
                 minimize(loss, global_step=global_step)

    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name="train")

    saver = tf.train.Saver()

#    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
#
#    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})

            if i % 1000 == 0:
                print("After %d training step(s), loss on training "
                     "batch is %g" % (step, loss_value))

                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
Example #20
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                        name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)

    global_step = tf.Variable(0, trainable=False)

    #loss function
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    #learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)

    #Exponential moving average
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    #Train step
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })

            if i % 1000 == 0:
                print(
                    "After %d training step(s), loss on training batch is %g."
                    % (step, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
Example #21
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # define input and output format
        x = tf.placeholder(tf.float32, [
            mnist.validation.num_examples, mnist_inference.IMAGE_SIZE,
            mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS
        ],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')
        reshaped_xs = np.reshape(
            mnist.validation.images,
            (mnist.validation.num_examples, mnist_inference.IMAGE_SIZE,
             mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
        validate_feed = {x: reshaped_xs, y_: mnist.validation.labels}

        # calculator forward propagation result without regularization loss.
        # when we test the model, we needn't regularization value.
        y = mnist_inference.inference(x, False, None)

        # calculator correct rate. use tf.argmax(y, 1) to get classification.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # load model by renaming variables
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # get accuracy every EVAL_INTERVAL_SECS
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state find the latest file
                ckpt = tf.train.get_checkpoint_state(
                    mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # load model
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # get round number by files
                    global_step = ckpt.model_checkpoint_path\
                                      .split("/")[-1].split("-")[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print(
                        "After %s training step(s), validation "
                        "accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found.')
                    return

            time.sleep(EVAL_INTERVAL_SECS)
Example #22
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Initialize the moving-average class
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)

    # Apply the moving average to all variables that represent neural-network parameters.
    # tf.trainable_variables() returns every variable not marked trainable=False
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # Cross entropy
    # When each sample has exactly one correct class, use tf.nn.sparse_softmax_cross_entropy_with_logits().
    # The first argument is the forward-propagation result without softmax,
    # the second is the correct answer of the training data
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))

    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # Total loss: cross-entropy loss plus the sum of the regularization losses
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # tf.train.exponential_decay: exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,  # base learning rate
                                               global_step,  # current iteration step
                                               mnist.train.num_examples / BATCH_SIZE,  # steps per epoch
                                               LEARNING_RATE_DECAY)  # learning-rate decay rate

    # Gradient descent
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Each pass over the data must both update the parameters via backpropagation
    # and update each parameter's moving average:
    # train_step updates the parameters via backpropagation,
    # variable_averages_op updates the parameters' moving averages
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print('After %d training step(s), loss on training batch is %g' % (step, loss_value))
                # saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
                saver.save(sess, './model/model.ckpt', global_step=global_step)
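
For reference, tf.train.exponential_decay evaluates the schedule sketched below (plain Python, with the constant names borrowed from the example above):

# --- formula sketch: what tf.train.exponential_decay computes ---
def decayed_learning_rate(base, decay_rate, global_step, decay_steps, staircase=False):
    exponent = global_step / decay_steps
    if staircase:
        exponent = exponent // 1  # floor: the rate then drops once per epoch
    return base * decay_rate ** exponent

# e.g. decayed_learning_rate(LEARNING_RATE_BASE, LEARNING_RATE_DECAY,
#                            global_step, mnist.train.num_examples / BATCH_SIZE)
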
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                        name='y-input')

    # init and use regularizer function
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # init the moving average class
    variable_avg = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,
                                                     global_step)
    variable_avg_op = variable_avg.apply(tf.trainable_variables())

    # calculate the cross entropy of forecast (y) and actual (y_)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # define learning rate and train step
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples,
                                               LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate)\
        .minimize(loss, global_step=global_step)
    # update the params and avg value in the same time
    with tf.control_dependencies([train_step, variable_avg_op]):
        train_op = tf.no_op(name='train')

    # init tensorflow model saver
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })

            # save model every 1000 times
            if i % 1000 == 0:
                print('after %d training steps, loss value is %g ' %
                      (step, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
def train():
    x = tf.placeholder(
        tf.float32, [None, mnist_inference.Input_Node],
        name='x_input'
    )
    y_ = tf.placeholder(
        tf.float32, [None, mnist_inference.Output_Node],
        name='y_input'
    )
    regularizer = tf.contrib.layers.l2_regularizer(Regularization_Rate)
    y = mnist_inference.inference(x, regularizer)

    global_step = tf.Variable(0, trainable=False)
    varible_average = tf.train.ExponentialMovingAverage(Moving_Average_Decay)
    varible_average_op = varible_average.apply(tf.trainable_variables())

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(
        y_*tf.log(tf.clip_by_value(y, 1e-10, 1.0)), reduction_indices=[1]
    ))
    loss = cross_entropy + tf.add_n(tf.get_collection('losses'))

    Learning_Rate = tf.train.exponential_decay(
        Learning_Rate_Base,
        global_step=global_step,
        decay_steps=train_size,
        decay_rate=Learning_Rate_Decay
    )
    train_step = tf.train.GradientDescentOptimizer(Learning_Rate)\
                 .minimize(loss, global_step=global_step)

    train_op = tf.group(train_step, varible_average_op)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(100):
            index = int(i % train_size)
            label, data = readData('trainColloction/train' + str(index) + '.txt', 3, 588)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: data, y_: label[:, 0:-1]})
            if i % 1000 == 0:
                print("After %d training steps, loss on training "
                      "batch is %g" % (step, loss_value))
                saver.save(
                    sess, os.path.join(Model_Save_Path, Model_Name)
                )
        probability = 0.0
        for i in range(10):
            labelT, dataT = readData('testColloction/test' + str(i) + '.txt', 3, 588)
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            probability = probability + accuracy.eval({x: dataT, y_: labelT[:, 0:-1]})
        print(probability/test_size)
Example #25
def train(mnist):
    # Define the inputs and outputs
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODES],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODES],
                        name='y-input')

    # Define the regularizer
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Use mnist_inference to define the forward propagation
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)
    # Define the moving averages
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # Define the cross entropy
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Define the loss function
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Define the learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    # Training step
    train_step = tf.train.GradientDescentOptimizer(learning_rate)\
        .minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name="train")

    # Initialize TensorFlow's persistence class
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })
            # Save the model every 1000 steps
            if i % 1000 == 0:
                print(
                    'After %d training step(s), loss on training batch is %g.'
                    % (step, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
Example #26
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
    # Input variable format when using LeNet: a four-dimensional matrix
    '''
    x = tf.placeholder(tf.float32, [BATCH_SIZE,  # number of samples in a batch
                                    mnist_inference.IMAGE_SIZE,
                                    mnist_inference.IMAGE_SIZE,
                                    mnist_inference.NUM_CHANNELS],  # image depth
                       name="x-input")
    '''
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-input")
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Forward propagation
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)
    # Moving-average model
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # Softmax + cross-entropy loss
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))
    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples/BATCH_SIZE, LEARNING_RATE_DECAY)
    # Gradient-descent optimizer
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name="train")

    # Initialize the TensorFlow persistence class
    saver = tf.train.Saver()
    # Create a default session and initialize all variables
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # The model is no longer tested on the validation set during training
    for i in range(TRAINING_STEPS):
        # Train on BATCH_SIZE samples at a time
        xs, ys = mnist.train.next_batch(BATCH_SIZE)
        # For LeNet, the input data must be reshaped into a four-dimensional matrix before sess.run()
        '''
        xs = np.reshape(xs, (BATCH_SIZE,  # number of samples in a batch
                             mnist_inference.IMAGE_SIZE,
                             mnist_inference.IMAGE_SIZE,
                             mnist_inference.NUM_CHANNELS))  # image depth
        '''
        train_op_, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
        # Save the model every 1000 steps
        if i % 1000 == 0:
            print("After %d training steps, loss on training batch is %g." % (step, loss_value))
            saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
Example #27
def train(mnist):
    # Initialize the input and label placeholders
    x = tf.placeholder(
        tf.float32, [None, mnist_inference.input_node], name='x-input')
    y_ = tf.placeholder(
        tf.float32, [None, mnist_inference.output_node], name='y-input')

    # Define the L2 regularizer
    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)

    # Compute the output
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)
    # Compute the moving averages of the parameters
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(
        tf.trainable_variables())
    # Compute the cross entropy
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Compute the loss
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Adjust the learning rate
    learning_rate = tf.train.exponential_decay(
        learning_rate_base,
        global_step,
        mnist.train.num_examples / batch_size,
        learning_rate_decay)
    train_step = tf.train.GradientDescentOptimizer(learning_rate)\
                    .minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the persistence class
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(training_steps):
            xs, ys = mnist.train.next_batch(batch_size)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training "
                      "batch is %g." % (step, loss_value))
                saver.save(
                    sess, os.path.join(model_save_path, model_name),
                    global_step=global_step)
Example #28
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')

        # Validation feed
        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels
        }

        # Get the inference result directly; the test does not care about regularization
        y = mnist_inference.inference(x, None)

        # Whether the prediction is correct
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

        # Accuracy score
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model through variable renaming, so the forward propagation does not need to
        # call the moving-average function to obtain the averaged values.
        # This way the forward-propagation code in mnist_inference.py can be fully reused
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)

        variables_to_restore = variable_averages.variables_to_restore()

        saver = tf.train.Saver(variables_to_restore)

        # Compute the accuracy every 10 seconds
        while True:
            with tf.Session() as sess:
                # get_checkpoint_state() scans the checkpoint file to find the newest model file name in the directory
                ckpt = tf.train.get_checkpoint_state(
                    mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Parse the number of training steps from the file name
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print("After %s training step, validation accuracy = %g" %
                          (global_step, accuracy_score))
                else:
                    print("No checkpoint file found!")
                    return
            time.sleep(EVAL_INTERVAL_SECS)
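
The variables_to_restore() call above is what makes the renaming trick work: it maps each variable's moving-average name to the variable itself, so the Saver reads the checkpoint's shadow values directly. A standalone sketch of the mapping (an illustration, not from this listing):

# --- illustration sketch: the map produced by variables_to_restore() ---
import tensorflow as tf

v = tf.Variable(0.0, name='v')
ema = tf.train.ExponentialMovingAverage(0.99)
print(ema.variables_to_restore())
# Prints roughly: {'v/ExponentialMovingAverage': <tf.Variable 'v:0' ...>}
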
Example #29
def train(mnist):
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE,
        mnist_inference.NUM_CHANNELS
    ],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                        name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, False, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Loss function, learning rate, moving averages, and training step
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.argmax(y_, 1), logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class (Saver).
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(
                xs, (BATCH_SIZE, mnist_inference.IMAGE_SIZE,
                     mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: reshaped_xs,
                                               y_: ys
                                           })
            if i % 500 == 0:
                print(
                    'After %d training step(s), loss on training batch is %g' %
                    (step, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
def evaluate(mnist):
    with tf.Graph().as_default() as g:  # use graph g as the default graph
        # Define the input and output formats.
        validate_size = mnist.validation.num_examples
        x = tf.placeholder(tf.float32, [validate_size, mnist_inference.IMAGE_SIZE,
                                        mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        xs = mnist.validation.images
        # Reshape the validation data into a four-dimensional tensor.
        reshaped_xs = np.reshape(xs, (validate_size, mnist_inference.IMAGE_SIZE,
                                      mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
        validate_feed = {x: reshaped_xs, y_: mnist.validation.labels}

        # Compute the forward pass by calling the shared inference function directly.
        # Overfitting is not a concern at test time, so the regularizer is None.
        y = mnist_inference.inference(x, None, None)

        # Use the forward-pass result to compute the accuracy; to classify unseen
        # examples, tf.argmax(y, 1) gives each input's predicted class.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        # Cast the boolean array to floats, then take the mean; the mean is the
        # network's accuracy on this data (True counts as 1, False as 0).
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model via variable renaming.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variable_to_restore = variable_averages.variables_to_restore()
        # variables_to_restore() builds the dict of all moving-average (shadow)
        # values living under .../ExponentialMovingAverage; the
        # tf.train.ExponentialMovingAverage class provides it to generate the
        # variable mapping that tf.train.Saver needs.
        saver = tf.train.Saver(variable_to_restore)  # these values are pulled from the saved model


        with tf.Session() as sess:
            # tf.train.get_checkpoint_state automatically finds the newest
            # model file in the directory via the checkpoint file.
            ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                # Load the model.
                saver.restore(sess, ckpt.model_checkpoint_path)
                # All the moving-average values come from the checkpoint.
                # Recover the number of training steps from the file name.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)  # evaluate with this model
                # The moving averages are never initialized here; inference() only
                # provides the variable handles, and all values come from the checkpoint.
                print("After %s training steps, validation accuracy = %g"
                      %(global_step, accuracy_score))
            else:
                print("No checkpoint file found")
                return
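
The convolutional examples above call a three-argument mnist_inference.inference(input_tensor, train, regularizer). That file is not shown here; the following is only a much-reduced sketch of such a forward pass (layer sizes illustrative), not the actual mnist_inference.py:

import tensorflow as tf

def inference(input_tensor, train, regularizer):
    # One convolution + pooling stage (illustrative sizes).
    with tf.variable_scope('layer1-conv'):
        w = tf.get_variable(
            'weight', [5, 5, 1, 32],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('bias', [32], initializer=tf.constant_initializer(0.0))
        conv = tf.nn.relu(tf.nn.bias_add(
            tf.nn.conv2d(input_tensor, w, strides=[1, 1, 1, 1], padding='SAME'), b))
        pool = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')
    # Flatten, optionally apply dropout during training, project to 10 classes.
    with tf.variable_scope('layer2-fc'):
        shape = pool.get_shape().as_list()
        nodes = shape[1] * shape[2] * shape[3]
        flat = tf.reshape(pool, [-1, nodes])
        if train:
            flat = tf.nn.dropout(flat, 0.5)
        w = tf.get_variable(
            'weight', [nodes, 10],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(w))
        b = tf.get_variable('bias', [10], initializer=tf.constant_initializer(0.0))
        return tf.matmul(flat, w) + b
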
Beispiel #31
0
def train(mnist):
    # 784 input nodes.
    x = tf.placeholder(dtype=tf.float32,
                       shape=(None, mnist_inference.INPUT_NODE),
                       name='x-input')
    # 10 output nodes.
    y_ = tf.placeholder(dtype=tf.float32,
                        shape=(None, mnist_inference.OUTPUT_NODE),
                        name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Initialize the moving-average class given the decay rate and the training-step variable.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    # Apply moving averages to all variables that represent network parameters.
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    # Average the cross-entropy over all examples in the current batch.
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Exponentially decaying learning rate.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class (Saver).
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })
            if i % 1000 == 0:
                print('After %d training steps, loss on training batch is %g' %
                      (step, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
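
The control_dependencies + tf.no_op idiom used throughout these examples can equivalently be written with tf.group, which also produces a single op that runs both:

# Equivalent to the control_dependencies + tf.no_op pattern above:
train_op = tf.group(train_step, variable_averages_op, name='train')
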
Beispiel #32
0
def train(mnist):
  # Input and label placeholders; 'None' means input size is not specified.
  x = tf.placeholder(
    tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
  y_ = tf.placeholder(
    tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

  # Create the network (the regularization term is added to the 'losses' collection).
  regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
  y = mnist_inference.inference(x, regularizer)
  global_step = tf.Variable(0, trainable=False)

  variable_averages = tf.train.ExponentialMovingAverage(
    MOVING_AVERAGE_DECAY,
    global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())

  # Use 'sparse_softmax_cross_entropy_with_logits' to calculate the loss:
  # 'logits' is the raw output and 'labels' holds the correct answers.
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=y, labels=tf.argmax(y_, 1))
  cross_entropy_mean = tf.reduce_mean(cross_entropy)
  # 'add_n' sums all tensors in the 'losses' collection element-wise.
  loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

  # Applies exponential decay to the learning rate.
  learning_rate = tf.train.exponential_decay(
    LEARNING_RATE_BASE,
    global_step,
    mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY,
    staircase=True)
  train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    loss, global_step=global_step)

  # Make sure both 'train_step' and 'variables_averages_op' are completed.
  with tf.control_dependencies([train_step, variables_averages_op]):
    train_op = tf.no_op(name='train')

  saver = tf.train.Saver()
  with tf.Session() as sess:
    tf.global_variables_initializer().run()
    # Start training loop; save model every 1000 steps.
    for i in range(TRAINING_STEPS):
      xs, ys = mnist.train.next_batch(BATCH_SIZE)
      _, loss_value, step = sess.run([train_op, loss, global_step],
                                     feed_dict={x: xs, y_: ys})
      if i % 1000 == 0:
        print("After %d training step(s), loss on training batch is %g." %
              (step, loss_value))
        saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                   global_step=global_step)
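
For intuition, the schedule tf.train.exponential_decay computes reduces, in plain Python, to the following; staircase=True floors the exponent so the rate only drops once per decay_steps (roughly once per epoch here):

def decayed_lr(step, base, decay_rate, decay_steps, staircase=True):
    # staircase floors the exponent, giving a stepwise schedule.
    exponent = step // decay_steps if staircase else step / decay_steps
    return base * decay_rate ** exponent
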
Beispiel #33
0
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output formats.
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')
        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels
        }

        # Compute the forward pass by calling the shared inference function. The
        # regularization loss is irrelevant at test time, so the regularizer
        # argument is set to None.
        y = mnist_inference.inference(x, None)

        # Use the forward-pass result to compute the accuracy; to classify unseen
        # examples, tf.argmax(y, 1) gives each input's predicted class.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model via variable renaming, so the forward pass does not need
        # to call the moving-average functions itself; the forward pass defined in
        # mnist_inference.py is reused unchanged.
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Every EVAL_INTERVAL_SECS seconds, recompute the accuracy to track how it
        # evolves during training.
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state automatically finds the newest
                # model file in the directory via the checkpoint file.
                ckpt = tf.train.get_checkpoint_state(
                    mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the number of training steps from the file name.
                    global_step = ckpt.model_checkpoint_path\
                                      .split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print("After %s training step(s), validation "
                          "accuracy = %g" % (global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return
            time.sleep(EVAL_INTERVAL_SECS)
Beispiel #34
0
def evaluate(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                        name='y-input')
    y = mnist_inference.inference(x, None)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './model/model.ckpt')
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}
        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print('Test accuracy:', test_acc)
def test(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [
            mnist.validation.num_examples, mnist_inference.IMAGE_SIZE,
            mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS
        ],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')

        # prepare validation feed
        reshaped_xs = np.reshape(
            mnist.validation.images,
            (mnist.validation.num_examples, mnist_inference.IMAGE_SIZE,
             mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
        validate_feed = {x: reshaped_xs, y_: mnist.validation.labels}

        # calculate the result
        y = mnist_inference.inference(x, False, None)

        # calculate the accuracy
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # init the moving average class
        variable_avg = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_avg.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # test the model every TEST_INTERVAL_SECS seconds
        while True:
            with tf.Session() as sess:
                # find the latest model in the path
                ckpt = tf.train.get_checkpoint_state(
                    mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)

                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print(
                        'After %s training step(s), validation accuracy is %g' %
                        (global_step, accuracy_score))
                else:
                    print('no checkpoint file found')
                    return
            time.sleep(TEST_INTERVAL_SECS)
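
The evaluate function above hard-codes './model/model.ckpt', which only works for a single saved model. tf.train.latest_checkpoint resolves the newest checkpoint prefix in a directory and is a more robust variant of the same restore (saver and sess as in that function):

ckpt_path = tf.train.latest_checkpoint('./model')  # checkpoint prefix or None
if ckpt_path:
    saver.restore(sess, ckpt_path)
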
Beispiel #36
0
def train(x_train, y_train):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                        name='y-input')

    # Build the L2 regularizer first; inference expects a regularizer function, not a rate.
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               len(y_train) / BATCH_SIZE,
                                               LEARNING_RATE_DECAY,
                                               staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class (Saver).
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            # Batches per epoch; get_batch is a user-written helper (sketched below).
            num_batches = int(len(y_train) / BATCH_SIZE)
            xs, ys = get_batch(x_train, y_train, BATCH_SIZE,
                               i % num_batches, num_batches)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })

            # Save the results every 1000 steps.
            if i % 1000 == 0:
                # Print the current training status; the loss on the current batch
                # gives a rough picture of how training is going.
                print(
                    "After %d training step(s), loss on training batch is %g."
                    % (step, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
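
get_batch in this example is a user-written helper that is not shown. A minimal sketch consistent with the call site above; the signature and slicing policy are assumptions, not the original helper:

def get_batch(x_train, y_train, batch_size, batch_index, num_batches):
    # Slice out the batch_index-th chunk of the training arrays.
    # num_batches is accepted to match the call site but is unused here.
    start = batch_index * batch_size
    return x_train[start:start + batch_size], y_train[start:start + batch_size]
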
Beispiel #37
0
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')
        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels
        }

        # Compute the forward pass by calling the shared inference function; the
        # regularization loss is irrelevant at test time, so the regularizer is None.
        y = mnist_inference.inference(x, None)

        correction_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correction_prediction, tf.float32))

        # Load the model via variable renaming, so the forward pass does not need to
        # call the moving-average functions itself; the forward pass defined in
        # mnist_inference.py is reused unchanged.
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        #{'v/ExponentialMovingAverage': <tf.Variable 'v:0' shape=() dtype=float32_ref>}
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Every EVAL_INTERVAL_SECS seconds, recompute the accuracy to monitor it
        # during training.
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state automatically finds the newest
                # model file in the directory via the checkpoint file.
                ckpt = tf.train.get_checkpoint_state(
                    mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the number of training steps from the file name.
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print(
                        "After %s training step(s), validation accuracy = %g" %
                        (global_step, accuracy_score))

                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)
Beispiel #38
0
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                        name='y-input')
    # L2 regularization to avoid overfitting.
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    y = mnist_inference.inference(x, regularizer)
    # Tracks how many steps the model has been trained.
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)

    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(y_, 1),
                                                       logits=y))

    loss = cross_entropy + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE, global_step, mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(TRAINING_STEPS):
            batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: batch_xs,
                                               y_: batch_ys
                                           })

            if i % 2000 == 0:
                print(
                    "After %d training steps, loss on training batch is %g." %
                    (i, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.input_node],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.output_node],
                            name='y-input')

        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels
        }

        y = mnist_inference.inference(x, None)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.moving_average_decay)
        # Rename the moving-average shadow variables created by
        # variable_averages.apply() in mnist_train so they can be restored
        # into the original variables.
        variable_to_restore = variable_averages.variables_to_restore()
        #print(variable_to_restore)
        saver = tf.train.Saver(variable_to_restore)  # only the model's moving-average values are loaded
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state('Model/')
            #print(ckpt)
            if ckpt and ckpt.all_model_checkpoint_paths:
                # Load the models: when several checkpoint files exist,
                # validate every one of them.
                for path in ckpt.all_model_checkpoint_paths:
                    saver.restore(sess, path)
                    global_step = path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print(
                        "After %s training step(s), validation accuracy = %g" %
                        (global_step, accuracy_score))
                '''
                # Validate only the newest model:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                '''
            else:
                print('No checkpoint file found')
                return
        #time.sleep(eval_interval_secs)
        return
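
Looping over ckpt.all_model_checkpoint_paths only finds the checkpoints the training Saver kept, and tf.train.Saver deletes all but the five most recent by default; a training script that wants several models available for this kind of sweep must raise that limit:

# In the training script: retain up to 50 recent checkpoints instead of 5.
saver = tf.train.Saver(max_to_keep=50)
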
Beispiel #40
0
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # moving average, cross entropy, loss function with regularization and learning rate
    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_average_op = variable_average.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY
    )

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_average_op]):
        train_op = tf.no_op(name='train')

    # initialize persistence class
    saver = tf.train.Saver()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
    config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        # Create the save directory if it does not already exist.
        os.makedirs(MODEL_SAVE_PATH, exist_ok=True)

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)

            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})

            if i % 2500 == 0:
                print("after %d training step(s), loss on training batch is %g " % (step, loss_value))
                saver.save(
                    sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step
                )
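
A design note on the session config above: per_process_gpu_memory_fraction caps the share of GPU memory TensorFlow may claim up front, while allow_growth makes it allocate on demand up to that cap. A growth-only config is often all that is needed:

config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True  # allocate GPU memory as needed
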
Beispiel #41
0
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')
        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels
        }

        # The regularizer is set to None at test time.
        y = mnist_inference.inference(x, None)

        # Compute the accuracy.
        # tf.argmax(y, 1) computes the predicted answer for every example:
        # y is a batch_size x 10 array in which each row holds one example's
        # forward-pass result. The second argument, 1, takes the argmax within
        # each row, yielding a 1-D array of length batch_size whose entries
        # are the predicted digits.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        # Cast to float, then take the mean.
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Initialize the moving-average class given the decay rate; supplying the
        # training-step variable (during training) speeds up early updates.
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variable_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variable_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(
                    mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    print('ckpt.model_checkpoint_path',
                          ckpt.model_checkpoint_path)
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print(
                        'After %s training step(s), validation accuracy = %g' %
                        (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)
def train(mnist):
    # Define the input/output placeholders.
    # The input placeholder is a four-dimensional tensor:
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,                             # first dimension: number of examples in a batch
        mnist_inference.IMAGE_SIZE,             # second and third dimensions: image size
        mnist_inference.IMAGE_SIZE,
        mnist_inference.NUM_CHANNELS],          # fourth dimension: image depth (3 for RGB, 1 for MNIST)
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], 
                        name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
    # Reuse the forward pass defined in mnist_inference.py directly.
    y = mnist_inference.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss function, learning rate, moving-average op, and training step.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples/BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class (Saver).
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # Validation and testing are handled by a separate program.
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # As with the placeholder, reshape the training batch into a
            # four-dimensional tensor before feeding it to sess.run.
            reshaped_xs = np.reshape(xs, (BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
            # Save the model every 1000 steps.
            if i%1000 == 0:
                # Print the current training status: only the loss on the current
                # training batch is reported here, which gives a rough picture of
                # progress; validation accuracy is produced by a separate program.
                print("After %d training step(s), loss on training batch is %f." % (step, loss_value))
                # Save the current model. Passing global_step appends the number of
                # training steps to each saved file name, e.g. "model.ckpt-1000" is
                # the model after 1000 training steps.
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
def evaluate(mnist):
    with tf.Graph().as_default() as g:

        # Define the input and output formats.
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        # Compute the forward pass with the shared inference function.
        y = mnist_inference.inference(x, None)

        # Use the forward-pass result to compute the accuracy.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model via variable renaming; no moving-average function calls
        # are needed to obtain the averaged values.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Recompute the accuracy every 10 seconds.
        while True:
            with tf.Session() as sess:
                # Find the newest model file.
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the number of training steps from the file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" %
                          (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)
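
All of these scripts assume a shared block of hyperparameters defined at the top of the training module. Representative values in the spirit of the examples; these are illustrative assumptions, not the originals:

BATCH_SIZE = 100              # examples per training batch
LEARNING_RATE_BASE = 0.8      # initial learning rate
LEARNING_RATE_DECAY = 0.99    # decay rate per epoch
REGULARIZATION_RATE = 0.0001  # L2 regularization weight
TRAINING_STEPS = 30000        # total training iterations
MOVING_AVERAGE_DECAY = 0.99   # EMA decay used for evaluation weights
MODEL_SAVE_PATH = 'model/'    # checkpoint directory
MODEL_NAME = 'model.ckpt'     # checkpoint file prefix
EVAL_INTERVAL_SECS = 10       # seconds between evaluation runs
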