Example #1
def backward():  # run backward propagation to train the parameters w
    x = tf.placeholder(tf.float32, [None, 32, 32, 3])
    y_ = tf.placeholder(tf.float32, [None])  # labels as class indices

    y, end_points = nets.resnet_v2.resnet_v2_50(x, num_classes=10, is_training=True)
    y = tf.reshape(y, shape=[-1, 10])  # predicted logits

    global_step = tf.Variable(0, trainable=False)

    # one-hot encode the labels for the 10 classes
    one_hot_labels = tf.one_hot(indices=tf.cast(y_, tf.int32), depth=10)
    # define the loss function and the optimizer
    loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=one_hot_labels))
    learning_rate = tf.train.exponential_decay(  # exponentially decaying learning rate
        LEARNING_RATE_BASE,  # base rate, decayed as training progresses
        global_step,  # current training step
        train_num_examples / BATCH_SIZE,  # steps needed to see all training data once
        LEARNING_RATE_DECAY,  # decay rate
        staircase=True)
    # gradient descent on the loss, which combines cross entropy and regularization
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)  # moving-average tracker
    ema_op = ema.apply(tf.trainable_variables())  # track a moving average of every trainable variable
    # accuracy
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(one_hot_labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # tf.control_dependencies groups several ops into one: each training step both
    # updates the parameters via backprop and refreshes their moving averages
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')
    saver = tf.train.Saver()

    # fetch one batch of 32x32x3 images and their matching labels per call
    img_batch, label_batch = cifar10_lenet5_generateds.get_tfrecord(BATCH_SIZE, isTrain=True)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        ckpt = tf.train.get_checkpoint_state("./model")  # look for a trained model under "./model"
        if ckpt and ckpt.model_checkpoint_path:  # if a checkpoint exists, restore it into the session
            saver.restore(sess, ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()  # coordinator that manages the input threads
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # start the queue runners; the filename queue is now populated

        for i in range(STEPS):
            b_image, b_label = sess.run([img_batch, label_batch])

            _, step, accuracy_, loss_ = sess.run(
                [train_op, global_step, accuracy, loss],
                feed_dict={x: b_image, y_: b_label})
            if i % 1000 == 0:
                print("after %d training step(s), accuracy: %g, loss is %g." %
                      (step, accuracy_, loss_))
                # save the model; passing global_step appends the step count
                # to each checkpoint file name
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
        coord.request_stop()
        coord.join(threads)
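
The snippets here all call cifar10_lenet5_generateds.get_tfrecord, whose source is not shown. For orientation, this is a minimal sketch of what such a TF 1.x TFRecord reader usually looks like; the file names, feature keys, and label encoding are assumptions, not the project's actual implementation (Example #4 reshapes labels to one-hot, so the real reader may return one-hot vectors instead of indices).

def get_tfrecord_sketch(num, isTrain=True):
    # hypothetical reader; file paths and feature keys are assumptions
    path = "./data/cifar10_train.tfrecords" if isTrain else "./data/cifar10_test.tfrecords"
    filename_queue = tf.train.string_input_producer([path])
    _, serialized = tf.TFRecordReader().read(filename_queue)
    features = tf.parse_single_example(
        serialized,
        features={
            'img_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64)
        })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [32, 32, 3])
    img = tf.cast(img, tf.float32) * (1.0 / 255)  # scale pixels to [0, 1]
    label = tf.cast(features['label'], tf.float32)
    # shuffle_batch is what the queue runners started in the sessions above feed
    img_batch, label_batch = tf.train.shuffle_batch(
        [img, label], batch_size=num, num_threads=2,
        capacity=1000, min_after_dequeue=700)
    return img_batch, label_batch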
def test():
    with tf.Graph().as_default() as g:  # rebuild the graph defined earlier and run the steps below
        x = tf.placeholder(
            tf.float32,
            [  # placeholder x stands in for the input images
                BATCH_SIZE, cifar10_lenet5_forward.IMAGE_SIZE,
                cifar10_lenet5_forward.IMAGE_SIZE,
                cifar10_lenet5_forward.NUM_CHANNELS
            ])

        y_ = tf.placeholder(
            tf.float32,
            [None, cifar10_lenet5_forward.OUTPUT_NODE])  # placeholder y_ holds the ground-truth labels
        y = cifar10_lenet5_forward.forward(x, False, None)  # y is the network's output, compared against y_

        # moving-average model: MOVING_AVERAGE_DECAY controls how fast the averages
        # update; training maintains a shadow variable for every trainable variable
        ema = tf.train.ExponentialMovingAverage(
            cifar10_lenet5_backward.MOVING_AVERAGE_DECAY)
        # variables_to_restore() returns a dict {ema_variable_name: variable}
        # mapping shadow names to the current variables
        ema_restore = ema.variables_to_restore()
        # this saver restores the moving-average (shadow) values of w, which
        # generalize better at test time
        saver = tf.train.Saver(ema_restore)

        # compare predictions against the labels: correct_prediction is True
        # wherever tf.argmax(y, 1) equals tf.argmax(y_, 1)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        # cast correct_prediction from boolean to tf.float32 and average it
        # to get the prediction accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        img_batch, label_batch = cifar10_lenet5_generateds.get_tfrecord(
            TEST_NUM, isTrain=False)  # fetch TEST_NUM images and labels per batch

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(
                    cifar10_lenet5_backward.MODEL_SAVE_PATH)  # load the trained model from the save path
                if ckpt and ckpt.model_checkpoint_path:  # if a checkpoint exists, restore it
                    saver.restore(sess, ckpt.model_checkpoint_path)  # restore the saved variables into this session

                    # split ckpt.model_checkpoint_path on "/" and "-" to extract the
                    # trailing integer, i.e. the step count at which the model was saved
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]

                    coord = tf.train.Coordinator()  # start the thread coordinator
                    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                    xs, ys = sess.run([img_batch, label_batch])  # fetch a batch of images and labels

                    reshaped_xs = np.reshape(
                        xs,
                        (  # reshape the input to match the placeholder
                            BATCH_SIZE, cifar10_lenet5_forward.IMAGE_SIZE,
                            cifar10_lenet5_forward.IMAGE_SIZE,
                            cifar10_lenet5_forward.NUM_CHANNELS))

                    accuracy_score = sess.run(
                        accuracy,  # compute the accuracy
                        feed_dict={x: reshaped_xs, y_: ys})

                    print("after %s training step(s), test accuracy = %g" %
                          (global_step, accuracy_score))

                    coord.request_stop()
                    coord.join(threads)

                else:  # no checkpoint file found; report and exit
                    print("No checkpoint file found")
                    return
            time.sleep(INTERVAL_TIME)  # wait for backward() to write a new checkpoint, then test again
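
The comments above lean on the shadow-variable mechanism behind tf.train.ExponentialMovingAverage. A tiny self-contained demonstration, independent of the CIFAR-10 project, of how variables_to_restore() maps shadow names back to the original variables:

import tensorflow as tf

w = tf.Variable(10.0, name='w')
ema = tf.train.ExponentialMovingAverage(0.99)
maintain = ema.apply([w])  # creates the shadow variable 'w/ExponentialMovingAverage'

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # shadow starts at w's initial value, 10.0
    sess.run(tf.assign(w, 0.0))
    sess.run(maintain)  # shadow := 0.99 * shadow + 0.01 * w
    print(sess.run([w, ema.average(w)]))  # [0.0, 9.9]

# variables_to_restore() maps the shadow names to the variables, so a Saver
# built from it loads the shadow values *into* w at restore time:
print(ema.variables_to_restore())  # {'w/ExponentialMovingAverage': <tf.Variable 'w:0' ...>}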
Example #4
def backward():  # run backward propagation to train the parameters w
    x = tf.placeholder(
        tf.float32,
        [  # placeholder x stands in for the input images
            BATCH_SIZE, cifar10_lenet5_forward.IMAGE_HEIGHT,
            cifar10_lenet5_forward.IMAGE_WIDTH,
            cifar10_lenet5_forward.NUM_CHANNELS
        ])
    y_ = tf.placeholder(tf.float32, [None, 10])  # placeholder y_ receives the labels
    # True marks the training phase, so the if branch in forward() applies dropout
    y = cifar10_lenet5_forward.forward(x, True, REGULARIZER)  # network output
    y = tf.reshape(y, [-1, 10])

    global_step = tf.Variable(0, trainable=False)  # step counter, marked not trainable

    # cross entropy (ce) via tf.nn.sparse_softmax_cross_entropy_with_logits: the first
    # argument is the forward-pass result without the softmax layer, the second is the
    # correct class index; tf.argmax(vector, 1) returns the index of the maximum entry
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)  # average cross entropy over the current batch
    # total loss = cross entropy + regularization; the 'losses' collection holds the
    # regularization terms that get_weight() in the forward module added
    loss = cem + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(  # exponentially decaying learning rate
        LEARNING_RATE_BASE,  # base rate, decayed as training progresses
        global_step,  # current training step
        train_num_examples / BATCH_SIZE,  # steps needed to see all training data once
        LEARNING_RATE_DECAY,  # decay rate
        staircase=True)

    # gradient descent on the loss, which combines cross entropy and regularization
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,
                                            global_step)  # moving-average tracker
    ema_op = ema.apply(tf.trainable_variables())  # track a moving average of every trainable variable
    # bind train_step and ema_op together so both run in a single step: each pass
    # updates the parameters via backprop and refreshes their moving averages
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()  # Saver object used to checkpoint the model

    img_batch, label_batch = cifar10_lenet5_generateds.get_tfrecord(
        BATCH_SIZE, isTrain=True)  # fetch BATCH_SIZE images and labels per batch

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # initialize all variables
        # restore block
        ckpt = tf.train.get_checkpoint_state("./model")  # look for a trained model under "./model"
        if ckpt and ckpt.model_checkpoint_path:  # if a checkpoint exists, restore it into the session
            print(ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()  # start the thread coordinator
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(STEPS):  # train the network iteratively
            xs, ys = sess.run([img_batch, label_batch])  # one batch of training data and labels

            reshaped_xs = np.reshape(
                xs,
                (  # reshape the input to match the placeholder
                    BATCH_SIZE, cifar10_lenet5_forward.IMAGE_HEIGHT,
                    cifar10_lenet5_forward.IMAGE_WIDTH,
                    cifar10_lenet5_forward.NUM_CHANNELS))
            reshaped_ys = np.reshape(ys, (-1, 10))

            # run train_op, loss and global_step; '_' discards the train_op result,
            # which is not used again
            _, loss_value, step = sess.run(
                [train_op, loss, global_step],
                feed_dict={x: reshaped_xs, y_: reshaped_ys})
            if i % 1000 == 0:  # every 1000 steps, report the loss and save the model
                print(
                    "after %d training step(s), loss on training batch is %g."
                    % (step, loss_value))
                # the checkpoint name is MODEL_SAVE_PATH + MODEL_NAME + global_step;
                # passing global_step appends the step count to the file name
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
        coord.request_stop()  # stop the thread coordinator
        coord.join(threads)
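
The learning-rate schedule built by tf.train.exponential_decay in both backward() functions can be checked by hand: with staircase=True the rate is base_rate * decay_rate ** (step // steps_per_epoch), dropping once per epoch. A standalone sketch with illustrative constants (not the project's actual hyperparameters):

import tensorflow as tf

base_rate = 0.1
decay_rate = 0.9
steps_per_epoch = 100  # plays the role of train_num_examples / BATCH_SIZE

step = tf.placeholder(tf.int64)
lr = tf.train.exponential_decay(base_rate, step, steps_per_epoch,
                                decay_rate, staircase=True)
with tf.Session() as sess:
    for s in [0, 99, 100, 250]:
        # staircase=True: lr = base_rate * decay_rate ** (s // steps_per_epoch)
        print(s, sess.run(lr, feed_dict={step: s}))
# prints 0.1, 0.1, 0.09, 0.081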
# coding:utf-8
import tensorflow as tf
import tensorflow.contrib.slim.nets as nets
import cifar10_lenet5_generateds
BATCH_SIZE = 100

save_dir = r"./train_image_63.model"
batch_size_ = 100
lr = tf.Variable(0.0001, dtype=tf.float32)
x = tf.placeholder(tf.float32, [None, 32, 32, 3])
y_ = tf.placeholder(tf.float32, [None])  # labels as class indices

img_batch, label_batch = cifar10_lenet5_generateds.get_tfrecord(
    BATCH_SIZE, isTrain=True)  # one batch of 32x32x3 images and their matching labels

# one-hot encode the labels for the 10 classes
one_hot_labels = tf.one_hot(indices=tf.cast(y_, tf.int32), depth=10)
pred, end_points = nets.resnet_v2.resnet_v2_50(x,
                                               num_classes=10,
                                               is_training=True)
pred = tf.reshape(pred, shape=[-1, 10])  # predicted logits, playing the role of y
# define the loss function and the optimizer
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=pred,
                                            labels=one_hot_labels))
optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
# accuracy
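
The snippet breaks off at the accuracy comment. Judging from the matching ops in Example #6 below, the continuation presumably resembled this sketch; it is an assumption, not recovered source:

# assumed continuation, mirroring Example #6 below
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(one_hot_labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))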
Example #6
def test():
    with tf.Graph().as_default() as g:  # rebuild the graph defined earlier and run the steps below
        x = tf.placeholder(tf.float32, [None, 32, 32, 3])
        y_ = tf.placeholder(tf.float32, [None])  # labels as class indices

        y, end_points = nets.resnet_v2.resnet_v2_50(x,
                                                    num_classes=10,
                                                    is_training=True)
        y = tf.reshape(y, shape=[-1, 10])  # predicted logits

        # the moving-average restore used in the LeNet-5 version is disabled here:
        # ema = tf.train.ExponentialMovingAverage(cifar10_lenet5_backward.MOVING_AVERAGE_DECAY)
        # ema_restore = ema.variables_to_restore()
        # saver = tf.train.Saver(ema_restore)
        saver = tf.train.Saver()  # plain Saver that restores the current variable values

        one_hot_labels = tf.one_hot(indices=tf.cast(y_, tf.int32), depth=10)
        correct_prediction = tf.equal(tf.argmax(y, 1),
                                      tf.argmax(one_hot_labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        img_batch, label_batch = cifar10_lenet5_generateds.get_tfrecord(
            TEST_NUM, isTrain=False)  # fetch TEST_NUM images and labels per batch

        print("before true")
        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(
                    cifar10_lenet5_backward.MODEL_SAVE_PATH)  # load the trained model from the save path
                if ckpt and ckpt.model_checkpoint_path:  # if a checkpoint exists, restore it
                    saver.restore(sess, ckpt.model_checkpoint_path)  # restore the saved variables into this session

                    # split ckpt.model_checkpoint_path on "/" and "-" to extract the
                    # trailing integer, i.e. the step count at which the model was saved
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]

                    coord = tf.train.Coordinator()  # start the thread coordinator
                    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                    xs, ys = sess.run([img_batch, label_batch])  # fetch a batch of images and labels

                    accuracy_score = sess.run(
                        accuracy,  # compute the accuracy
                        feed_dict={x: xs, y_: ys})

                    print("after %s training step(s), test accuracy = %g" %
                          (global_step, accuracy_score))

                    coord.request_stop()
                    coord.join(threads)

                else:  # no checkpoint file found; report and exit
                    print("No checkpoint file found")
                    return
            time.sleep(INTERVAL_TIME)  # wait for backward() to write a new checkpoint, then test again
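
The step count is recovered above by splitting the checkpoint path on "/" and "-". An equivalent sketch using os.path.basename keeps the parsing in one place; this is a stylistic alternative, not the original code:

import os

def step_from_checkpoint(path):
    # "./model/model.ckpt-3000" -> "3000"
    return os.path.basename(path).split('-')[-1]

print(step_from_checkpoint("./model/model.ckpt-3000"))  # 3000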