Пример #1
0
def backward():
    """Train the U-Net: resume from the latest checkpoint if one exists,
    run the training loop, and periodically save the model plus one
    sample output image.

    Relies on module-level names: forward, generateds, MODEL_SAVE_PATH,
    TRAINING_RESULT_PATH — TODO confirm they exist in this module.
    """
    # Placeholders for 512x512 single-channel inputs and targets.
    X = tf.placeholder(tf.float32, [None, 512, 512, 1])
    Y = tf.placeholder(tf.float32, [None, 512, 512, 1])

    # Network output (logits), batch size 1, training mode.
    y_ = forward(X, 1, True)

    # Pixel-wise sigmoid cross-entropy between targets and logits.
    loss = tf.losses.sigmoid_cross_entropy(Y, y_)

    # Keep the step count in a variable so training can resume after restore.
    global_step = tf.Variable(0, trainable=False)
    incr_global_step = tf.assign(global_step, global_step + 1)
    train_step = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)
    train_op = tf.group([train_step, incr_global_step])

    saver = tf.train.Saver()
    X_batch, Y_real_batch = generateds.get_tfrecord(1, True)

    # makedirs(..., exist_ok=True) avoids the exists()/mkdir() race.
    os.makedirs(MODEL_SAVE_PATH, exist_ok=True)
    os.makedirs(TRAINING_RESULT_PATH, exist_ok=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(global_step.eval(), 10000):
            xs, ys = sess.run([X_batch, Y_real_batch])
            _, step = sess.run([train_op, global_step], feed_dict={X: xs, Y: ys})

            print(i)
            if step % 50 == 0:
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, 'unet_model'),
                           global_step=global_step)
                los = sess.run(loss, feed_dict={X: xs, Y: ys})
                print('after %d step training, loss is: %f' % (step, los))
                test_result = sess.run(y_, feed_dict={X: xs})
                # Map from [-1, 1] to [0, 255].  Bug fix: the original
                # scaled by 256, which wraps the value 1.0 to pixel 0
                # when cast to uint8.
                img = np.clip((test_result + 1) / 2 * 255, 0, 255)
                img = img.astype(np.uint8)
                img = img.reshape(512, 512)  # assumes batch size 1 — TODO confirm
                Image.fromarray(img, 'L').save(
                    TRAINING_RESULT_PATH + '/' + str(step) + '.tif')

        coord.request_stop()
        coord.join(threads)
Пример #2
0
def backward(mnist):
    """Train the MNIST classifier: softmax cross-entropy plus collected
    regularization losses, SGD with exponentially decaying learning rate,
    and EMA-tracked weights; checkpoints every 1000 steps."""
    x = tf.placeholder(tf.float32, [None, forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
    y = forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    # Sparse labels are recovered from the one-hot y_ via argmax;
    # the regularization terms were collected by forward().
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    loss = tf.reduce_mean(cross_entropy) + tf.add_n(tf.get_collection('losses'))

    # Decay the learning rate once per epoch (staircase).
    decay_steps = mnist.train.num_examples / BATCH_SIZE
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               decay_steps,
                                               LEARNING_RATE_DECAY,
                                               staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Bundle the SGD step and the EMA update into a single training node.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    img_batch, label_batch = generateds.get_tfrecord(BATCH_SIZE, isTrain=True)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Resume from the newest checkpoint when one exists.
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(STEPS):
            xs, ys = sess.run([img_batch, label_batch])
            feed = {x: xs, y_: ys}
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict=feed)
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g."
                      % (step, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)

        coord.request_stop()
        coord.join(threads)
Пример #3
0
def test():
    # Each call reports the accuracy of the most recently saved checkpoint.
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [
            TEST_NUM, forward.IMAGE_SIZE, forward.IMAGE_SIZE,
            forward.NUM_CHANNELS
        ])
        y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
        # False marks the inference phase; no regularizer during testing.
        y = forward.forward(x, False, None)

        # Saver that restores the EMA (shadow) values into the weights.
        ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)
        # Test accuracy: fraction of samples whose argmax prediction matches.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Batch tensors from the tfrecord reader; evaluated via sess.run below.
        img_batch, label_batch = generateds.get_tfrecord(TEST_NUM,
                                                         isTrain=False)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                # Restore the trained model.
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Step count is the numeric suffix of the checkpoint filename.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                # -------------------------------------------------
                # Thread coordinator for the input queue runners.
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess,
                                                       coord=coord)  # 4
                # Fetch one batch of samples and labels.
                xs, ys = sess.run([img_batch, label_batch])
                reshaped_xs = np.reshape(
                    xs, (TEST_NUM, forward.IMAGE_SIZE, forward.IMAGE_SIZE,
                         forward.NUM_CHANNELS))
                # Run the accuracy node to get the test result.
                accuracy_score = sess.run(accuracy,
                                          feed_dict={
                                              x: reshaped_xs,
                                              y_: ys
                                          })

                print("After %s training step(s), test accuracy = %g" %
                      (global_step, accuracy_score))

                coord.request_stop()
                coord.join(threads)
                # -------------------------------------------------
            else:
                print('No checkpoint file found')
                return
Пример #4
0
def test():
    with tf.Graph().as_default() as g:
        # Placeholders for inputs x and one-hot labels y_.
        x = tf.placeholder(tf.float32, [None, forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])

        # Forward pass producing the prediction y.
        y = forward.forward(x, REGULARIZER)

        # Saver that restores the EMA (shadow) values into the weights.
        ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        # Accuracy over the batch: matching argmax predictions.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        img_batch, label_batch = generateds.get_tfrecord(TEST_NUM,
                                                         isTrain=False)  #2
        while True:
            with tf.Session() as sess:
                # Locate the newest trained model.
                ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
                # Restore it when a checkpoint exists.
                if ckpt and ckpt.model_checkpoint_path:
                    # Restore the session parameters.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the step count from the checkpoint filename.
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]

                    coord = tf.train.Coordinator()  #3
                    threads = tf.train.start_queue_runners(sess=sess,
                                                           coord=coord)  #4
                    xs, ys = sess.run([img_batch, label_batch])  #5

                    # Compute the accuracy on this test batch.
                    accuracy_score = sess.run(accuracy,
                                              feed_dict={
                                                  x: xs,
                                                  y_: ys
                                              })
                    print("After %s training step(s), test accuracy = %g" %
                          (global_step, accuracy_score))

                    coord.request_stop()  #6
                    coord.join(threads)  #7
                # No model found.
                else:
                    print('No checkpoint file found')  # nothing to evaluate
                    return
                time.sleep(TEST_INTERVAL_SECS)
Пример #5
0
def test():
    """Colorize TEST_NUM images with the trained generator and save each
    [input | generated | ground-truth] comparison strip as a PNG.

    Relies on module-level names: forward, generateds, backward,
    TEST_RESULT_PATH, TEST_NUM — TODO confirm they exist in this module.
    """
    # Placeholder for the (grayscale, stored as 3-channel) input images.
    X = tf.placeholder(tf.float32, [None, None, None, 3])
    # The 'generator' scope keeps generator variables named consistently
    # with the checkpoint written by the training script.
    with tf.name_scope('generator'), tf.variable_scope('generator'):
        Y = forward.forward(X, 1, False)  # generator output, batch 1, inference
    # Placeholder for the corresponding ground-truth color images.
    Y_real = tf.placeholder(tf.float32, [None, None, None, 3])
    # Concatenate input / generated / real along the width for display.
    XYY = tf.concat([X, Y, Y_real], axis=2)

    global_step = tf.Variable(0, trainable=False)
    saver = tf.train.Saver()

    # One grayscale/color pair per batch from the tfrecord reader.
    X_batch, Y_real_batch = generateds.get_tfrecord(1, False)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Checkpoint Not Found')  # no model to test
            return

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # Create the result directory if it does not exist yet.
        os.makedirs(TEST_RESULT_PATH, exist_ok=True)

        for i in range(TEST_NUM):
            # One image per batch during testing.
            xs, ys = sess.run([X_batch, Y_real_batch])
            img = sess.run(XYY, feed_dict={X: xs, Y_real: ys})
            # Map from [-1, 1] to [0, 255].  Bug fix: the original scaled
            # by 256, which wraps the value 1.0 to pixel 0 under uint8.
            img = np.clip((img + 1) / 2 * 255, 0, 255).astype(np.uint8)
            Image.fromarray(img[0]).save(
                os.path.join(TEST_RESULT_PATH, '{}.png'.format(i + 1)))

        coord.request_stop()  # ask the reader threads to stop
        coord.join(threads)  # and wait for them
Пример #6
0
Файл: test.py Проект: reroze/DL
def test():
    """Periodically restore the newest checkpoint (with EMA weights) and
    report its accuracy on one test batch.

    Bug fixes vs. the original: the checkpoint guard used the misspelled
    attribute 'model_ckeckpoint_path' (always falsy/AttributeError), the
    feed_dict fed the labels into the prediction tensor y instead of the
    placeholder y_ (leaving y_ unfed), and ckpt.model_checkpoint_path was
    printed before checking that ckpt exists.
    """
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
        # Build the network; no regularizer during evaluation.
        y = forward.forward(x, None)

        # Restore the EMA (shadow) values in place of the raw weights.
        ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        # Accuracy: fraction of matching argmax predictions.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # isTrain=False: read from the held-out test set.
        img_batch, label_batch = generateds.get_tfrecord(
            TEST_NUM, isTrain=False)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    print(ckpt.model_checkpoint_path)
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the step count from the checkpoint filename.
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]

                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=sess,
                                                           coord=coord)
                    xs, ys = sess.run([img_batch, label_batch])

                    accuracy_score = sess.run(accuracy,
                                              feed_dict={
                                                  x: xs,
                                                  y_: ys
                                              })
                    print("After %s training step(s), test accuracy = %g" %
                          (global_step, accuracy_score))

                    coord.request_stop()
                    coord.join(threads)
                else:
                    print("No checkpoint file found")
                    return
            time.sleep(TEST_INTERVAL_SECS)
Пример #7
0
def backward(mnist):
    """Train the classifier: cross-entropy plus collected regularization
    losses, SGD with an exponentially decaying learning rate, and
    EMA-tracked weights; checkpoints every 1000 steps."""
    x = tf.placeholder(tf.float32, [None, forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
    y = forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    # Sparse labels come from the one-hot y_ via argmax.
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    loss = tf.reduce_mean(xent) + tf.add_n(tf.get_collection('losses'))

    # Decay once per epoch (staircase).
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # One node that performs both the SGD step and the EMA update.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    img_batch, label_batch = generateds.get_tfrecord(BATCH_SIZE, isTrain=True)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Resume from the newest checkpoint when one exists.
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for step_idx in range(STEPS):
            xs, ys = sess.run([img_batch, label_batch])
            _, loss_value, step = sess.run(
                [train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if step_idx % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g."
                      % (step, loss_value))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)

        coord.request_stop()
        coord.join(threads)
Пример #8
0
def test():
    """Evaluate the latest checkpoint on a 100-image test batch.

    Uses graph nodes defined at module level (accuracy, images, y,
    drop_prob) — TODO confirm these exist in the enclosing module.
    """
    saver = tf.train.Saver()
    img_batch, label_batch = generateds.get_tfrecord(100, False)

    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Step count is the numeric suffix of the checkpoint filename.
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            xs, ys = sess.run([img_batch, label_batch])
            # drop_prob=1: keep every unit at evaluation time.
            accur = sess.run(accuracy, feed_dict={images: xs, y: ys, drop_prob: 1})
            print('accuracy after %s step: %f' % (global_step, accur))

            # Bug fix: the original restarted the queue runners here
            # instead of shutting them down, leaking reader threads.
            coord.request_stop()
            coord.join(threads)

        else:
            print('no checkpoint file found')
Пример #9
0
def test(mnist):
    """Loop forever: restore the newest checkpoint (EMA weights) and report
    test accuracy on one batch.

    Bug fix: the original body mixed tab and space indentation (L426,
    L435-439, L441 in the source), which raises TabError under Python 3;
    indentation is normalized to 4 spaces.  The `mnist` argument is unused
    here (data comes from generateds) but kept for caller compatibility.
    """
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [
            TEST_NUM,
            forward.IMAGE_SIZE,
            forward.IMAGE_SIZE,
            forward.NUM_CHANNELS])
        y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
        y = forward.forward(x, False, None)

        # Restore the EMA (shadow) values in place of the raw weights.
        ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        # Accuracy: fraction of matching argmax predictions.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        img_batch, label_batch = generateds.get_tfrecord(TEST_NUM, isTrain=False)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Step count parsed from the checkpoint filename.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

                    xs, ys = sess.run([img_batch, label_batch])
                    accuracy_score = sess.run(accuracy, feed_dict={x: xs, y_: ys})

                    print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
                    coord.request_stop()
                    coord.join(threads)
                else:
                    print('No checkpoint file found')
                    return
Пример #10
0
def train():
    """Run the training loop, logging cost/accuracy to result.txt and
    saving checkpoints every 10 epochs.

    Uses graph nodes defined at module level (cross_entry, optimizer,
    accuracy, images, y, drop_prob, batch_size, all_poch) — TODO confirm
    these exist in the enclosing module.
    """
    saver = tf.train.Saver()
    img_batch, label_batch = generateds.get_tfrecord(batch_size, True)

    # One file handle for the whole run.  Bug fix: the original opened
    # result.txt once at the top, reopened it on every log step, and never
    # closed any handle.
    with open('result.txt', 'a') as f, tf.Session() as sess:
        f.write('result\n')
        sess.run(tf.global_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # Train for all_poch epochs (one batch per epoch here).
        for epoch in range(all_poch):
            ave_cost = 0
            print(epoch)

            xs, ys = sess.run([img_batch, label_batch])
            cost, _ = sess.run([cross_entry, optimizer],
                               feed_dict={images: xs, y: ys, drop_prob: 0.75})

            ave_cost += cost / batch_size
            if (epoch + 1) % 10 == 0:
                # Bug fix: the trailing commas in the original built a tuple
                # here, so the log file received a tuple repr.
                cst = "epoch : %04d  cost :%.9f" % (epoch + 1, ave_cost)
                print(cst)
                # drop_prob=1: keep every unit when measuring accuracy.
                accur = sess.run(accuracy,
                                 feed_dict={images: xs, y: ys, drop_prob: 1})
                acr = 'accuracy after %d step: %f' % (epoch, accur)
                print(acr)
                f.write(cst + '\n')
                f.write(acr + '\n')
                f.flush()  # keep the log current even if training crashes
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=epoch)
                saver.save(sess, '/home/u22520/talex', global_step=epoch)

        coord.request_stop()
        coord.join(threads)
Пример #11
0
def test():
    # Nodes defined below live in graph g, the default TensorFlow graph.
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [
            TEST_NUM, forward.IMAGE_SIZE, forward.IMAGE_SIZE,
            forward.NUM_CHANNELS
        ])
        y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
        # False marks the test phase; the regularizer is set to None.
        y = forward.forward(x, False, None)

        # Instantiate a saver that restores the moving-average (shadow) values.
        ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        # Mean absolute difference between predicted and actual scores.
        distance_prediction = tf.abs(tf.argmax(y, 1) - tf.argmax(y_, 1))
        distance = tf.reduce_mean(tf.cast(distance_prediction, tf.float32))

        # Prediction accuracy.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # (Disabled) print the predicted scores / compute RMSE.
        # mse = tf.losses.mean_squared_error(tf.argmax(y, 1), tf.argmax(y_, 1))
        # rmse=tf.sqrt(mse)
        # thisprint = tf.Print(y, [tf.argmax(y, 1)],summarize=1100)
        # with tf.control_dependencies([rmse, thisprint]):
        #     myrmse = tf.no_op(name='train')

        # Read whole batches; isTrain=False selects the test set.
        img_batch, label_batch = generateds.get_tfrecord(TEST_NUM,
                                                         isTrain=False)

        while True:
            with tf.Session() as sess:
                # Find the current saved model and restore it.
                ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the step count: split the filename; the last
                    # '-'-separated part is the training step number.
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    # Batch fetching via queue-runner threads.
                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=sess,
                                                           coord=coord)
                    # Run the img_batch / label_batch nodes.
                    xs, ys = sess.run([img_batch, label_batch])
                    reshaped_x = np.reshape(
                        xs, (TEST_NUM, forward.IMAGE_SIZE, forward.IMAGE_SIZE,
                             forward.NUM_CHANNELS))
                    # Run the metric nodes to get the outputs.
                    accuracy_score, dis = sess.run([accuracy, distance],
                                                   feed_dict={
                                                       x: reshaped_x,
                                                       y_: ys
                                                   })
                    print(
                        "After %s training step(s), test accuracy = %g, distance = %g "
                        % (global_step, accuracy_score, dis))
                    # Ask all queue threads to terminate.
                    coord.request_stop()
                    # Join the started threads back into the main thread.
                    coord.join(threads)
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(TEST_INTERVAL_SECS)
Пример #12
0
def backward():  # Backprop module: the GAN discriminator, the guide decoder, and all training ops
    def dis_conv(X, kernels, stride, layer, regularizer=None):  # builds one discriminator conv layer (original comment said "deconv"; this is a strided conv)
        w = tf.get_variable('w{}'.format(layer), [
            forward.KERNEL_SIZE, forward.KERNEL_SIZE,
            X.get_shape().as_list()[-1], kernels
        ],
                            initializer=tf.truncated_normal_initializer(
                                0, 0.2))  # fetch the convolution kernel
        padded_X = tf.pad(X, [[0, 0], [1, 1], [1, 1], [0, 0]],
                          mode='CONSTANT')  # pad the input manually
        return tf.nn.conv2d(padded_X,
                            w, [1, stride, stride, 1],
                            padding='VALID')  # convolve with the kernel and stride above

    def discriminator(discriminator_input, discriminator_output):  # the GAN discriminator
        X = tf.concat([discriminator_input, discriminator_output],
                      axis=3)  # stack grayscale and color images channel-wise as input
        layers = [X]  # layers stores each layer's output
        for i in range(6):  # the discriminator has six layers
            stride = 2 if i < 4 else 1  # stride 2 for the first four layers, 1 for the last two
            kernels = forward.FIRST_OUTPUT_CHANNEL / 2 * 2**i if i < 5 else 1  # channels grow over five layers; the last layer has 1 channel marking each patch real/fake
            activation_fn = forward.lrelu if i < 5 else tf.nn.sigmoid  # lrelu for the first five layers; sigmoid squashes the last to 0~1
            bn = forward.batchnorm if i < 5 else tf.identity  # batch-norm on all but the last layer (never batch-norm the output layer)
            layers.append(
                activation_fn(bn(dis_conv(layers[-1], kernels, stride,
                                          i + 1))))  # conv -> batch-norm -> activation, then record
        return layers[-1]  # return the final output

    def guide_decoder(
        middle_layer, batch_size
    ):  # guide decoder that keeps the U-net bottleneck from being bypassed; unlike the generator's decoder it has no skip connections to the encoder
        layers = [middle_layer]  # layers stores each layer's output
        for i in range(5):  # the guide decoder also has six blocks; this loop builds the first five
            deconvolved = forward.gen_deconv(
                layers[-1], forward.FIRST_OUTPUT_CHANNEL *
                2**min(forward.MAX_OUTPUT_CHANNEL_LAYER, 4 - i),
                batch_size)  # deconvolution first
            output = forward.batchnorm(deconvolved)  # then batch normalization
            output = forward.lrelu(output)  # then the activation
            layers.append(output)  # record the result
        output = forward.gen_deconv(output, 3, batch_size)  # final deconvolution
        output = tf.nn.tanh(output)  # tanh activation (original comment mislabeled this as batch-norm)
        layers.append(output)  # record the final output (original comment mislabeled this as activation)
        return layers[-1]  # return the guide decoder's output

    X = tf.placeholder(tf.float32, [None, None, None, 3])  # placeholder for the (grayscale) input images
    with tf.name_scope('generator'), tf.variable_scope(
            'generator'):  # prefix generator variables so they can be trained separately from the discriminator
        Y, middle_layer = forward.forward(X, BATCH_SIZE,
                                          True)  # build the generator; get its output and bottleneck layer
        Y_guide = guide_decoder(middle_layer,
                                BATCH_SIZE)  # build the guide decoder on the bottleneck
    Y_real = tf.placeholder(tf.float32,
                            [None, None, None, 3])  # placeholder for the target (original color) images
    XYY = tf.concat([X, Y, Y_real], axis=2)  # input | generated | real, concatenated for display

    with tf.name_scope('discriminator_real'):  # name scope for the discriminator judging real images
        with tf.variable_scope(
                'discriminator'):  # variable scope, prepared for reuse below
            discriminator_real = discriminator(
                X, Y_real)  # feed grayscale + real color pairs; get one output

    with tf.name_scope('discriminator_fake'):  # name scope for the discriminator judging generated images
        with tf.variable_scope('discriminator',
                               reuse=True):  # reuse the discriminator variables
            discriminator_fake = discriminator(X,
                                               Y)  # feed grayscale + generated color pairs; get the other output

    dis_loss = tf.reduce_mean(
        -tf.log(discriminator_real + EPS) -
        tf.log(1 - discriminator_fake + EPS))  # discriminator loss: mean cross-entropy of the two outputs
    dis_vars = [
        var for var in tf.trainable_variables()
        if var.name.startswith('discriminator')
    ]  # the discriminator's variables
    dis_optimizer = tf.train.AdamOptimizer(LEARNING_RATE,
                                           BETA1)  # discriminator optimizer
    dis_train_op = dis_optimizer.minimize(
        dis_loss, var_list=dis_vars)  # train step that only touches discriminator variables

    gen_loss_GAN = tf.reduce_mean(
        -tf.log(discriminator_fake + EPS))  # adversarial loss: the generator wants its output judged real
    gen_loss_L1 = tf.reduce_mean(tf.abs(Y - Y_real))  # L1 distance between generated and real images
    guide_decoder_loss = tf.reduce_mean(
        tf.abs(Y_guide - Y_real))  # L1 distance between the guide decoder's output and the real images
    gen_loss = L1_WEIGHT * (gen_loss_L1 +
                            GUIDE_DECODER_WEIGHT * guide_decoder_loss
                            ) + GAN_WEIGHT * gen_loss_GAN  # generator loss: weighted sum of the three terms
    gen_vars = [
        var for var in tf.trainable_variables()
        if var.name.startswith('generator')
    ]  # the generator's variables
    gen_optimizer = tf.train.AdamOptimizer(LEARNING_RATE,
                                           BETA1)  # generator optimizer
    gen_train_op = gen_optimizer.minimize(
        gen_loss, var_list=gen_vars)  # train step that only touches generator variables

    global_step = tf.Variable(0, trainable=False)  # global step counter
    incr_global_step = tf.assign(global_step,
                                 global_step + 1)  # op that increments the global step

    train_op = tf.group([dis_train_op, gen_train_op,
                         incr_global_step])  # one op: train both nets and bump the step

    saver = tf.train.Saver()  # saver used to save/restore the model
    X_batch, Y_real_batch = generateds.get_tfrecord(
        BATCH_SIZE, True)  # grayscale/color image batches from the tfrecord

    if not os.path.exists(MODEL_SAVE_PATH):  # create the model directory if missing
        os.mkdir(MODEL_SAVE_PATH)  # create the model directory if missing
    if not os.path.exists(TRAINING_RESULT_PATH):  # create the training-result directory if missing
        os.mkdir(TRAINING_RESULT_PATH)  # create the training-result directory if missing
    if not os.path.exists(GUIDE_DECODER_PATH):  # create the guide-decoder output directory if missing
        os.mkdir(GUIDE_DECODER_PATH)  # create the guide-decoder output directory if missing

    with tf.Session() as sess:  # open a session
        sess.run(tf.global_variables_initializer())  # initialize all variables

        ckpt = tf.train.get_checkpoint_state(
            MODEL_SAVE_PATH)  # checkpoint state from the model directory
        if ckpt and ckpt.model_checkpoint_path:  # if a checkpoint with a usable latest path exists
            saver.restore(sess,
                          ckpt.model_checkpoint_path)  # restore the model parameters from it

        coord = tf.train.Coordinator()  # create a coordinator
        threads = tf.train.start_queue_runners(sess=sess,
                                               coord=coord)  # start the data-reading threads

        for i in range(global_step.eval(), TOTAL_STEP):  # train from the current step to the last, one step at a time
            xs, ys = sess.run([X_batch, Y_real_batch])  # next batch of x and y from the tfrecord
            _, step = sess.run([train_op, global_step],
                               feed_dict={
                                   X: xs,
                                   Y_real: ys
                               })  # run a train step; fetch the step count
            for i in range(4):  # train the generator four extra times to balance the two nets
                sess.run(gen_train_op, feed_dict={X: xs, Y_real: ys})  # generator-only step
            if step % SAVE_FREQ == 0:  # time to save the model
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)  # save it
            if step % DISPLAY_FREQ == 0:  # time to display training progress
                glloss, ggloss, dloss = sess.run(
                    [gen_loss_L1, gen_loss_GAN, dis_loss],
                    feed_dict={
                        X: xs,
                        Y_real: ys
                    })  # fetch the three loss components
                print(
                    '\rSteps: {}, Generator L1 loss: {:.6f}, Generator GAN loss: {:.6f}, Discriminator loss: {:.6f}'
                    .format(step, glloss, ggloss, dloss))  # print step and losses
                test_result = sess.run(XYY, feed_dict={
                    X: xs,
                    Y_real: ys
                })  # fetch the input | generated | real strips
                for i, img in enumerate(test_result[:3]):  # first three images of the batch
                    img = (img + 1) / 2  # map -1~1 to 0~1
                    img *= 256  # then to 0~256
                    img = img.astype(np.uint8)  # cast to uint8
                    Image.fromarray(img).save(
                        os.path.join(TRAINING_RESULT_PATH,
                                     'Step{}-{}.png'.format(step,
                                                            i + 1)))  # save as an image
            if step % DISPLAY_GUIDE_DECODER_FREQ == 0:  # time to display the guide decoder's output
                guide_result = sess.run(Y_guide, feed_dict={
                    X: xs,
                    Y_real: ys
                })  # fetch the guide decoder's images
                for i, img in enumerate(guide_result[:1]):  # first image of the batch
                    img = (img + 1) / 2  # map -1~1 to 0~1
                    img *= 256  # then to 0~256
                    img = img.astype(np.uint8)  # cast to uint8
                    Image.fromarray(img).save(
                        os.path.join(GUIDE_DECODER_PATH,
                                     'Step-{}.png'.format(step)))  # save as an image
            print('\r{}'.format(step), end='')  # print the step count

        coord.request_stop()  # ask the reader threads to stop
        coord.join(threads)  # wait for them to finish
Пример #13
0
def backward():
    """Build the training graph and run the TF1 training loop.

    Resumes from the latest checkpoint in MODEL_SAVE_PATH when one exists,
    then trains for STEPS iterations, printing loss/accuracy and saving a
    checkpoint every 10 steps.
    """
    # Placeholder for a batch of input images x
    x = tf.placeholder(tf.float32, [BATCH_SIZE,
                                    forward.IMAGE_SIZE,
                                    forward.IMAGE_SIZE,
                                    forward.NUM_CHANNELS])
    # Placeholder for the ground-truth labels y_ (one-hot, OUTPUT_NODE wide)
    y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
    # Predicted scores y (training mode, with regularization)
    y = forward.forward(x, True, REGULARIZER)
    # Tracks how many training steps have run; excluded from training
    global_step = tf.Variable(0, trainable=False)

    # Softmax y into probabilities and compute cross entropy against the
    # label indices derived from the one-hot y_
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    # Mean cross entropy over the batch
    cem = tf.reduce_mean(ce)
    # get_collection fetches the 'losses' regularization terms; add_n sums
    # them into the total loss
    loss = cem + tf.add_n(tf.get_collection('losses'))

    # Accuracy on the current training batch
    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        train_num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)

    # Adam adapts a per-parameter learning rate from first/second moment
    # estimates of the gradients
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Exponential moving average of the weights; smooths parameter updates
    # to reduce overfitting
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    # ema.apply takes the update list; each sess.run(ema_op) refreshes the
    # shadow (averaged) values of the trainable variables
    ema_op = ema.apply(tf.trainable_variables())

    ## Bind the optimizer step and the moving-average update together
    with tf.control_dependencies([train_op, ema_op]):
        # Merge them into a single training node
        train_step = tf.no_op(name='train')

    # Saver used below to checkpoint and restore the model
    saver = tf.train.Saver()

    # Fetch BATCH_SIZE training examples from the TFRecord pipeline
    img_batch, label_batch = generateds.get_tfrecord(BATCH_SIZE, isTrain=True)

    with tf.Session() as sess:
        # Initialize all variables
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        # Breakpoint continuation: restore the checkpoint if one exists
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        # Create the thread coordinator
        coord = tf.train.Coordinator()
        # Start the queue runners that feed the input pipeline
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(STEPS):
            # Pull the next batch of training images and labels
            xs, ys = sess.run([img_batch, label_batch])
            # Reshape xs to the 4-D shape the network expects
            reshaped_xs = np.reshape(xs, (
                BATCH_SIZE,
                forward.IMAGE_SIZE,
                forward.IMAGE_SIZE,
                forward.NUM_CHANNELS))
            # Run one combined train+EMA step and fetch loss/step/accuracy
            _, loss_value, step, acc = sess.run([train_step, loss, global_step, accuracy],
                                                feed_dict={x: reshaped_xs, y_: ys})
            # Log and save the model every 10 iterations
            if i % 10 == 0:
                print("After %d training step(s), loss on training batch is %g. accuracy is  %g" % (
                    step, loss_value, acc))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
        # Signal all input threads to terminate
        coord.request_stop()
        # Join the started threads and wait for them to finish
        coord.join(threads)
Пример #14
0
def backward():
    """Build the CNN training graph and run the training loop.

    Restores the latest checkpoint from MODEL_SAVE_PATH when one exists,
    trains each fetched batch 50 times, and every 1000 outer steps logs the
    loss, saves a checkpoint, and runs test.test().
    """
    # Placeholder for a batch of input images
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE, forward.IMAGE_SIZE, forward.IMAGE_SIZE,
        forward.NUM_CHANNELS
    ])
    # Placeholder for the one-hot ground-truth labels
    y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
    y = forward.forward(x, True, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)
    # Cross entropy between the logits y and the label indices from y_
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,
                                                        labels=tf.argmax(
                                                            y_, 1))
    cem = tf.reduce_mean(ce)
    # Total loss = mean cross entropy + regularization terms in 'losses'
    loss = cem + tf.add_n(tf.get_collection('losses'))
    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               NUM_EXAMPLES / BATCH_SIZE,
                                               LEARNING_RATE_DECAY,
                                               staircase=True)
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    # Exponential moving average of the trainable variables
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    # Run the optimizer step and the EMA update as a single node
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')
    # Saver for checkpointing / restoring the model
    saver = tf.train.Saver()
    img_batch, label_batch = generateds.get_tfrecord(BATCH_SIZE, isTrain=True)
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        # BUG FIX: the checkpoint state was fetched but never restored, so
        # training always restarted from scratch. Restore it when present.
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(STEPS):
            # Fetch the next batch of samples and labels via sess.run
            xs, ys = sess.run([img_batch, label_batch])
            reshaped_xs = np.reshape(
                xs, (BATCH_SIZE, forward.IMAGE_SIZE, forward.IMAGE_SIZE,
                     forward.NUM_CHANNELS))
            # Train 50 times on each fetched batch
            for j in range(50):
                _, loss_value, step = sess.run([train_op, loss, global_step],
                                               feed_dict={
                                                   x: reshaped_xs,
                                                   y_: ys
                                               })
            if i % 10 == 0:
                print("Now Step{}".format(i))  # progress indicator
            # Every 1000 outer steps (i.e. 1000*50 = 50000 training runs):
            # recompute the loss, checkpoint, and evaluate the test set.
            if i % 1000 == 0:
                loss_value = sess.run(loss, feed_dict={x: reshaped_xs, y_: ys})
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
                print(
                    "After {} training step(s), loss on training batch is {}.".
                    format(i, loss_value))
                test.test()
        coord.request_stop()
        coord.join(threads)
Пример #15
0
def test():
    """Repeatedly load the latest checkpoint and report test accuracy.

    Loops forever: restores the newest checkpoint, evaluates accuracy on one
    batch of TEST_NUM examples, then sleeps INTERVAL_TIME before repeating.
    """
    with tf.Graph().as_default() as g:  # rebuild the inference graph in a fresh graph
        x = tf.placeholder(
            tf.float32,
            [  # placeholder x stands in for the input images
                BATCH_SIZE, forward.IMAGE_HEIGHT, forward.IMAGE_WIDTH,
                forward.NUM_CHANNELS
            ])

        y_ = tf.placeholder(tf.float32, [None, 10])  # placeholder y_ receives the dataset labels
        y = forward.forward(x, False, None)  # network output (inference mode, no regularizer)
        y = tf.reshape(y, [-1, 10])
        predict_ans = tf.argmax(y, 1)  # predicted class index per row (batch*18 rows)

        ema = tf.train.ExponentialMovingAverage(
            backward.MOVING_AVERAGE_DECAY
        )  # moving-average model; MOVING_AVERAGE_DECAY controls update speed, one shadow variable per variable
        ema_restore = ema.variables_to_restore(
        )  # returns a dict {ema_variable_name: variable} mapping shadow values to current values
        saver = tf.train.Saver(
            ema_restore)  # saver that restores the shadow (averaged) weights, which generalize better at test time

        correct_prediction = tf.equal(
            tf.argmax(y, 1), tf.argmax(y_, 1)
        )  # True where the predicted class matches the label class
        accuracy = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32)
        )  # cast booleans to float32 and average to get the accuracy

        img_batch, label_batch = generateds.get_tfrecord(
            TEST_NUM, isTrain=True)  # fetch a batch of TEST_NUM images+labels
        # NOTE(review): isTrain=True in a test script looks wrong — this
        # evaluates on the TRAINING split; confirm whether False was intended.

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(
                    backward.MODEL_SAVE_PATH)  # locate the trained model checkpoint
                if ckpt and ckpt.model_checkpoint_path:  # proceed only if a checkpoint exists
                    saver.restore(sess,
                                  ckpt.model_checkpoint_path)  # restore the session from the checkpoint

                    global_step = ckpt.model_checkpoint_path.split(
                        '/'
                    )[-1].split(
                        '-'
                    )[-1]  # extract the saved step count: last '-'-separated field of the checkpoint filename

                    coord = tf.train.Coordinator()  # start the thread coordinator
                    threads = tf.train.start_queue_runners(sess=sess,
                                                           coord=coord)  # start the input queue threads
                    xs, ys = sess.run([img_batch, label_batch
                                       ])  # fetch the batch of images and labels

                    reshaped_xs = np.reshape(
                        xs,
                        (  # reshape the input to the 4-D shape the network expects
                            BATCH_SIZE, forward.IMAGE_HEIGHT,
                            forward.IMAGE_WIDTH, forward.NUM_CHANNELS))

                    reshaped_ys = np.reshape(ys, (-1, 10))
                    #                    print y_,reshaped_ys
                    #                    print x, reshaped_xs
                    accuracy_score, predict_value = sess.run(
                        [accuracy, predict_ans],  # compute accuracy and predictions
                        feed_dict={
                            x: reshaped_xs,
                            y_: reshaped_ys
                        })
                    #                    print "predict_value:",predict_value
                    print("after %s training step(s), test accuracy = %g" %
                          (global_step, accuracy_score))

                    coord.request_stop()  # stop the input threads
                    coord.join(threads)  # wait for them to finish

                else:  # no checkpoint could be found; report and bail out
                    print("No checkpoint file found")
                    return
            time.sleep(
                INTERVAL_TIME)  # wait for backward to write a new checkpoint, then test again
Пример #16
0
def backward():
    """Run backpropagation training for the network's weights.

    Builds the graph (cross-entropy + regularization loss, exponentially
    decaying learning rate, SGD step bound to an EMA update), restores any
    existing checkpoint from MODEL_SAVE_PATH, and trains for STEPS
    iterations, checkpointing every 1000 steps.
    """
    # Placeholder x stands in for the input images
    x = tf.placeholder(
        tf.float32,
        [
            BATCH_SIZE, forward.IMAGE_HEIGHT, forward.IMAGE_WIDTH,
            forward.NUM_CHANNELS
        ])
    y_ = tf.placeholder(tf.float32, [None, 10])  # placeholder for the labels
    # True = training mode: forward applies dropout
    y = forward.forward(x, True, REGULARIZER)  # network output
    y = tf.reshape(y, [-1, 10])
    # Step counter; excluded from training
    global_step = tf.Variable(0, trainable=False)

    # Cross entropy: logits are the pre-softmax forward result, labels are
    # the class indices (argmax over the one-hot y_)
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y,
        labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)  # mean cross entropy over the batch
    # Total loss = cross entropy + regularization terms collected in
    # 'losses' (forward's get_weight adds them)
    loss = cem + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(  # exponentially decaying learning rate
        LEARNING_RATE_BASE,  # base rate, decayed as iterations progress
        global_step,  # current iteration
        train_num_examples / BATCH_SIZE,  # iterations per epoch
        LEARNING_RATE_DECAY,  # decay factor
        staircase=True)

    # Gradient descent on the combined loss
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    # Moving average of all trainable variables
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    # Bind train_step and ema_op so one run updates weights AND shadows
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()  # used to save / restore the model

    # Fetch BATCH_SIZE images and labels per run from the TFRecord pipeline
    # (fixed local-name typo: lable_batch -> label_batch)
    img_batch, label_batch = generateds.get_tfrecord(
        BATCH_SIZE, isTrain=True)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # initialize all variables
        # Restore module: resume from a previous checkpoint when available.
        # BUG FIX: restored from hard-coded "./model" while saving below goes
        # to MODEL_SAVE_PATH; use MODEL_SAVE_PATH for both so resumption
        # actually finds the saved checkpoints.
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            # BUG FIX: was a Python 2 print statement (SyntaxError on Py3)
            print(ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()  # start the thread coordinator
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(STEPS):  # iterative training loop
            # One batch of training data and labels
            xs, ys = sess.run([img_batch, label_batch])

            # Reshape the inputs to what the network expects
            reshaped_xs = np.reshape(
                xs,
                (
                    BATCH_SIZE, forward.IMAGE_HEIGHT, forward.IMAGE_WIDTH,
                    forward.NUM_CHANNELS))
            reshaped_ys = np.reshape(ys, (-1, 10))

            # Run the combined train+EMA node; '_' discards train_op's output
            _, loss_value, step = sess.run(
                [train_op, loss, global_step],
                feed_dict={
                    x: reshaped_xs,
                    y_: reshaped_ys
                })
            if i % 1000 == 0:  # every 1000 steps: log the loss and checkpoint
                print(
                    "after %d training step(s), loss on training batch is %g."
                    % (step, loss_value))
                # global_step appends the step count to the checkpoint name:
                # MODEL_SAVE_PATH + MODEL_NAME + '-' + global_step
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
        coord.request_stop()  # shut down the input threads
        coord.join(threads)  # wait for them to finish