import os
import time

import numpy as np
import tensorflow as tf

import deap_backward
import deap_forward


def test(signal_re, labels_re):
    # Build a default graph and run everything below inside it (mostly mirrors train).
    # test_num_examples and TEST_INTERVAL_SECS are module-level constants defined
    # elsewhere in this file.
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [
            test_num_examples, deap_forward.IMAGE_SIZE_X,
            deap_forward.IMAGE_SIZE_Y, deap_forward.NUM_CHANNELS
        ])
        y_ = tf.placeholder(tf.float32, [None, deap_forward.OUTPUT_NODE])
        # Forward pass in inference mode: training flag off, no regularizer
        y = deap_forward.forward(x, False, None)

        # Restore the shadow (moving-average) values of the trained variables
        ema = tf.train.ExponentialMovingAverage(
            deap_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        # Alternative accuracy ops (unused):
        # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))  # predicted class equals true class
        # correct_prediction = (abs(y - y_) < 0.5)  # prediction within 0.5 of the true label
        # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # mean gives the accuracy

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(
                    deap_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Parse the number of training iterations out of the
                    # restored checkpoint's file name
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    reshaped_x = np.reshape(
                        signal_re,
                        (test_num_examples, deap_forward.IMAGE_SIZE_X,
                         deap_forward.IMAGE_SIZE_Y,
                         deap_forward.NUM_CHANNELS))
                    # Compute accuracy on the test set: threshold the network
                    # output at 0.5 and count matches against the true labels
                    pred_labels = sess.run(y, feed_dict={x: reshaped_x})
                    true_labels = labels_re
                    pred_correct = 0
                    for i in range(test_num_examples):
                        if pred_labels[i] >= 0.5:
                            pred_labels[i] = 1
                        else:
                            pred_labels[i] = 0
                        if pred_labels[i] == true_labels[i]:
                            pred_correct += 1
                    recognition_rate = pred_correct / test_num_examples
                    print("After %s training step(s), test accuracy = %g" %
                          (global_step, recognition_rate))
                else:
                    print('No checkpoint file found')
                    return
            # Check for a newer model every TEST_INTERVAL_SECS seconds
            time.sleep(TEST_INTERVAL_SECS)
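# The thresholding loop in test() can be written as one vectorized NumPy
# computation. A minimal sketch for reference; binary_accuracy and its
# threshold parameter are hypothetical names, not part of the original code.
def binary_accuracy(pred, labels, threshold=0.5):
    """Binarize sigmoid-style outputs at `threshold` and compare with 0/1 labels."""
    pred = np.asarray(pred).reshape(-1)
    labels = np.asarray(labels).reshape(-1)
    return np.mean((pred >= threshold).astype(np.int32) ==
                   labels.astype(np.int32))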
def backward(signal_re, labels_re):
    # x and y_ are placeholders: their dtype and shape must match the network's
    # input and output. Like function parameters, they must be fed at run time.
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE, deap_forward.IMAGE_SIZE_X, deap_forward.IMAGE_SIZE_Y,
        deap_forward.NUM_CHANNELS
    ])
    y_ = tf.placeholder(tf.float32, [None, deap_forward.OUTPUT_NODE])
    # Forward pass in training mode, with regularization enabled
    y = deap_forward.forward(x, True, REGULARIZER)
    # Global step counter, initialized to 0 and excluded from training
    global_step = tf.Variable(0, trainable=False)

    # Loss: mean squared error between prediction and label, plus the
    # regularization terms collected under 'losses'. For classification the
    # usual alternative would be a softmax cross-entropy such as
    # tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1)),
    # which applies softmax to y (class probabilities over num_classes) and
    # returns a per-example cross-entropy vector to be averaged into the loss.
    cem = tf.reduce_mean(tf.square(y - y_))
    loss = cem + tf.add_n(tf.get_collection('losses'))

    # Exponentially decay the learning rate so the model approaches a good
    # solution quickly early on without oscillating late in training:
    # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        40 / BATCH_SIZE,  # decay_steps: how many steps between decays
        LEARNING_RATE_DECAY,
        staircase=True)  # True: exponent is floored (stepwise decay); False: smooth decay

    # Gradient-descent optimizer; minimize() also increments global_step
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Exponential moving average; MOVING_AVERAGE_DECAY controls how fast the
    # averages track the variables. A shadow variable is kept for every
    # trainable variable, initialized to the variable's value and updated
    # whenever the variable is updated.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    # Bundle train_step and ema_op into a single training op
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()  # Saver for checkpointing and restoring variables

    # Create a session managed by Python's context manager
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()  # initialize graph variables
        sess.run(init_op)

        # Locate the most recent checkpoint; if one exists, resume from it
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        for i in range(STEPS):
            start = (i * BATCH_SIZE) % BATCH_SIZE_ALL
            end = start + BATCH_SIZE
            ys = labels_re[start:end]  # one batch of labels
            # Reshape one batch of inputs to the network's input shape
            reshaped_xs = np.reshape(
                signal_re[start:end],
                (BATCH_SIZE, deap_forward.IMAGE_SIZE_X,
                 deap_forward.IMAGE_SIZE_Y, deap_forward.NUM_CHANNELS))
            # Feed the training batch and labels and run one training step
            _, loss_value, step = sess.run(
                [train_op, loss, global_step],
                feed_dict={x: reshaped_xs, y_: ys})
            # Print the loss every 10 iterations; save the latest model every
            # 20000 iterations
            if i % 10 == 0:
                print(
                    "After %d training step(s), loss on training batch is %g."
                    % (step, loss_value))
            if i % 20000 == 0 and i != 0:
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
                print("After %d training step(s), Model has been saved." % step)
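# Plain-Python illustration of the learning-rate schedule that
# tf.train.exponential_decay builds in backward() above. decayed_lr is a
# hypothetical helper for reference only; the defaults reuse this module's
# constants and are assumptions, not code the training loop calls.
def decayed_lr(step, base_lr=LEARNING_RATE_BASE,
               decay_rate=LEARNING_RATE_DECAY,
               decay_steps=40 / BATCH_SIZE, staircase=True):
    """decayed_lr = base_lr * decay_rate ** (step / decay_steps),
    with the exponent floored when staircase=True (stepwise decay)."""
    exponent = step / decay_steps
    if staircase:
        exponent = int(exponent)  # floor for non-negative steps
    return base_lr * decay_rate ** exponent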