def restore_model(feedData):
    # Rebuild the computation graph
    with tf.Graph().as_default() as g:
        # Placeholder for the input features
        x = tf.placeholder(tf.float32, [None, face_forward.INPUT_NODE])
        y = face_forward.forward(x, None)
        preValue = tf.argmax(y, 1)

        # Restore the moving-average (shadow) values into the variables
        variable_average = tf.train.ExponentialMovingAverage(
            face_backward.MOVING_AVERAGE_DECAY)
        variable_restore = variable_average.variables_to_restore()
        saver = tf.train.Saver(variable_restore)

        with tf.Session() as sess:
            # Load the checkpoint state
            ckpt = tf.train.get_checkpoint_state(face_backward.MODEL_SAVE_PATH)
            # Check whether a trained model exists
            if ckpt and ckpt.model_checkpoint_path:
                # Restore the model into the current session
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Run the prediction op
                preValue = sess.run(preValue, feed_dict={x: feedData})
                # Inspect the raw network outputs
                preArr = sess.run(y, feed_dict={x: feedData})
                # preArr = preArr.reshape([1,10])
                print(preArr)
                return preValue
            else:
                print('No checkpoint file found')
                return -1
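restore_model expects a flattened feature array shaped [1, INPUT_NODE]. A minimal, hypothetical driver is sketched below; the image path, size, grayscale conversion, and [0, 1] scaling are assumptions about the original preprocessing, which this excerpt does not show.

# Hypothetical driver for restore_model; the preprocessing details are
# assumptions and must match how the training TFRecords were written.
import numpy as np
from PIL import Image

def pre_pic(pic_path, width=32, height=32):
    # Grayscale, resize, flatten to [1, width*height], scale to [0, 1]
    img = Image.open(pic_path).convert('L')
    img = img.resize((width, height), Image.ANTIALIAS)
    arr = np.array(img, dtype=np.float32).reshape(1, width * height)
    return arr / 255.0

if __name__ == '__main__':
    feed = pre_pic('test_face.jpg')  # hypothetical test image
    print(restore_model(feed))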
def backward():
    x = tf.placeholder(tf.float32, [None, face_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, face_forward.OUTPUT_NODE])
    y = face_forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    # Cross-entropy loss plus the regularization terms collected in 'losses'
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection("losses"))

    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        train_num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Maintain moving averages of all trainable variables
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name="train")

    saver = tf.train.Saver()
    img_batch, label_batch = face_generateds.get_tfRecode(BATCH_SIZE, isTrain=True)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        # Resume training from the latest checkpoint, if one exists
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        # Start the input-queue threads
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(STEPS):
            xs, ys = sess.run([img_batch, label_batch])
            _, loss_value, step = sess.run(
                [train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 100 == 0:
                print("After %d training steps, loss on training batch is %g"
                      % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)

        coord.request_stop()
        coord.join(threads)
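backward() builds its network through face_forward.forward(x, regularizer), which the excerpt does not include. A plausible single-hidden-layer version, consistent with how the loss above sums a 'losses' collection, is sketched below; the layer widths, node counts, and the L2 regularizer are assumptions.

# A sketch of what face_forward might contain, assuming one hidden
# layer. The INPUT_NODE/OUTPUT_NODE/LAYER1_NODE values are placeholders.
import tensorflow as tf

INPUT_NODE = 1024
OUTPUT_NODE = 10
LAYER1_NODE = 500

def get_weight(shape, regularizer):
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    if regularizer is not None:
        # The L2 penalty lands in the 'losses' collection that
        # backward() sums with tf.add_n
        tf.add_to_collection('losses',
                             tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    return tf.Variable(tf.zeros(shape))

def forward(x, regularizer):
    w1 = get_weight([INPUT_NODE, LAYER1_NODE], regularizer)
    b1 = get_bias([LAYER1_NODE])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)
    b2 = get_bias([OUTPUT_NODE])
    # Return logits; softmax is applied inside the loss in backward()
    return tf.matmul(y1, w2) + b2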
def test(face_data):
    # Rebuild the computation graph
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, face_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, face_forward.OUTPUT_NODE])
        y = face_forward.forward(x, None)

        # After restoring, each variable takes its moving-average value
        ema = tf.train.ExponentialMovingAverage(
            face_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Fetch the test features and labels
        feature, label = face_generateds.get_tfrecord(TEST_NUM, isTrain=False)

        while True:
            with tf.Session() as sess:
                # Assign the moving-average values to the variables
                ckpt = tf.train.get_checkpoint_state(
                    face_backward.MODEL_SAVE_PATH)
                # Check whether a trained model exists
                if ckpt and ckpt.model_checkpoint_path:
                    # Restore the model into the current session
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover global_step from the checkpoint file name
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]

                    # Thread coordinator for the input queues
                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

                    xs, ys = sess.run([feature, label])
                    # Compute the accuracy on the test batch
                    accuracy_score = sess.run(accuracy, feed_dict={x: xs, y_: ys})
                    print("After %s training steps, test accuracy = %g"
                          % (global_step, accuracy_score))

                    coord.request_stop()
                    coord.join(threads)
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(TEST_INTERVAL_SECS)
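The batches come from face_generateds.get_tfrecord (also spelled get_tfRecode in the other excerpts), which is not reproduced here. A typical TF1 queue-based reader compatible with the start_queue_runners calls above is sketched below; the record keys, image size, file names, and one-hot label depth are assumptions about how the TFRecords were written.

# Rough sketch of a TF1 TFRecord reader in the style this code expects.
# The keys ('img_raw', 'label'), sizes, and paths are assumptions.
import tensorflow as tf

def get_tfrecord(num, isTrain=True):
    path = 'data_train.tfrecords' if isTrain else 'data_test.tfrecords'
    filename_queue = tf.train.string_input_producer([path])
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized,
        features={
            'img_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([10], tf.int64),
        })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img.set_shape([1024])                    # INPUT_NODE pixels per image
    img = tf.cast(img, tf.float32) / 255.0   # scale to [0, 1]
    label = tf.cast(features['label'], tf.float32)
    # Shuffled batching feeds sess.run([feature, label]) in the callers
    min_after_dequeue = 700
    return tf.train.shuffle_batch([img, label],
                                  batch_size=num,
                                  capacity=min_after_dequeue + 3 * num,
                                  min_after_dequeue=min_after_dequeue)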
def restore_model(testPicArr):
    with tf.Graph().as_default() as tg:
        x = tf.placeholder(tf.float32, [None, face_forward.INPUT_NODE])
        y = face_forward.forward(x, None)
        preValue = tf.argmax(y, 1)

        # Restore the moving-average (shadow) values of the variables
        variable_averages = tf.train.ExponentialMovingAverage(
            face_backword.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(face_backword.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                preValue = sess.run(preValue, feed_dict={x: testPicArr})
                return preValue
            else:
                print("No checkpoint file found")
                return -1
def test():
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, face_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, face_forward.OUTPUT_NODE])
        y = face_forward.forward(x, None)

        ema = tf.train.ExponentialMovingAverage(
            face_backword.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        img_batch, label_batch = face_generateds.get_tfRecode(TEST_NUM, isTrain=False)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(
                    face_backword.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]

                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

                    xs, ys = sess.run([img_batch, label_batch])
                    accuracy_score = sess.run(accuracy, feed_dict={x: xs, y_: ys})
                    print("After %s training steps, test accuracy = %g"
                          % (global_step, accuracy_score))

                    coord.request_stop()
                    coord.join(threads)
                else:
                    print("No checkpoint file found")
            time.sleep(TIME_INTERVAL_SECS)
def backward(face_data):
    x = tf.placeholder(tf.float32, [None, face_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, face_forward.OUTPUT_NODE])
    # Call the forward-propagation routine to get the output y
    y = face_forward.forward(x, REGULARIZER)
    # Step counter
    global_step = tf.Variable(0, trainable=False)

    # Cross entropy
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    # Total loss: cross entropy plus regularization terms
    loss = cem + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        train_num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)

    # Training op: gradient descent on the total loss
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Moving averages: each run of ema_op updates the shadow value of
    # every trainable variable
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    # Features and labels from our own TFRecord reader
    feature, label = face_generateds.get_tfrecord(BATCH_SIZE, isTrain=True)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        # Load the ckpt model to resume training from a breakpoint:
        # every w and b is restored to the value saved in the checkpoint
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        # Start the thread coordinator
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(STEPS):
            # Read BATCH_SIZE examples per step
            xs, ys = sess.run([feature, label])
            # Feed them into the network and run one training step
            _, loss_value, step = sess.run(
                [train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training steps, loss on training batch is %g"
                      % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)

        # Stop the thread coordinator
        coord.request_stop()
        coord.join(threads)
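Both backward() variants reference module-level hyperparameters (BATCH_SIZE, LEARNING_RATE_BASE, STEPS, and so on) that the excerpt never defines. The block below shows representative values in the style of similar TF1 training scripts; every number and path here is an assumption, not taken from the original project.

# Assumed hyperparameters referenced by backward(); the values are
# illustrative placeholders, not the original project's settings.
import os
import tensorflow as tf
import face_forward
import face_generateds

BATCH_SIZE = 200
LEARNING_RATE_BASE = 0.1          # initial learning rate
LEARNING_RATE_DECAY = 0.99        # decay rate per epoch (staircase)
REGULARIZER = 0.0001              # L2 regularization scale
STEPS = 50000                     # total training iterations
MOVING_AVERAGE_DECAY = 0.99       # EMA decay for trainable variables
MODEL_SAVE_PATH = './model/'      # checkpoint directory
MODEL_NAME = 'face_model'         # checkpoint file prefix
train_num_examples = 10000        # assumed size of the training set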