def __init__(self, ckpt_path="./model/model-30000"):
    print(ckpt_path)
    self.x = tf.placeholder(tf.float32,
                            shape=[
                                None, mnist_inference.IMAGE_SIZE,
                                mnist_inference.IMAGE_SIZE,
                                mnist_inference.NUM_CHANNEL
                            ],
                            name='x-input')
    self.y = mnist_inference.inference(self.x, False, None)
    saver = tf.train.Saver()
    self.sess = tf.Session()
    saver.restore(self.sess, ckpt_path)
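
# A minimal sketch of a prediction helper that reuses the session and graph
# built in __init__ above, assuming the preprocessed image has shape
# [28, 28, 1]. The method name `predict` is hypothetical, not part of the
# original handler.
def predict(self, u_img):
    # Feed a single image and read off the most probable digit.
    result = self.sess.run(self.y, feed_dict={self.x: [u_img]})
    return int(np.argmax(result, axis=1)[0])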
def on_message(self, img):
    global users
    global msg
    # The canvas sends the image as a base64-encoded data URL;
    # strip the "data:image/png;base64," prefix before decoding.
    img = img[len("data:image/png;base64,"):]
    img = bytes(img, encoding="utf-8")
    img += b'=' * (-len(img) % 4)  # pad to a multiple of 4 if needed
    img_data = base64.b64decode(img)
    # Save the picture so it can be inspected later.
    with open('data.png', 'wb') as f:
        f.write(img_data)
    # Load the handwriting image as grayscale.
    img_tmp = cv2.imread("data.png", 0)
    # Invert the colours (MNIST digits are white on black) to improve accuracy.
    img_tmp = 255 - img_tmp
    img_tmp = cv2.resize(img_tmp, (28, 28), interpolation=cv2.INTER_CUBIC)
    # Reshape to [28, 28, 1] to feed the network.
    self.u_img = np.reshape(img_tmp, (28, 28, 1)).astype(np.float32)
    with tf.Graph().as_default() as g:
        # Placeholder for the input image tensor and the sample shape.
        x = tf.placeholder(tf.float32,
                           shape=[
                               None, mnist_inference.IMAGE_SIZE,
                               mnist_inference.IMAGE_SIZE,
                               mnist_inference.NUM_CHANNEL
                           ],
                           name='x-input')
        y = mnist_inference.inference(x, None, None)
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            # Restore the trained model from the latest checkpoint.
            saver = tf.train.import_meta_graph('model/model-9999001.meta')
            saver.restore(sess, tf.train.latest_checkpoint('model'))
            result = sess.run(y, feed_dict={x: [self.u_img]})
            prediction = tf.argmax(result, 1).eval()[0]
            print(prediction)
            msg = str(prediction)
            self.write_message(msg)
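
# Note: on_message rebuilds and restores the whole graph on every WebSocket
# message. A lighter alternative, assuming the __init__ above already restored
# a checkpoint into self.sess, is to reuse that live session instead:
#
#     result = self.sess.run(self.y, feed_dict={self.x: [self.u_img]})
#     self.write_message(str(np.argmax(result, axis=1)[0]))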
def evaluate(mnist):
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32,
                           shape=[
                               None, mnist_inference.IMAGE_SIZE,
                               mnist_inference.IMAGE_SIZE,
                               mnist_inference.NUM_CHANNEL
                           ],
                           name='x-input')
        y_ = tf.placeholder(tf.float32,
                            shape=[None, mnist_inference.OUTPUT_NODE],
                            name='y-input')
        # The convolutional network expects 4-D input, so reshape the flat test images.
        xs, ys = mnist.test.images, mnist.test.labels
        reshape_xs = np.reshape(
            xs, (-1, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE,
                 mnist_inference.NUM_CHANNEL))
        val_feed = {x: reshape_xs, y_: mnist.test.labels}
        y = mnist_inference.inference(x, False, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Evaluate with the shadow (moving-average) values of the trained variables.
        variable_average = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        val_to_restore = variable_average.variables_to_restore()
        saver = tf.train.Saver(val_to_restore)
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # The training step is encoded in the checkpoint file name.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=val_feed)
                print('After %s training steps, the accuracy is %g' %
                      (global_step, accuracy_score))
            else:
                print('No checkpoint file found')
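
# A minimal sketch of how evaluate() might be invoked, assuming the standard
# TF1 tutorial loader; './data' is a hypothetical dataset directory. The
# labels must be one-hot encoded because evaluate() applies argmax to y_.
if __name__ == '__main__':
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('./data', one_hot=True)
    evaluate(mnist)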
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_cnn.INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, mnist_cnn.OUTPUT_NODE], name="y-input")
    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
    y = mnist_cnn.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)
    # Maintain shadow copies of the trainable variables with a moving average.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = cross entropy + the L2 regularization terms in 'losses'.
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    # Run the gradient step and the moving-average update as a single train op.
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training steps, loss on training batch is %g." %
                      (step, loss_value))
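
# The control_dependencies + no_op pattern above is one way to bundle the two
# updates; the train() variants below use the equivalent grouped form:
#
#     train_op = tf.group(train_step, variables_averages_op)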
def train(mnist):
    x = tf.placeholder(tf.float32,
                       shape=[None, mnist_interence.IMAGE_SIZE,
                              mnist_interence.IMAGE_SIZE,
                              mnist_interence.NUM_CHANNEL],
                       name='x-input')
    y_ = tf.placeholder(tf.float32,
                        shape=[None, mnist_interence.OUTPUT_NODE],
                        name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_TATE)
    y = mnist_interence.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)
    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,
                                                         global_step)
    variable_average_ops = variable_average.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('loss'))
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)
    # Minimize the loss by gradient descent with the decayed learning rate.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    train_op = tf.group(train_step, variable_average_ops)
    saver = tf.train.Saver(max_to_keep=10)
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            # Resume training from the step encoded in the checkpoint file name.
            saver.restore(sess, ckpt.model_checkpoint_path)
            last_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            for i in range(last_step, TRAIN_STEP):
                xs, ys = mnist.train.next_batch(BATCH_SIZE)
                reshape_xs = np.reshape(xs, (BATCH_SIZE,
                                             mnist_interence.IMAGE_SIZE,
                                             mnist_interence.IMAGE_SIZE,
                                             mnist_interence.NUM_CHANNEL))
                _, loss_value, step, learn_rate = sess.run(
                    [train_op, loss, global_step, learning_rate],
                    feed_dict={x: reshape_xs, y_: ys})
                if (i + 1) % 3000 == 0:
                    print('After %d steps, loss on train is %g and learning rate is %g'
                          % (step, loss_value, learn_rate))
                    saver.save(sess, os.path.join(MODEL_PATH, MODEL_NAME),
                               global_step=global_step)
        else:
            print('No checkpoint file found')
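
# Note: this resume-only variant trains nothing when no checkpoint exists.
# A fresh run would need an initialization branch, e.g. (sketch):
#
#     else:
#         sess.run(tf.global_variables_initializer())
#         # ...then run the same training loop from step 0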
def train(mnist):
    # Placeholder for the input image tensor and the sample shape.
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, mnist_interence.IMAGE_SIZE,
                           mnist_interence.IMAGE_SIZE,
                           mnist_interence.NUM_CHANNEL
                       ],
                       name='x-input')
    # Placeholder for the input label tensor and the sample shape.
    y_ = tf.placeholder(tf.float32,
                        shape=[None, mnist_interence.OUTPUT_NODE],
                        name='y-input')
    # L2 regularization function applied to the weights.
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_TATE)
    # Obtain the output tensor through the inference function.
    y = mnist_interence.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)
    # Define the exponential moving average.
    variable_average = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    # Apply the moving average to all trainable variables.
    variable_average_ops = variable_average.apply(tf.trainable_variables())
    # Cross entropy between the predictions y and the labels y_.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    # Average the per-example values; this is the cross entropy between the two
    # probability distributions, and the smaller it is, the closer they are.
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Final loss = cross entropy + the regularization terms.
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('loss'))
    # Exponentially decay the learning rate as training progresses.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    # Minimize the loss by gradient descent with the decayed learning rate.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    # Training op: take one gradient-descent step, then update the moving averages.
    train_op = tf.group(train_step, variable_average_ops)
    saver = tf.train.Saver(max_to_keep=10)
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAIN_STEP):
            # The network expects input of shape
            # [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, CHANNEL], so reshape the batch.
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshape_xs = np.reshape(
                xs, (BATCH_SIZE, mnist_interence.IMAGE_SIZE,
                     mnist_interence.IMAGE_SIZE, mnist_interence.NUM_CHANNEL))
            # Run the graph to get the current loss and learning rate.
            _, loss_value, step, learn_rate = sess.run(
                [train_op, loss, global_step, learning_rate],
                feed_dict={x: reshape_xs, y_: ys})
            if (i + 1) % 3000 == 0:
                print('After %d steps, loss on train is %g and learning rate is %g'
                      % (step, loss_value, learn_rate))
                saver.save(sess, os.path.join(MODEL_PATH, MODEL_NAME),
                           global_step=global_step)
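
# A minimal sketch of a driver for train(), assuming the standard TF1 tutorial
# loader; './data' is a hypothetical download directory.
def main(argv=None):
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('./data', one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()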