def gen_captcha_text_and_image(out_path='E:/data/captcha/images/'):
    image = ImageCaptcha()
    # Get a randomly generated captcha text (a list of characters)
    captcha_text = random_captcha_text()
    # Join the character list into a single string
    captcha_text = ''.join(captcha_text)
    # Render the captcha image for this text
    captcha = image.generate(captcha_text)
    mkdir(out_path)
    # Write the image to a file named after the captcha text
    image.write(captcha_text, out_path + captcha_text + '.jpg')
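# The function above depends on ImageCaptcha from the `captcha` package and on
# two project helpers, random_captcha_text and mkdir, that are not shown here.
# A minimal sketch of what they might look like; the character set and captcha
# length are assumptions, not taken from the source:
import os
import random
from captcha.image import ImageCaptcha  # pip install captcha

CHAR_SET = list('0123456789')   # assumed: digits only
CAPTCHA_LEN = 4                 # assumed: 4 characters per captcha

def random_captcha_text(char_set=CHAR_SET, size=CAPTCHA_LEN):
    """Return a list of `size` randomly chosen characters."""
    return [random.choice(char_set) for _ in range(size)]

def mkdir(path):
    """Create the directory if it does not already exist."""
    if not os.path.exists(path):
        os.makedirs(path)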
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))

with tf.name_scope('train'):
    # Train with plain gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Initialize all variables
init = tf.global_variables_initializer()

with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        # correct_prediction is a list of booleans;
        # argmax returns the index of the largest value along axis 1
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    with tf.name_scope('accuracy'):
        # Cast the booleans to floats and average them to get the accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    mkdir('E:/log/tensorboard')
    # Write the graph so it can be inspected in TensorBoard
    writer = tf.summary.FileWriter('E:/log/tensorboard', sess.graph)
    for epoch in range(10):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy " + str(acc))
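# The FileWriter above only records the computation graph. To also plot loss
# and accuracy curves in TensorBoard, scalar summaries can be merged and
# written each epoch. A minimal sketch; the summary tags and logging frequency
# are assumptions, not taken from the source. These ops would be defined
# before the Session block:
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()

# Inside the epoch loop, evaluate the merged summary and add it to the writer:
#     summary, acc = sess.run([merged, accuracy],
#                             feed_dict={x: mnist.test.images, y: mnist.test.labels})
#     writer.add_summary(summary, epoch)
# Then start TensorBoard pointed at the same log directory, e.g.
#     tensorboard --logdir=E:/log/tensorboard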
# coding: utf-8
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from DeepLearning.utils import mkdir

log_file = r"E:\log\tensorflow_tensorboard"
mkdir(log_file)
MNIST_data = r"E:\data\MNIST_data_sets\MNIST_data"

# Load the MNIST data set
mnist = input_data.read_data_sets(MNIST_data, one_hot=True)

# Size of each mini-batch
batch_size = 100
# Number of batches per epoch
n_batch = mnist.train.num_examples // batch_size

with tf.name_scope("input"):
    # Define the two placeholders: flattened 28x28 images and one-hot labels
    x = tf.placeholder(tf.float32, [None, 784], name="input_x")
    y = tf.placeholder(tf.float32, [None, 10], name="input_y")

with tf.name_scope("layer"):
    with tf.name_scope("weights"):
        with tf.name_scope("w"):
            # A simple single-layer network: weights, biases, and Wx + b
            W = tf.Variable(tf.zeros([784, 10]), name="W")
    with tf.name_scope("biases"):
        b = tf.Variable(tf.zeros([10]))
    with tf.name_scope("wx_plus_b"):
        wx_plus_b = tf.matmul(x, W) + b
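# The earlier loss/accuracy snippet uses `prediction`, which this fragment
# stops short of defining. A minimal sketch of the usual next step, assuming a
# softmax output over wx_plus_b. Note that
# tf.nn.softmax_cross_entropy_with_logits expects unnormalized logits, so
# wx_plus_b itself is what would normally be passed as `logits`:
with tf.name_scope("softmax"):
    prediction = tf.nn.softmax(wx_plus_b)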
with tf.name_scope('optimization'):
    # opt = tf.train.GradientDescentOptimizer(learning_rate=.2).minimize(loss)
    opt = tf.train.AdamOptimizer(learning_rate=.4).minimize(loss)
    # opt = tf.train.RMSPropOptimizer(learning_rate=.2).minimize(loss)

# Fraction of predictions whose argmax matches the one-hot label
correct_prediction = tf.equal(tf.argmax(out, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()
logdir = "E:/data/tensorboard/log"

with tf.Session() as sess:
    sess.run(init)
    mkdir(logdir)
    writer = tf.summary.FileWriter(logdir, sess.graph)
    avg_cost = 0
    for Epoch in range(1000):
        for i in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            opt_, c_ = sess.run([opt, loss], feed_dict={x: batch_xs, Y: batch_ys})
            # avg_cost += c_ / n_batch
            # if (i + 1) % display_step == 0:
            #     print("in-Epoch: {0} cost={1}".format(Epoch + 1, avg_cost))
            #     print("--------------------------->>>")
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, Y: mnist.test.labels})
        print("Epoch: {0} Acc={1}%".format(Epoch + 1, acc * 100))
    print("finished!")
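# The fragment above uses `Y`, `out`, and `loss`, which are defined earlier in
# the full script and not shown here. A minimal sketch of what those
# definitions could look like for the same one-layer MNIST model; the names
# and shapes are assumptions, and in the real script these lines would sit
# before the 'optimization' name scope:
Y = tf.placeholder(tf.float32, [None, 10], name='Y')  # one-hot labels
out = tf.matmul(x, W) + b                              # unnormalized logits
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=out))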