import numpy as np
import tensorflow as tf

import model
import read_image

# w, h, c (image width, height, channels) and n_class are assumed to be
# module-level constants defined elsewhere in this project.


def train(lr, batch_size, epoches, keep_prob_value):
    # Load the images
    path = './data/'
    x_train, y_train, x_val, y_val = read_image.read_img(path)

    x = tf.placeholder(tf.float32, [None, w * h * c], name="images")
    y_ = tf.placeholder(tf.float32, [None, n_class], name="labels")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    y = model.model(x, keep_prob)

    # Cost function: cross-entropy with a small epsilon so log() never sees 0
    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(y + 1e-10), reduction_indices=[1]),
        name="cross_entropy")
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                              name="accuracy")
    saver = tf.train.Saver()

    # Start training
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(epoches):
            # Number of batches per epoch (ceil so the remainder forms the last batch)
            iters = int(np.ceil(len(x_train) / batch_size))
            for j in range(iters):
                if j == iters - 1:
                    batch0 = x_train[j * batch_size:]
                    batch1 = y_train[j * batch_size:]
                else:
                    batch0 = x_train[j * batch_size:(j + 1) * batch_size]
                    batch1 = y_train[j * batch_size:(j + 1) * batch_size]
                sess.run(train_step,
                         feed_dict={
                             x: batch0,
                             y_: batch1,
                             keep_prob: keep_prob_value
                         })
            if i % 5 == 0:
                train_accuracy = sess.run(accuracy,
                                          feed_dict={
                                              x: batch0,
                                              y_: batch1,
                                              keep_prob: keep_prob_value
                                          })
                print("step %d, training accuracy %.6f" % (i, train_accuracy))

        # Save model
        saver_path = saver.save(sess, "./model/model.ckpt")
        print("Model saved in file:", saver_path)

        test_accuracy = sess.run(accuracy,
                                 feed_dict={
                                     x: x_val,
                                     y_: y_val,
                                     keep_prob: 1.0
                                 })
        print("test accuracy %g" % test_accuracy)
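For reference, a minimal sketch of how the checkpoint written by this version might be reloaded for evaluation. The tensor names ("images", "labels", "keep_prob", "accuracy") and the './data/' path come from the code above; the evaluate() helper itself is illustrative and not part of the original project.

import tensorflow as tf

import read_image


def evaluate(checkpoint_dir="./model"):
    # Reload the validation split the same way train() does
    _, _, x_val, y_val = read_image.read_img('./data/')
    with tf.Session() as sess:
        # Rebuild the graph from the .meta file saved next to the checkpoint,
        # then restore the trained weights
        saver = tf.train.import_meta_graph(checkpoint_dir + "/model.ckpt.meta")
        saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
        graph = tf.get_default_graph()
        x = graph.get_tensor_by_name("images:0")
        y_ = graph.get_tensor_by_name("labels:0")
        keep_prob = graph.get_tensor_by_name("keep_prob:0")
        accuracy = graph.get_tensor_by_name("accuracy:0")
        acc = sess.run(accuracy,
                       feed_dict={x: x_val, y_: y_val, keep_prob: 1.0})
        print("restored validation accuracy %g" % acc)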
import numpy as np
import tensorflow as tf

import model
import read_image

# w and h (image width and height) are assumed to be module-level constants
# defined elsewhere in this project.


def train(lr, batch_size, epoches, keep_prob_value):
    # Read in the images and preprocess them
    path = 'D:\\picbase\\test\\capture_police\\'
    x_train, y_train, x_val, y_val = read_image.read_img(path)
    # x_train = x_train / 255.0
    # x_val = x_val / 255.0

    x = tf.placeholder(tf.float32, [None, w * h], name="images")
    y_ = tf.placeholder(tf.float32, [None, 5], name="labels")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    y = model.model(x, keep_prob)

    # Cost function: clip the softmax output so log() never sees 0
    # cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]), name="cross_entropy")
    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-25, 1.0)),
                       reduction_indices=[1]),
        name="cross_entropy")
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                              name="accuracy")
    saver = tf.train.Saver()

    # Start training
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        while True:
            for i in range(epoches + 1):  # one pass over the training set per epoch
                # Number of batches per epoch (ceil so the remainder forms the last batch)
                iters = int(np.ceil(len(x_train) / batch_size))
                for j in range(iters):
                    if j == iters - 1:
                        batch0 = x_train[j * batch_size:]
                        batch1 = y_train[j * batch_size:]
                    else:
                        batch0 = x_train[j * batch_size:(j + 1) * batch_size]
                        batch1 = y_train[j * batch_size:(j + 1) * batch_size]
                    if j == 1:
                        train_accuracy, cross_ent = sess.run(
                            [accuracy, cross_entropy],
                            feed_dict={
                                x: batch0,
                                y_: batch1,
                                keep_prob: keep_prob_value
                            })
                        print("step %d, training accuracy %g, cross_entropy %g"
                              % (i, train_accuracy, cross_ent))
                        # Save model
                        saver_path = saver.save(
                            sess, 'D:\\picbase\\test\\model_police\\model.ckpt')
                        print("Model saved in file:", saver_path)
                        test_accuracy = sess.run(accuracy,
                                                 feed_dict={
                                                     x: x_val,
                                                     y_: y_val,
                                                     keep_prob: 1.0
                                                 })
                        print("test accuracy %g" % test_accuracy)
                    sess.run(train_step,
                             feed_dict={
                                 x: batch0,
                                 y_: batch1,
                                 keep_prob: keep_prob_value
                             })

            test_accuracy = sess.run(accuracy,
                                     feed_dict={
                                         x: x_val,
                                         y_: y_val,
                                         keep_prob: 1.0
                                     })
            print("test accuracy %g" % test_accuracy)

            ask_for_continue = input("Would you like to continue training? (y/n)\n")
            if ask_for_continue == 'n':
                break
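The clip_by_value guard above keeps log() away from zero. An alternative worth noting is TensorFlow's fused loss, tf.nn.softmax_cross_entropy_with_logits_v2, which handles numerical stability internally; the sketch below assumes model.model were changed to return raw logits rather than the softmax probabilities the code above treats y as, so it is not a drop-in replacement.

# Sketch only: assumes the model returns raw, unnormalized scores (logits),
# unlike the code above, where y is treated as softmax probabilities.
logits = model.model(x, keep_prob)
y = tf.nn.softmax(logits)  # softmax output, still used for argmax / accuracy

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits),
    name="cross_entropy")
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)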
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

import model
import read_image

# w and h (image width and height) are assumed to be module-level constants
# defined elsewhere in this project.


def train(lr, batch_size, epoches, keep_prob_value):
    # Read in the images
    path = 'images/'
    x_train, y_train, x_val, y_val = read_image.read_img(path)
    x_train = x_train / 255.0  # normalize pixel values to [0, 1]
    x_val = x_val / 255.0

    x = tf.placeholder(tf.float32, [None, w * h], name="images")
    y_ = tf.placeholder(tf.float32, [None, 4], name="labels")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    y = model.model(x, keep_prob)

    # Cost function: clip the softmax output so log() never sees 0
    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)),
                       reduction_indices=[1]),
        name="cross_entropy")
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                              name="accuracy")
    saver = tf.train.Saver()

    # Data collected for the training curves
    x_lable = []
    y1_lable = []
    y2_lable = []

    # Start training
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(epoches + 1):
            # Number of batches per epoch (ceil so the remainder forms the last batch)
            iters = int(np.ceil(len(x_train) / batch_size))
            for j in range(iters):
                if j == iters - 1:
                    batch0 = x_train[j * batch_size:]
                    batch1 = y_train[j * batch_size:]
                    # print(x_train, y_train)
                else:
                    batch0 = x_train[j * batch_size:(j + 1) * batch_size]
                    batch1 = y_train[j * batch_size:(j + 1) * batch_size]
                if i % 25 == 0 and j == 1:
                    train_accuracy, cross_ent = sess.run(
                        [accuracy, cross_entropy],
                        feed_dict={
                            x: batch0,
                            y_: batch1,
                            keep_prob: keep_prob_value
                        })
                    print("step %d, training accuracy %g, cross_entropy %g"
                          % (i, train_accuracy, cross_ent))
                    x_lable.append(i)
                    y1_lable.append(train_accuracy)
                    y2_lable.append(cross_ent)
                    # Save model
                    saver_path = saver.save(sess, "model/model.ckpt")
                    print("Model saved in file:", saver_path)
                    test_accuracy = sess.run(accuracy,
                                             feed_dict={
                                                 x: x_val,
                                                 y_: y_val,
                                                 keep_prob: 1.0
                                             })
                    print("test accuracy %g" % test_accuracy)
                sess.run(train_step,
                         feed_dict={
                             x: batch0,
                             y_: batch1,
                             keep_prob: keep_prob_value
                         })

        test_accuracy = sess.run(accuracy,
                                 feed_dict={
                                     x: x_val,
                                     y_: y_val,
                                     keep_prob: 1.0
                                 })
        print("test accuracy %g" % test_accuracy)

        # Plot the recorded training accuracy and cross-entropy curves
        fig = plt.figure(figsize=(20, 8), dpi=100)
        plt.plot(x_lable, y1_lable, 's-', markersize=4, color='r',
                 label="train_accuracy", linewidth=2)  # 's-': square markers
        plt.plot(x_lable, y2_lable, 'o-', markersize=4, color='g',
                 label="cross_ent", linewidth=2)  # 'o-': circle markers
        plt.xlabel("number of training epochs")  # x-axis label
        plt.ylabel("value")                      # y-axis label
        plt.legend(loc="best")                   # legend
        plt.grid(color='b', ls='-.', lw=0.25)
        plt.savefig('./percentage.jpg')  # save before show() so the saved figure is not blank
        plt.show()
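A minimal sketch of how this version might be invoked; the hyperparameter values below are placeholders for illustration, not values taken from the original project.

if __name__ == '__main__':
    # Placeholder hyperparameters: learning rate, batch size, epochs, dropout keep prob
    train(lr=1e-4, batch_size=32, epoches=200, keep_prob_value=0.5)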