def backward(data):
    # Despite its name, this function runs evaluation: it restores the latest
    # checkpoint and averages loss/accuracy over EPOCH test batches.
    x = tf.placeholder(tf.float32, (BATCH_SIZE, network.IMAGE_SIZE,
                                    network.IMAGE_SIZE, network.IMAGE_CHANNEL))
    y = network.forward(x, False, KEEP_PROB, REGULARIZER)
    y_ = tf.placeholder(tf.float32, (None, network.OUTPUT_NODE))
    loss1 = tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_)
    loss2 = tf.reduce_mean(loss1)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)), tf.float32))
    saver = tf.train.Saver(max_to_keep=1)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        model = tf.train.latest_checkpoint('mnist_Complex/')
        saver.restore(sess, model)
        accu_sum = 0
        loss_sum = 0
        for i in range(0, EPOCH):
            xf, yf = data.next_batch(BATCH_SIZE)
            xf = mnist.mnist_fft(xf, BATCH_SIZE)
            accu, los = sess.run([accuracy, loss2], feed_dict={x: xf, y_: yf})
            accu_sum += accu
            loss_sum += los
        accu_mean = accu_sum / EPOCH
        loss_mean = loss_sum / EPOCH
        print('loss on test: ', loss_mean)
        print('accuracy on test: ', accu_mean)
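# Hedged usage sketch for the evaluation pass above. The MNIST loader path and
# the call site are assumptions, not part of the original project; note that
# tf.reset_default_graph() is needed in TF 1.x graph mode if a training graph
# was already built in the same process.
from tensorflow.examples.tutorials.mnist import input_data

data = input_data.read_data_sets('MNIST_data', one_hot=True)
tf.reset_default_graph()   # drop any stale training graph before rebuilding
backward(data.test)        # data.test provides the next_batch() used above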
def test(data_set):
    x = tf.placeholder(dtype=tf.float32,
                       shape=(None, network.IMAGE_RAW, network.IMAGE_COL,
                              network.CHANNEL))
    y = network.forward(x, False, 0.1, 0.1)
    y_ = tf.placeholder(dtype=tf.float32, shape=(None, network.OUTPUT_SIZE))
    losses = tf.losses.softmax_cross_entropy(onehot_labels=y_, logits=y)
    loss = tf.reduce_mean(losses)
    correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    saver = tf.train.Saver()
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        xs, ys = getData.get_all(data_set)
        xs = np.reshape(xs, (-1, network.IMAGE_RAW, network.IMAGE_COL,
                             network.CHANNEL))
        model = tf.train.latest_checkpoint('ckpt_person/')
        saver.restore(sess, model)
        lo, accu = sess.run([loss, accuracy], feed_dict={x: xs, y_: ys})
        print("loss: ", lo)
        print("accuracy: ", accu)
def train(train_set):
    x = tf.placeholder(tf.float32, shape=(None, network.IMAGE_RAW,
                                          network.IMAGE_COL, network.CHANNEL))
    y_ = tf.placeholder(tf.float32, shape=(None, network.OUTPUT_SIZE))
    y = network.forward(x, True, REGULARIZER, KEEP_PROB)
    loss = tf.losses.softmax_cross_entropy(logits=y, onehot_labels=y_)
    losses = tf.reduce_mean(loss)
    losses += tf.add_n(tf.get_collection('losses'))  # add regularization terms
    # axis=1: argmax over each row, yielding the predicted column index
    correct = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(losses)
    saver = tf.train.Saver(max_to_keep=1)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        accu1 = 0
        accu2 = 0
        # buffers for the loss/accuracy curves plotted below
        x_p = [i for i in range(1, EPOCH + 1)]
        y_loss = [i for i in range(1, EPOCH + 1)]
        y_accu = [i for i in range(1, EPOCH + 1)]
        for i in range(1, EPOCH + 1):
            x_train, y_train = getData.get_batch(train_set, BATCH_SIZE)
            x_train = np.reshape(x_train, (BATCH_SIZE, network.IMAGE_RAW,
                                           network.IMAGE_COL, network.CHANNEL))
            _, loss_batch, accuracy_batch = sess.run(
                [train_op, losses, accuracy],
                feed_dict={x: x_train, y_: y_train})
            y_loss[i - 1] = loss_batch
            y_accu[i - 1] = accuracy_batch
            # save only once accuracy has stayed above 0.4 for three steps
            if i > 100 and accu1 > 0.4 and accu2 > 0.4 and accuracy_batch > 0.4:
                saver.save(sess, 'ckpt_person/my_first.ckpt')
            accu1 = accu2
            accu2 = accuracy_batch
            print("epoch:", i)
            print("loss_batch:", loss_batch)
            print("accuracy_batch:", accuracy_batch)
            print(".....................................")
        print(y_loss)
        print(y_accu)
        plt.figure()
        plt.plot(x_p[0:len(x_p):4], y_loss[0:len(y_loss):4])
        plt.figure()
        plt.plot(x_p[0:len(x_p):4], y_accu[0:len(y_accu):4])
        plt.show()
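# A hedged sketch of how the 'losses' collection summed in train() is typically
# populated inside network.forward. dense_with_l2 is a hypothetical helper, not
# the project's real code; only tf.add_to_collection / tf.nn.l2_loss are
# standard TF 1.x API.
def dense_with_l2(inputs, units, regularizer_rate, name):
    """Fully connected layer whose L2 penalty lands in the 'losses' collection."""
    with tf.variable_scope(name):
        w = tf.get_variable('w', [int(inputs.shape[-1]), units],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('b', [units], initializer=tf.zeros_initializer())
        tf.add_to_collection('losses', regularizer_rate * tf.nn.l2_loss(w))
        return tf.matmul(inputs, w) + b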
def simple_test(test_img):
    if len(test_img) == 0:
        x_test = cv2.imread(args.TEST_IMG, cv2.IMREAD_GRAYSCALE)
    else:
        x_test = test_img
    img_h = x_test.shape[0]
    img_w = x_test.shape[1]
    if img_h != 28 or img_w != 28:
        x_test = cv2.resize(x_test, (28, 28), interpolation=cv2.INTER_NEAREST)
        img_h, img_w = 28, 28  # keep the dims in sync after resizing
    img_channels = 1
    x_test = x_test.reshape(1, img_h, img_w, img_channels).astype(np.float32)
    x_test /= 255.
    n_features = x_test.shape[-1]
    X = network.get_placeholder_X(img_h, img_w, n_features)
    _, predictions = network.forward(X, is_training=False)
    print("x_test.shape = ", x_test.shape)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # search the train dir for the latest model
        ckpt = tf.train.get_checkpoint_state(args.TRAIN_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            # load the trained model
            saver.restore(sess, ckpt.model_checkpoint_path)
            predict_value = sess.run(predictions, feed_dict={X: x_test})
            print("The predicted value of the input image is %d"
                  % np.argmax(predict_value))
        else:
            print("No checkpoint file found")
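# Hypothetical call sites for simple_test; 'digit.png' is a placeholder path.
# An empty array makes it fall back to args.TEST_IMG on disk:
simple_test(np.empty(0))
# ...or pass a grayscale image loaded with OpenCV (reset the default graph
# first if a graph was already built in this process):
# simple_test(cv2.imread('digit.png', cv2.IMREAD_GRAYSCALE))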
n_labels = y_train.shape[-1]
print("x_train.shape = ", x_train.shape)
print("y_train.shape = ", y_train.shape)
print("x_val.shape = ", x_val.shape)
print("y_val.shape = ", y_val.shape)

TRAIN_EPOCHS = args.TRAIN_EPOCHS
BATCH_SIZES = args.BATCH_SIZES
LEARNING_RATE = args.LEARNING_RATE
TOTAL_BATCHES = int(n_examples / BATCH_SIZES + 0.5)
DISPLAY_EPOCH = args.DISPLAY_EPOCH

X = network.get_placeholder_X(img_h, img_w, n_features)
Y = network.get_placeholder_Y(n_labels)
outputs, predictions = network.forward(X, is_training=True)
loss, train_op = network.train(LEARNING_RATE, outputs, Y)
correct_prediction, acc = network.accuracy(predictions, Y)
saver = network.save()

logs_path = args.LOG_DIR
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", acc)
merged_summary_op = tf.summary.merge_all()

print("\nStart Training!!!\n")
with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
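    # A hedged sketch of the epoch loop that would follow the initialization
    # above; next_batch is a hypothetical batching helper, not part of the
    # original fragment, and the summary/validation cadence is an assumption.
    summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
    step = 0
    for epoch in range(TRAIN_EPOCHS):
        for _ in range(TOTAL_BATCHES):
            x_batch, y_batch = next_batch(x_train, y_train, BATCH_SIZES)
            _, batch_loss, summary = sess.run(
                [train_op, loss, merged_summary_op],
                feed_dict={X: x_batch, Y: y_batch})
            summary_writer.add_summary(summary, step)
            step += 1
        if (epoch + 1) % DISPLAY_EPOCH == 0:
            val_acc = sess.run(acc, feed_dict={X: x_val, Y: y_val})
            print("epoch %d: loss = %.4f, val accuracy = %.4f"
                  % (epoch + 1, batch_loss, val_acc))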
def test_forward(self):
    network = nw.init_network()
    x = np.array([1.0, 0.5])
    y = nw.forward(network, x)
    # compare floats with a tolerance rather than exact equality
    self.assertAlmostEqual(y[0], 0.3168270764110298)
    self.assertAlmostEqual(y[1], 0.6962790898619668)
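# The expected values above match the classic 3-layer sigmoid example from
# "Deep Learning from Scratch" (ch. 3); a sketch of the network under test,
# assuming that implementation (the project's real nw module may differ):
import numpy as np

def sigmoid(a):
    return 1 / (1 + np.exp(-a))

def init_network():
    return {
        'W1': np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]),
        'b1': np.array([0.1, 0.2, 0.3]),
        'W2': np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]]),
        'b2': np.array([0.1, 0.2]),
        'W3': np.array([[0.1, 0.3], [0.2, 0.4]]),
        'b3': np.array([0.1, 0.2]),
    }

def forward(network, x):
    z1 = sigmoid(np.dot(x, network['W1']) + network['b1'])
    z2 = sigmoid(np.dot(z1, network['W2']) + network['b2'])
    return np.dot(z2, network['W3']) + network['b3']  # identity output layer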
def backward(data):
    x = tf.placeholder(tf.float32, (BATCH_SIZE, network.IMAGE_SIZE,
                                    network.IMAGE_SIZE, network.IMAGE_CHANNEL))
    y = network.forward(x, True, KEEP_PROB, REGULARIZER)
    y_ = tf.placeholder(tf.float32, (None, network.OUTPUT_NODE))
    loss1 = tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_)
    loss2 = tf.reduce_mean(loss1)
    loss3 = loss2 + tf.add_n(tf.get_collection('losses'))  # add regularization terms
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)), tf.float32))
    optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss3)
    saver = tf.train.Saver(max_to_keep=1)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        accu1 = 0
        accu2 = 0
        # buffers for the loss/accuracy curves plotted below
        x_p = [i for i in range(1, EPOCH + 1)]
        y_loss = [i for i in range(1, EPOCH + 1)]
        y_accu = [i for i in range(1, EPOCH + 1)]
        for i in range(1, EPOCH + 1):
            xf, yf = data.next_batch(BATCH_SIZE)
            xf = mnist.mnist_fft(xf, BATCH_SIZE)
            _, accu, los = sess.run([optimizer, accuracy, loss3],
                                    feed_dict={x: xf, y_: yf})
            y_loss[i - 1] = los
            y_accu[i - 1] = accu
            # save only once accuracy has stayed above 0.75 for three steps
            if accu1 > 0.75 and accu2 > 0.75 and accu > 0.75:
                saver.save(sess, 'mnist_Complex/mnist_Complex.ckpt',
                           write_meta_graph=False)
            accu1 = accu2
            accu2 = accu
            print('Epoch: ', i)
            print('loss on batch: ', los)
            print('accuracy on batch: ', accu)
            print('.......................................')
        print(y_loss)
        print(y_accu)
        plt.figure()
        plt.plot(x_p[0:len(x_p):4], y_loss[0:len(y_loss):4])
        plt.figure()
        plt.plot(x_p[0:len(x_p):4], y_accu[0:len(y_accu):4])
        plt.show()
# build one-hot labels: Y is a flat buffer of num * 10 zeros, and entry
# i * 10 + label is set for each example
for i in range(0, num):
    number = Y_[i]
    index = i * 10 + number
    Y[index] = 1

X = X_ / 255.0                 # scale pixels to [0, 1]
X = X.reshape((num, 28 * 28))
Y = Y.reshape((num, 10))
X = X.T                        # column-per-example layout: (784, num)
Y = Y.T                        # (10, num)
# print(Image(X[1]).print())

print(X.shape, " X shape")
print(Y.shape, " Y shape")

sizes = n.layer_sizes(X, Y)
print(sizes, " Layer sizes")
parameters = n.initialize_parameters(*sizes)

# plain gradient descent: forward pass, cost, backward pass, update
for i in range(0, 1000):
    A2, cache = n.forward(X, parameters)
    cost = n.compute_cost(A2, Y, parameters)
    grads = n.backward(parameters, cache, X, Y)
    parameters = n.update_parameters(parameters, grads, learning_rate)
    print(cost)
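# Hedged follow-up: training-set accuracy after the loop above, assuming the
# n module's forward() returns output activations A2 of shape (10, num),
# mirroring the (10, num) layout of Y (an assumption about its conventions):
import numpy as np

predictions = np.argmax(A2, axis=0)   # predicted digit per example
labels = np.argmax(Y, axis=0)         # true digit per example
print("train accuracy:", np.mean(predictions == labels))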