def main(_):
    # load data
    meta, train_data, test_data = input_data.load_data(FLAGS.data_dir, flatten=True)
    print('data loaded')
    print('train images: %s. test images: %s' %
          (train_data.images.shape[0], test_data.images.shape[0]))

    LABEL_SIZE = meta['label_size'] * meta['num_per_image']
    IMAGE_SIZE = meta['width'] * meta['height']
    print('label_size: %s, image_size: %s' % (LABEL_SIZE, IMAGE_SIZE))

    # variables in the graph for input data
    x = tf.placeholder(tf.float32, [None, IMAGE_SIZE])
    y_ = tf.placeholder(tf.float32, [None, LABEL_SIZE])

    # define the model
    W = tf.Variable(tf.zeros([IMAGE_SIZE, LABEL_SIZE]))
    b = tf.Variable(tf.zeros([LABEL_SIZE]))
    y = tf.matmul(x, W) + b

    # define loss and optimizer
    diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy = tf.reduce_mean(diff)
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # forward prop
    predict = tf.argmax(y, axis=1)
    expect = tf.argmax(y_, axis=1)

    # evaluate accuracy
    correct_prediction = tf.equal(predict, expect)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        # train
        for i in range(MAX_STEPS):
            batch_xs, batch_ys = train_data.next_batch(BATCH_SIZE)
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

            if i % 100 == 0:
                # test trained model
                r = sess.run(accuracy, feed_dict={x: test_data.images,
                                                  y_: test_data.labels})
                print('step = %s, accuracy = %.2f%%' % (i, r * 100))

        # final check after looping
        r_test = sess.run(accuracy, feed_dict={x: test_data.images,
                                               y_: test_data.labels})
        print('testing accuracy = %.2f%%' % (r_test * 100, ))

        saver = tf.train.Saver()
        save_path = saver.save(sess, './model.ckpt')
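# NOTE (added): with `num_per_image` characters per captcha, the single softmax
# above spans the whole concatenated label vector, i.e. it normalizes across all
# character positions at once. A common alternative is one softmax per character
# position. The helper below is an illustration, not part of the original
# script; it assumes logits/labels of shape [batch, num_per_image * label_size].

def per_char_cross_entropy(logits, labels, label_size):
    # merge the batch and character dimensions: [B, N*L] -> [B*N, L],
    # so each character position gets its own softmax
    logits_2d = tf.reshape(logits, [-1, label_size])
    labels_2d = tf.reshape(labels, [-1, label_size])
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=labels_2d,
                                                logits=logits_2d))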
def main(_):
    # load data
    meta, train_data, test_data = input_data.load_data(FLAGS.data_dir, flatten=False)
    print('data loaded')
    print('train images: %s. test images: %s' %
          (train_data.images.shape[0], test_data.images.shape[0]))

    LABEL_SIZE = meta['label_size']
    IMAGE_HEIGHT = meta['height']
    IMAGE_WIDTH = meta['width']
    IMAGE_SIZE = IMAGE_WIDTH * IMAGE_HEIGHT
    print('label_size: %s, image_size: %s' % (LABEL_SIZE, IMAGE_SIZE))

    # variables in the graph for input data
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT, IMAGE_WIDTH])
        y_ = tf.placeholder(tf.float32, [None, LABEL_SIZE])

        # must be 4-D with shape `[batch_size, height, width, channels]`
        x_image = tf.reshape(x, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])
        tf.summary.image('input', x_image, max_outputs=LABEL_SIZE)

    # define the model
    with tf.name_scope('convolution-layer-1'):
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])

        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)

    with tf.name_scope('convolution-layer-2'):
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])

        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)

    with tf.name_scope('densely-connected'):
        # two 2x2 poolings shrink each spatial dimension by 4, and conv2 has
        # 64 channels, so the flattened size is (H/4)*(W/4)*64 = H*W*4
        W_fc1 = weight_variable([IMAGE_WIDTH * IMAGE_HEIGHT * 4, 1024])
        b_fc1 = bias_variable([1024])

        h_pool2_flat = tf.reshape(h_pool2, [-1, IMAGE_WIDTH * IMAGE_HEIGHT * 4])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    with tf.name_scope('dropout'):
        # to reduce overfitting, apply dropout before the readout layer
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    with tf.name_scope('readout'):
        W_fc2 = weight_variable([1024, LABEL_SIZE])
        b_fc2 = bias_variable([LABEL_SIZE])

        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

    # Define loss and optimizer
    # Returns:
    #   A 1-D `Tensor` of length `batch_size` of the same type as `logits`
    #   with the softmax cross entropy loss.
    with tf.name_scope('loss'):
        cross_entropy = tf.reduce_mean(
            # -tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
            tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
        variable_summaries(cross_entropy)

    # forward prop
    with tf.name_scope('forward-prop'):
        predict = tf.argmax(y_conv, axis=1)
        expect = tf.argmax(y_, axis=1)

    # evaluate accuracy
    with tf.name_scope('evaluate_accuracy'):
        correct_prediction = tf.equal(predict, expect)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        variable_summaries(accuracy)

    with tf.Session(config=config) as sess:
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(LOG_DIR + '/train', sess.graph)
        test_writer = tf.summary.FileWriter(LOG_DIR + '/test', sess.graph)

        tf.global_variables_initializer().run()

        # train
        for i in range(MAX_STEPS):
            batch_xs, batch_ys = train_data.next_batch(BATCH_SIZE)
            # NOTE: keep_prob=1.0 disables dropout during training;
            # feed e.g. 0.5 here to actually apply it
            step_summary, _ = sess.run([merged, train_step],
                                       feed_dict={x: batch_xs, y_: batch_ys,
                                                  keep_prob: 1.0})
            train_writer.add_summary(step_summary, i)

            if i % 100 == 0:
                # evaluate on the current training batch
                valid_summary, train_accuracy = sess.run(
                    [merged, accuracy],
                    feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 1.0})
                train_writer.add_summary(valid_summary, i)

                # evaluate on a batch of test data
                test_x, test_y = test_data.next_batch(2000)
                test_summary, test_accuracy = sess.run(
                    [merged, accuracy],
                    feed_dict={x: test_x, y_: test_y, keep_prob: 1.0})
                test_writer.add_summary(test_summary, i)

                print('step %s, training accuracy = %.2f%%, testing accuracy = %.2f%%'
                      % (i, train_accuracy * 100, test_accuracy * 100))

        saver = tf.train.Saver()
        saver.save(sess, 'model/1char/model.ckpt')

        train_writer.close()
        test_writer.close()

        # final check after looping
        test_x, test_y = test_data.next_batch(2000)
        test_accuracy = accuracy.eval(feed_dict={x: test_x, y_: test_y,
                                                 keep_prob: 1.0})
        print('testing accuracy = %.2f%%' % (test_accuracy * 100, ))
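# NOTE (added): the CNN scripts in this section call weight_variable,
# bias_variable, conv2d, max_pool_2x2 and variable_summaries without defining
# them. The definitions below are a minimal sketch in the style of the
# TensorFlow MNIST tutorial, consistent with how they are called above; they
# are assumptions, not the original helpers.

def weight_variable(shape):
    # small positive-noise init to break symmetry and avoid dead ReLUs
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W):
    # stride 1 with SAME padding keeps the spatial size unchanged
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # halves height and width
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

def variable_summaries(var):
    # attach mean/stddev/min/max scalars and a histogram for TensorBoard
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)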
def main(_):
    # load data
    print('start')
    meta, train_data, test_data = input_data.load_data(c_dir + '/' + FLAGS.data_dir,
                                                       flatten=False)
    print('data loaded')
    print('train images: %s. test images: %s' %
          (train_data.images.shape[0], test_data.images.shape[0]))

    LABEL_SIZE = meta['label_size']
    NUM_PER_IMAGE = meta['num_per_image']
    IMAGE_HEIGHT = meta['height']
    IMAGE_WIDTH = meta['width']
    IMAGE_SIZE = IMAGE_WIDTH * IMAGE_HEIGHT
    print('label_size: %s, image_size: %s' % (LABEL_SIZE, IMAGE_SIZE))

    # variables in the graph for input data
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT, IMAGE_WIDTH])
        y_ = tf.placeholder(tf.float32, [None, NUM_PER_IMAGE * LABEL_SIZE])

        # must be 4-D with shape `[batch_size, height, width, channels]`
        x_image = tf.reshape(x, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])
        tf.summary.image('input', x_image, max_outputs=LABEL_SIZE)

    # define the model
    with tf.name_scope('convolution-layer-1'):
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])

        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)

    with tf.name_scope('convolution-layer-2'):
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])

        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)

    with tf.name_scope('densely-connected'):
        W_fc1 = weight_variable([IMAGE_WIDTH * IMAGE_HEIGHT * 4, 1024])
        b_fc1 = bias_variable([1024])

        h_pool2_flat = tf.reshape(h_pool2, [-1, IMAGE_WIDTH * IMAGE_HEIGHT * 4])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    with tf.name_scope('dropout'):
        # to reduce overfitting, apply dropout before the readout layer
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    with tf.name_scope('readout'):
        # the readout is LABEL_SIZE wide, so this script effectively assumes
        # num_per_image == 1 (otherwise y_conv and y_ would differ in width)
        W_fc2 = weight_variable([1024, LABEL_SIZE])
        b_fc2 = bias_variable([LABEL_SIZE])

        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

    # forward prop
    with tf.name_scope('forward-prop'):
        predict = tf.argmax(y_conv, axis=1)
        expect = tf.argmax(y_, axis=1)

    # evaluate accuracy
    with tf.name_scope('evaluate_accuracy'):
        correct_prediction = tf.equal(predict, expect)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        variable_summaries(accuracy)

    fixed_adv_sample_get_op = stepll_adversarial_images(x, y_conv)

    with tf.Session() as sess:
        # initialize first, then restore the trained weights; running the
        # initializer after restore() would overwrite the checkpoint values
        tf.global_variables_initializer().run()
        restore(sess)

        # test on a single training example
        test_x, test_y = train_data.next_batch(1)
        _predict = predict.eval(feed_dict={x: test_x, y_: test_y, keep_prob: 1.0})
        _expect = expect.eval(feed_dict={x: test_x, y_: test_y, keep_prob: 1.0})

        # generate the adversarial version of the same image and re-classify it
        _adv = fixed_adv_sample_get_op.eval(feed_dict={x: test_x, y_: test_y,
                                                       keep_prob: 1.0})
        print(_adv.shape)
        _adv_predict = predict.eval(feed_dict={x: _adv, y_: test_y, keep_prob: 1.0})

        # show the clean image next to its adversarial version
        plt.subplot(1, 2, 1)
        plt.imshow(test_x[0])
        plt.subplot(1, 2, 2)
        plt.imshow(_adv[0])
        plt.show()

        print(_predict, _expect, _adv_predict)
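# NOTE (added): stepll_adversarial_images and restore are used above but not
# defined in this section. The sketches below are assumptions: a one-step
# "least-likely class" attack (step l.l., in the sense of Kurakin et al.,
# "Adversarial Machine Learning at Scale") and a plain checkpoint restore.
# The epsilon value and the checkpoint path are placeholders.

def stepll_adversarial_images(x, logits, eps=0.1):
    # pick each image's least-likely class under the current model ...
    least_likely = tf.argmin(logits, axis=1)
    target = tf.one_hot(least_likely, tf.shape(logits)[1])
    # ... then take one signed-gradient step that makes that class more
    # likely, i.e. descend the cross-entropy toward the least-likely target
    loss = tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=logits)
    grad, = tf.gradients(loss, x)
    # assumes pixel values in [0, 1]
    return tf.clip_by_value(x - eps * tf.sign(grad), 0.0, 1.0)

def restore(sess):
    saver = tf.train.Saver()
    # placeholder path; point this at the checkpoint directory actually used
    saver.restore(sess, tf.train.latest_checkpoint('model/1char'))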
def main(_):
    # load data
    meta, train_data, test_data = input_data.load_data(FLAGS.data_dir, flatten=False)
    print('data loaded')
    print('train images: %s. test images: %s' %
          (train_data.images.shape[0], test_data.images.shape[0]))

    LABEL_SIZE = meta['label_size']
    IMAGE_HEIGHT = meta['height']
    IMAGE_WIDTH = meta['width']
    # LABEL_SIZE = 62
    # IMAGE_HEIGHT = 40
    # IMAGE_WIDTH = 40
    IMAGE_SIZE = IMAGE_WIDTH * IMAGE_HEIGHT
    print('label_size: %s, image_size: %s' % (LABEL_SIZE, IMAGE_SIZE))

    # variables in the graph for input data
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT, IMAGE_WIDTH])
        y_ = tf.placeholder(tf.float32, [None, LABEL_SIZE])

        # must be 4-D with shape `[TRAIN_BATCH_SIZE, height, width, channels]`
        x_image = tf.reshape(x, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])
        tf.summary.image('input', x_image, max_outputs=LABEL_SIZE)

    # define the model
    with tf.name_scope('convolution-layer-1'):
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])

        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)

    with tf.name_scope('convolution-layer-2'):
        W_conv2 = weight_variable([3, 3, 32, 64])
        b_conv2 = bias_variable([64])

        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)

    with tf.name_scope('convolution-layer-3'):
        W_conv3 = weight_variable([3, 3, 64, 128])
        b_conv3 = bias_variable([128])

        h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
        h_pool3 = max_pool_2x2(h_conv3)

    with tf.name_scope('convolution-layer-4'):
        W_conv4 = weight_variable([3, 3, 128, 256])
        b_conv4 = bias_variable([256])

        h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
        # h_pool4 = max_pool_2x2(h_conv4)
        # for a 40x40 input, three 2x2 poolings leave a 5x5 feature map, so
        # this 5x5 VALID pooling reduces it to 1x1x256 (a global max pool)
        h_pool4 = tf.nn.max_pool(h_conv4, ksize=[1, 5, 5, 1],
                                 strides=[1, 1, 1, 1], padding='VALID')

    with tf.name_scope('readout'):
        W_fc2 = weight_variable([256, LABEL_SIZE])
        b_fc2 = bias_variable([LABEL_SIZE])

        # pre_fc = tf.reshape(h_pool5, [-1, 512])
        # h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        pre_fc = tf.reshape(h_pool4, [-1, 256])
        y_conv = tf.matmul(pre_fc, W_fc2) + b_fc2

    # earlier variants of the tail of the network, kept for reference:
    # with tf.name_scope('convolution-layer-5'):
    #     W_conv5 = weight_variable([3, 3, 256, 512])
    #     b_conv5 = bias_variable([512])
    #     h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)
    #     h_pool5 = max_pool_2x2(h_conv5)
    #     h_pool5 = tf.nn.max_pool(h_conv5, ksize=[1, 2, 2, 1],
    #                              strides=[1, 2, 2, 1], padding='SAME')

    # with tf.name_scope('densely-connected'):
    #     W_fc1 = weight_variable([IMAGE_WIDTH * IMAGE_HEIGHT * 4, 1024])
    #     b_fc1 = bias_variable([1024])
    #     h_pool2_flat = tf.reshape(h_pool2, [-1, IMAGE_WIDTH * IMAGE_HEIGHT * 4])
    #     h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # with tf.name_scope('dropout'):
    #     # to reduce overfitting, apply dropout before the readout layer
    #     keep_prob = tf.placeholder(tf.float32)
    #     h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # with tf.name_scope('readout'):
    #     W_fc2 = weight_variable([512, LABEL_SIZE])
    #     b_fc2 = bias_variable([LABEL_SIZE])
    #     pre_fc = tf.reshape(h_pool5, [-1, 512])
    #     y_conv = tf.matmul(pre_fc, W_fc2) + b_fc2

    # Define loss and optimizer
    # Returns:
    #   A 1-D `Tensor` of length `TRAIN_BATCH_SIZE` of the same type as
    #   `logits` with the softmax cross entropy loss.
    with tf.name_scope('loss'):
        cross_entropy = tf.reduce_mean(
            # -tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
            tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
        variable_summaries(cross_entropy)

    # forward prop
    with tf.name_scope('forward-prop'):
        predict = tf.argmax(y_conv, axis=1)
        expect = tf.argmax(y_, axis=1)

    # evaluate accuracy
    with tf.name_scope('evaluate_accuracy'):
        correct_prediction = tf.equal(predict, expect)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        variable_summaries(accuracy)

    with tf.Session() as sess:
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(LOG_DIR + '/train', sess.graph)
        test_writer = tf.summary.FileWriter(LOG_DIR + '/test', sess.graph)

        tf.global_variables_initializer().run()

        # train
        for i in range(MAX_STEPS):
            batch_xs, batch_ys = train_data.next_batch(TRAIN_BATCH_SIZE)
            step_summary, _ = sess.run([merged, train_step],
                                       feed_dict={x: batch_xs, y_: batch_ys})
            train_writer.add_summary(step_summary, i)

            if i % 100 == 0:
                # evaluate on the current training batch
                valid_summary, train_accuracy = sess.run(
                    [merged, accuracy], feed_dict={x: batch_xs, y_: batch_ys})
                train_writer.add_summary(valid_summary, i)

                # average accuracy over TEST_STEPS batches of test data
                sum_test_acc = 0
                for j in range(TEST_STEPS):
                    test_x, test_y = test_data.next_batch(TEST_BATCH_SIZE)
                    test_summary, test_accuracy = sess.run(
                        [merged, accuracy], feed_dict={x: test_x, y_: test_y})
                    test_writer.add_summary(test_summary, i)
                    sum_test_acc += test_accuracy
                sum_test_acc /= TEST_STEPS

                print('step %s, training accuracy = %.2f%%, testing accuracy = %.2f%%'
                      % (i, train_accuracy * 100, sum_test_acc * 100))

        train_writer.close()
        test_writer.close()

        # saver = tf.train.Saver()
        # saver.save(sess, osp.join(LOG_DIR, './models-5-layers-zyf'))
        # saver.save(sess, osp.join(LOG_DIR, './models-4-layers-zyf'))

        # final check after looping
        sum_test_acc = 0
        for j in range(TEST_STEPS):
            test_x, test_y = test_data.next_batch(TEST_BATCH_SIZE)
            test_accuracy = accuracy.eval(feed_dict={x: test_x, y_: test_y})
            sum_test_acc += test_accuracy
        sum_test_acc /= TEST_STEPS
        print('testing accuracy = %.2f%%' % (sum_test_acc * 100, ))

        # `i` keeps its last loop value here, so after a full run the first
        # condition is always true and the model is saved
        if i >= MAX_STEPS * 0.9 or sum_test_acc >= 0.95:
            saver = tf.train.Saver()
            # saver.save(sess, osp.join(LOG_DIR, './models-5-layers-zyf'))
            saver.save(sess, osp.join(LOG_DIR, './models-7-layers-zyf'),
                       global_step=i)
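# NOTE (added): a minimal sketch of reloading the checkpoint saved above for
# inference. It assumes the same graph-building code from this script has
# already run in the process, so the variable names match the checkpoint;
# the function name and signature are illustrative, not from the original.

def load_and_predict(sess, predict_op, x_placeholder, images, ckpt_dir):
    # restore the newest checkpoint in ckpt_dir (e.g. LOG_DIR above, which
    # holds files named models-7-layers-zyf-<step>) and run the predict op
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))
    return sess.run(predict_op, feed_dict={x_placeholder: images})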
def main(_):
    # load data
    meta, train_data, test_data = input_data.load_data(FLAGS.data_dir, flatten=True)
    print('data loaded. train images: %s. test images: %s' %
          (train_data.images.shape[0], test_data.images.shape[0]))

    LABEL_SIZE = meta['label_size']
    IMAGE_WIDTH = meta['width']
    IMAGE_HEIGHT = meta['height']
    IMAGE_SIZE = IMAGE_WIDTH * IMAGE_HEIGHT
    print('label_size: %s, image_size: %s' % (LABEL_SIZE, IMAGE_SIZE))

    # variables in the graph for input data
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, IMAGE_SIZE])
        y_ = tf.placeholder(tf.float32, [None, LABEL_SIZE])
        variable_summaries(x)
        variable_summaries(y_)

        # must be 4-D with shape `[batch_size, height, width, channels]`
        images_shaped_input = tf.reshape(x, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])
        tf.summary.image('input', images_shaped_input, max_outputs=LABEL_SIZE * 2)

    # define the model
    # adding a name scope ensures logical grouping of the layers in the graph
    with tf.name_scope('linear_model'):
        with tf.name_scope('W'):
            W = tf.Variable(tf.zeros([IMAGE_SIZE, LABEL_SIZE]))
            variable_summaries(W)
        with tf.name_scope('b'):
            b = tf.Variable(tf.zeros([LABEL_SIZE]))
            variable_summaries(b)
        with tf.name_scope('y'):
            y = tf.matmul(x, W) + b
            tf.summary.histogram('y', y)

    # Define loss and optimizer
    # Returns:
    #   A 1-D `Tensor` of length `batch_size` of the same type as `logits`
    #   with the softmax cross entropy loss.
    with tf.name_scope('loss'):
        diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
        cross_entropy = tf.reduce_mean(diff)
        train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
        variable_summaries(diff)

    # forward prop
    predict = tf.argmax(y, axis=1)
    expect = tf.argmax(y_, axis=1)

    # evaluate accuracy
    with tf.name_scope('evaluate_accuracy'):
        correct_prediction = tf.equal(predict, expect)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        variable_summaries(accuracy)

    with tf.Session() as sess:
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(LOG_DIR + '/train', sess.graph)

        tf.global_variables_initializer().run()

        # train
        for i in range(MAX_STEPS):
            batch_xs, batch_ys = train_data.next_batch(BATCH_SIZE)
            train_summary, _ = sess.run([merged, train_step],
                                        feed_dict={x: batch_xs, y_: batch_ys})
            train_writer.add_summary(train_summary, i)

            if i % 100 == 0:
                # test trained model
                test_summary, r = sess.run([merged, accuracy],
                                           feed_dict={x: test_data.images,
                                                      y_: test_data.labels})
                train_writer.add_summary(test_summary, i)
                print('step = %s, accuracy = %.2f%%' % (i, r * 100))

        # final check after looping
        test_summary, r_test = sess.run([merged, accuracy],
                                        feed_dict={x: test_data.images,
                                                   y_: test_data.labels})
        train_writer.add_summary(test_summary, i)
        print('testing accuracy = %.2f%%' % (r_test * 100, ))

        # close the writer only after the last summary has been added
        train_writer.close()
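# NOTE (added): every script in this section assumes the same preamble: the
# input_data module, a FLAGS object holding data_dir, and training constants
# such as MAX_STEPS, BATCH_SIZE (or TRAIN_BATCH_SIZE / TEST_BATCH_SIZE /
# TEST_STEPS), LOG_DIR, config, osp and plt. The sketch below shows that
# boilerplate; the hyperparameter values and default paths are placeholders,
# not taken from the original code.

import argparse
import os.path as osp
import sys

import matplotlib.pyplot as plt
import tensorflow as tf

import input_data

MAX_STEPS = 10000
BATCH_SIZE = 100
LOG_DIR = 'log'

# session config used by the CNN script above; allow_growth avoids grabbing
# all GPU memory up front
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))

FLAGS = None

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str,
                        default='images/char-1-epoch-1000/',
                        help='directory of the captcha dataset')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)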