def main():
    """Evaluate the trained CNN on the test set and print its accuracy.

    Builds the evaluation graph, restores the latest checkpoint from
    LOG_DIR, and runs one accuracy evaluation over the queue-fed test
    data loaded from ``data/test/data.csv``.
    """
    with tf.Graph().as_default():
        cnn = CNN(image_size=FLAGS.image_size, class_count=len(Channel))
        # shuffle=False: evaluation order does not matter and determinism
        # makes runs comparable.
        images, labels = load_data(
            'data/test/data.csv',
            batch_size=FLAGS.batch_size,
            image_size=FLAGS.image_size,
            class_count=len(Channel),
            shuffle=False)
        keep_prob = tf.placeholder(tf.float32)
        logits = cnn.inference(images, keep_prob)
        accuracy = cnn.accuracy(logits, labels)

        saver = tf.train.Saver()
        init_op = tf.global_variables_initializer()

        with tf.Session() as sess:
            sess.run(init_op)
            # Restore trained weights; overwrites the freshly initialized
            # variables with the checkpointed ones.
            saver.restore(sess, os.path.join(LOG_DIR, 'model.ckpt'))

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            try:
                # BUG FIX: dropout must be disabled at evaluation time.
                # The original fed keep_prob=0.5, which randomly drops
                # half the units and makes the measured test accuracy
                # noisy and biased low. keep_prob=1.0 keeps all units.
                accuracy_value = sess.run(accuracy,
                                          feed_dict={keep_prob: 1.0})
                print(f'test accuracy: {accuracy_value}')
            finally:
                # Always stop and join queue-runner threads, even if
                # sess.run raises (e.g. OutOfRangeError), so the process
                # does not hang on dangling threads.
                coord.request_stop()
                coord.join(threads)
train_labels) test_images, test_labels = fetch_images_and_labels(TEST_DIR) test_images, test_labels = shaffle_images_and_labels(test_images, test_labels) cnn = CNN(image_size=FLAGS.image_size, class_count=len(CLASSES)) with tf.Graph().as_default(): x = tf.placeholder(tf.float32, [None, PIXEL_COUNT]) labels = tf.placeholder(tf.float32, [None, len(CLASSES)]) keep_prob = tf.placeholder(tf.float32) y = cnn.inference(x, keep_prob) v = cnn.cross_entropy(y, labels) train_step = cnn.train_step(v, FLAGS.learning_rate) accuracy = cnn.accuracy(y, labels) saver = tf.train.Saver() init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) summary_op = tf.summary.merge_all() summary_writer = tf.summary.FileWriter(LOG_DIR, sess.graph) for i in range(FLAGS.step_count): for j in range(int(len(train_images) / FLAGS.batch_size)): batch = FLAGS.batch_size * j sess.run(train_step, feed_dict={