Example 1
def test(test_file):
    log_dir = 'saves'
    image_arr = get_one_image(test_file)

    with tf.Graph().as_default():
        # Build the graph from a placeholder and feed the loaded image
        # through it, so the restored model actually scores the input.
        x = tf.placeholder(tf.float32, shape=[32, 32, 3])
        image = tf.image.per_image_standardization(x)
        image = tf.reshape(image, [1, 32, 32, 3])
        print(image.shape)  # (1, 32, 32, 3)
        p = model.mmodel(image, 1)
        logits = tf.nn.softmax(p)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global step: {}'.format(global_step))
                prediction = sess.run(logits, feed_dict={x: image_arr})
                max_index = np.argmax(prediction)
                print(max_index)
                return max_index
            else:
                print('No checkpoint')
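
The helper get_one_image() is not part of this snippet; here is a minimal sketch of what it might look like, assuming Pillow and NumPy (the helper name comes from the example, but the resize to 32x32 RGB is an assumption made to match the placeholder shape):

# Hypothetical helper: load one image file as a 32x32 RGB float array.
import numpy as np
from PIL import Image

def get_one_image(test_file):
    img = Image.open(test_file).convert('RGB')  # force 3 channels
    img = img.resize((32, 32))                  # match the model input size
    return np.array(img, dtype=np.float32)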
Example 2
def run_training():
    data_dir = 'D:/WCsPy/data/train/'
    log_dir = 'saves'
    image, label = inputData.get_files(data_dir)
    image_batches, label_batches = inputData.get_batches(
        image, label, 32, 32, 16, 20)
    print(image_batches.shape)
    p = model.mmodel(image_batches, 16)
    cost = model.loss(p, label_batches)
    train_op = model.training(cost, 0.001)
    acc = model.get_accuracy(p, label_batches)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()
    coord = tf.train.Coordinator()
    # Start the queue-runner threads that fill the input batch queue.
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(1000):
            if coord.should_stop():
                break
            _, train_acc, train_loss = sess.run([train_op, acc, cost])
            print("step:{} loss:{} accuracy:{}".format(
                step, train_loss, train_acc))
            if step % 100 == 0:
                check = os.path.join(log_dir, "model.ckpt")
                saver.save(sess, check, global_step=step)
    except tf.errors.OutOfRangeError:
        print("Done!!!")
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
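
inputData.get_files() and inputData.get_batches() are external to this snippet; below is a rough sketch of what such a queue-based TF1 input pipeline typically looks like. The directory layout (one subdirectory per class), the JPEG format, and the exact signatures are assumptions, not the example's actual module:

import os
import tensorflow as tf

def get_files(data_dir):
    # Assumption: data_dir contains one subdirectory per class label.
    images, labels = [], []
    for idx, cls in enumerate(sorted(os.listdir(data_dir))):
        for name in os.listdir(os.path.join(data_dir, cls)):
            images.append(os.path.join(data_dir, cls, name))
            labels.append(idx)
    return images, labels

def get_batches(images, labels, width, height, batch_size, capacity):
    # Queue of (filename, label) pairs, decoded and batched on the fly.
    queue = tf.train.slice_input_producer(
        [tf.cast(images, tf.string), tf.cast(labels, tf.int32)])
    image = tf.image.decode_jpeg(tf.read_file(queue[0]), channels=3)
    image = tf.image.resize_image_with_crop_or_pad(image, height, width)
    image = tf.image.per_image_standardization(image)
    image_batch, label_batch = tf.train.batch(
        [image, queue[1]], batch_size=batch_size, capacity=capacity)
    return image_batch, tf.reshape(label_batch, [batch_size])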
Example 3
def run_training(file_tfRecord):
    log_dir = './log/'
    image, label = rdData.read_tfRecord(file_tfRecord)
    image_batches, label_batches = tf.train.batch([image, label],
                                                  batch_size=16,
                                                  capacity=20)
    p = model.mmodel(image_batches, 16)
    cost = model.loss(p, label_batches)
    train_op = model.training(cost, 0.001)
    acc = model.get_accuracy(p, label_batches)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    # Merge all summaries and create a writer for TensorBoard logs.
    summary_op = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(log_dir, sess.graph)

    sess.run(init)
    saver = tf.train.Saver()

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(1000):
            if coord.should_stop():
                break
            _, train_acc, train_loss = sess.run([train_op, acc, cost])
            print("{} step:{} loss:{} accuracy:{}".format(
                datetime.now(), step, train_loss, train_acc))
            if step % 250 == 0:
                # Record the summary and checkpoint the model at this step.
                summary = sess.run(summary_op)
                train_writer.add_summary(summary, step)

                check = os.path.join(log_dir, "mmodel.ckpt")
                saver.save(sess, check, global_step=step)
    except tf.errors.OutOfRangeError:
        print("Done!!!")
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
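
rdData.read_tfRecord() is likewise external; here is a plausible TF1 reader, assuming each record stores the raw image bytes under 'image_raw' and an int64 label under 'label'. The feature keys and the 32x32x3 shape are assumptions chosen to match the other examples:

import tensorflow as tf

def read_tfRecord(file_tfRecord):
    # Queue of TFRecord files; the reader pulls one serialized example at a time.
    queue = tf.train.string_input_producer([file_tfRecord])
    reader = tf.TFRecordReader()
    _, serialized = reader.read(queue)
    features = tf.parse_single_example(
        serialized,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, [32, 32, 3])
    image = tf.image.per_image_standardization(tf.cast(image, tf.float32))
    label = tf.cast(features['label'], tf.int32)
    return image, label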
Example 4
# Adapted from: https://www.tensorflow.org/get_started/mnist/pros

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import model
import os

data = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Load Model
with tf.variable_scope("deep-learning"):
    x = tf.placeholder(tf.float32, [None, 784])
    keep_prob = tf.placeholder(tf.float32)
    y, variables = model.mmodel(x, keep_prob)

# Training Model
y_ = tf.placeholder(tf.float32, [None, 10])
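# Note: a hand-rolled cross-entropy like the line below can produce NaN when
# y contains exact zeros; tf.nn.softmax_cross_entropy_with_logits is the
# numerically stable alternative.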
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Saver restricted to the model's variables, so the trained weights
# can be restored later for inference.
save = tf.train.Saver(variables)

# Train the model
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
        batch = data.train.next_batch(50)