Example #1
import numpy as np

import input_data


def add_data(mnist_data, data_list):
    """
    Args:
    mnist_data: original mnist_data
    data_list: list of numpy arrays to append

    Returns:
    the updated mnist_data
    """

    data_num = len(data_list)

    train_images = mnist_data.train.images
    train_labels = mnist_data.train.labels
    test_images = mnist_data.test.images
    test_labels = mnist_data.test.labels

    for data in data_list:
        add_num = len(data)
        train_images = fix_images(train_images, data)
        train_labels = fix_labels(train_labels, add_num)
        test_images = fix_images(test_images, data)
        test_labels = fix_labels(test_labels, add_num)

    VALIDATION_SIZE = 5000

    validation_images = train_images[:VALIDATION_SIZE]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_images = train_images[VALIDATION_SIZE:]
    train_labels = train_labels[VALIDATION_SIZE:]

    mnist_data.train = input_data.DataSet(train_images, train_labels, add=True)
    mnist_data.validation = input_data.DataSet(validation_images,
                                               validation_labels,
                                               add=True)
    mnist_data.test = input_data.DataSet(test_images, test_labels, add=True)

    return mnist_data
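fix_images and fix_labels are not defined in this example. A minimal sketch of what they might do, assuming numpy is imported as np (see above), that fix_images simply appends the flattened extra images, and that fix_labels pads the labels with uniform 1/10 rows in the same style augment_data uses below:

def fix_images(images, new_images):
    # hypothetical helper: flatten the extra images and append them
    new_images = np.asarray(new_images).reshape((len(new_images), -1))
    return np.concatenate((images, new_images))


def fix_labels(labels, add_num):
    # hypothetical helper: append add_num uniform (1/10) label rows
    uniform = np.full((add_num, labels.shape[1]), 1.0 / labels.shape[1])
    return np.concatenate((labels, uniform))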


def augment_data(data, folderpath=''):
    """Prepend images loaded via read_results (with uniform 1/10 labels) onto data.train."""

    images = data.train.images
    labels = data.train.labels

    images = images.reshape((-1, 28, 28, 1))
    imgs = [x for i in range(10) for x in read_results(i, folderpath)[-1][0]]
    imgs = np.array(imgs)[:, :, :, None]

    lbls = np.empty((imgs.shape[0], 10))
    lbls.fill(1.0 / 10)  # uniform probability over the 10 classes

    images = np.concatenate((imgs, images))
    labels = np.concatenate((lbls, labels))

    images, labels = _shuffle_images_labels(images, labels)

    new_train = input_data.DataSet(images, labels)

    data.train = new_train


def augment_random_data(data):
    """Prepend 800 all-zero and 800 random images (uniform 1/10 labels) onto data.train."""
    images = data.train.images
    labels = data.train.labels

    uniform_prob = np.empty((800, 10))
    uniform_prob.fill(1.0 / 10)  # uniform probability over the 10 classes

    images = np.concatenate((np.zeros((800, 784)), images))
    labels = np.concatenate((uniform_prob, labels))

    images = np.concatenate((np.random.rand(800, 784), images))
    labels = np.concatenate((uniform_prob, labels))

    images *= 255
    images = images.reshape((-1, 28, 28, 1))

    images, labels = _shuffle_images_labels(images, labels)

    new_train = input_data.DataSet(images, labels)

    data.train = new_train
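Both augmentation functions also rely on a _shuffle_images_labels helper that is not shown; a minimal sketch, assuming it only needs to permute both arrays with the same random order:

def _shuffle_images_labels(images, labels):
    # hypothetical helper: shuffle images and labels in unison
    perm = np.random.permutation(len(images))
    return images[perm], labels[perm]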
Example #4
correct = 0
wrong = 0

for i in range(cnt):
    files[i] = dir_name + "/" + files[i]
    print('files:', files[i])
    test_images1, test_labels1 = GetImage([files[i]])

    input_index = int(files[i].strip().split('/')[1][0])
    print("input_index:", input_index)

    # print (tf.cast(correct_prediction, tf.float32).eval)
    # print(shape(test_images1))
    mnist.test = input_data.DataSet(test_images1,
                                    test_labels1,
                                    dtype=tf.float32)
    res = accuracy.eval({x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})

    # print('++++++++++++++++++++++++++++++++++++++++++')
    # print(shape(mnist.test.images))
    # print('------------------------------------------')
    # print (tf.argmax(y, 1))
    # print(y.eval())
    output_res = int(res[0])
    print("output:", output_res)
    print("\n")

    # if(output_res==input_index):
    #   correct = correct + 1
    #   print("correct!\n")
# validation step
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # used to get accuracy
accuracy = tf.cast(tf.argmax(y_conv, 1),
                   tf.float32)  # used to get calculated label

labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

dir_name = "data/test/"
files = os.listdir(dir_name)
cnt = len(files)
#print(files)

test_images, test_labels = GetImage(dir_name, files)

test_set = input_data.DataSet(test_images, test_labels)

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, 'models/mnist_10.model')
    output = sess.run(accuracy,
                      feed_dict={
                          x: test_set.images,
                          y: test_set.labels,
                          keep_prob: 1.0
                      })
    for a in range(len(output)):
        print('input_' + str(a) + ':', labels[np.argmax(test_labels[a])])
        print('output_' + str(a) + ':', labels[int(output[a])])
    #print("output:",labels[res.argmax()])
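GetImage is another helper that is not shown (Example #4 calls it both as GetImage([path]) and as GetImage(dir_name, files)). A rough sketch of the single-list form, assuming PIL, 28x28 grayscale inputs, and the ground-truth digit encoded as the first character of the file name (as the input_index parsing above suggests):

import numpy as np
from PIL import Image


def GetImage(file_list):
    # hypothetical reader: each path looks like "test_num/3_foo.png", where the
    # leading digit of the file name is the ground-truth label
    images, labels = [], []
    for path in file_list:
        img = Image.open(path).convert('L').resize((28, 28))
        images.append(np.asarray(img, dtype=np.float32).reshape(784) / 255.0)
        digit = int(path.split('/')[-1][0])
        one_hot = np.zeros(10, dtype=np.float32)
        one_hot[digit] = 1.0
        labels.append(one_hot)
    return np.array(images), np.array(labels)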
Example #6
def run_training():
  train_data = input_data.DataSet('train.txt')
  test_data = input_data.DataSet('test.txt')

  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and truth.
    images_placeholder, truth_placeholder = placeholder_inputs(
        FLAGS.batch_size)

    # Build a Graph that computes result from the inference model.
    result = face_net.inference(images_placeholder)

    # Add to the Graph the Ops for loss calculation.
    loss = face_net.loss(result, truth_placeholder)

    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = face_net.training(loss, FLAGS.learning_rate)

    # Build the summary Tensor based on the TF collection of Summaries.
    summary = tf.summary.merge_all()

    # Add the variable initializer Op.
    init = tf.global_variables_initializer()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

    # And then after everything is built:

    # Run the Op to initialize the variables.
    sess.run(init)

    total_time = 0
    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()

      # Fill a feed dictionary with the actual set of images and truth
      # for this particular training step.
      feed_dict = fill_feed_dict(train_data,
                                 images_placeholder,
                                 truth_placeholder)

      # Run one step of the model.  The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op.  To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)

      duration = time.time() - start_time
      total_time = total_time + duration
      # Write the summaries and print an overview fairly often.
      if (step+1) % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec total)' % (step+1, loss_value, total_time))
        # Update the events file.
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()

      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 5000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                loss,
                images_placeholder,
                truth_placeholder,
                train_data)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess,
                loss,
                images_placeholder,
                truth_placeholder,
                test_data)

    summary_writer.flush()

    print('Total time: %.3f sec' % (total_time))
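placeholder_inputs, fill_feed_dict, do_eval and face_net come from the surrounding training script and are not shown. A minimal sketch of the two data-feeding helpers, assuming the custom DataSet exposes next_batch and num_examples like the MNIST tutorial's DataSet:

def fill_feed_dict(data_set, images_pl, truth_pl):
  # hypothetical helper: fetch the next batch and map it onto the placeholders
  images_feed, truth_feed = data_set.next_batch(FLAGS.batch_size)
  return {images_pl: images_feed, truth_pl: truth_feed}


def do_eval(sess, loss_op, images_pl, truth_pl, data_set):
  # hypothetical helper: report the mean loss over one pass of the data set
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  total_loss = 0.0
  for _ in xrange(steps_per_epoch):
    total_loss += sess.run(loss_op,
                           feed_dict=fill_feed_dict(data_set, images_pl, truth_pl))
  print('  mean loss = %.4f' % (total_loss / steps_per_epoch))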
Example #7
        final_train_labels = train_labels
else:
    final_train_images = train_images
    final_train_labels = train_labels

print('total number of images used for training : ', len(final_train_images))
# set aside a few images for validation and tuning
validation_images = final_train_images[:VALIDATION_SIZE]
validation_labels = final_train_labels[:VALIDATION_SIZE]

# train images and labels
train_images = final_train_images[VALIDATION_SIZE:]
train_labels = final_train_labels[VALIDATION_SIZE:]

# update data_sets class
data_sets.train = input_data.DataSet(train_images, train_labels)
data_sets.validation = input_data.DataSet(validation_images, validation_labels)
data_sets.test = input_data.DataSet(test_images, test_labels)

data = data_sets

# model
with tf.variable_scope("convolutional"):
    x = tf.placeholder(tf.float32, [None, 784])
    keep_prob = tf.placeholder(tf.float32)
    y, variables = model.convolutional(x, keep_prob)

# train
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
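Example #7 stops right after defining the optimizer. A minimal continuation under the usual MNIST-tutorial assumptions (the DataSet objects built above expose next_batch, and keep_prob is a dropout probability) might look like this; the batch size and step count are placeholders:

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(2000):  # step count is a placeholder
        batch_xs, batch_ys = data.train.next_batch(50)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
        if (step + 1) % 500 == 0:
            val_acc = sess.run(accuracy,
                               feed_dict={x: data.validation.images,
                                          y_: data.validation.labels,
                                          keep_prob: 1.0})
            print('step %d, validation accuracy %g' % (step + 1, val_acc))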
Example #8
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
#accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accuracy = tf.cast(tf.argmax(y_conv, 1), tf.float32)  # used to get the predicted label
sess = tf.InteractiveSession()

saver = tf.train.Saver()
saver.restore(sess, "/home/tuopan/PycharmProjects/test/model/model.ckpt")
print(sess.run(b_conv1))
#batch = mnist.train.next_batch(10)
print('start batching')
#print("test accuracy :" ,sess.run(accuracy,feed_dict={
#   x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
dir_name = "test_num"
files = os.listdir(dir_name)
cnt = len(files)
for i in range(cnt):
    files[i] = dir_name + "/" + files[i]
    test_images1, test_labels1 = GetImage([files[i]])
    mnist.test = input_data.DataSet(test_images1, test_labels1)
    res = sess.run(accuracy,
                   feed_dict={
                       x: mnist.test.images,
                       y_: mnist.test.labels,
                       keep_prob: 1.0
                   })
    print("output:", int(res[0]))
    print("\n")
print('end batching')