Code example #1
def model(Input_node, Layer1_node, Output_node, Learning_rate_base,
          Learning_rate_decay, Batch_size):
    # `mnist` and `training_steps`, along with the helpers input_data,
    # parameter_initialize, inference, loss, train and evaluate, are assumed
    # to be defined at module level.
    x, y = input_data(Input_node, Output_node)
    parameter = parameter_initialize(Input_node, Layer1_node, Output_node)
    y_pred = inference(x, parameter)
    cost = loss(y_pred, y, parameter)
    train_step = train(Learning_rate_base, Learning_rate_decay, Batch_size,
                       cost)
    accuracy = evaluate(y, y_pred)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        # feed dict for the validation set
        validate_feed = {
            x: mnist.validation.images,
            y: mnist.validation.labels
        }
        # feed dict for the test set
        test_feed = {x: mnist.test.images, y: mnist.test.labels}

        for i in range(training_steps):
            if i % 1000 == 0:
                validate_accuracy = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training steps ,validation accuracy is %g" %
                      (i, validate_accuracy))

            # fetch a batch of Batch_size examples on every training step
            x_batch, y_batch = mnist.train.next_batch(Batch_size)
            sess.run(train_step, feed_dict={x: x_batch, y: y_batch})

        test_accuracy = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training steps ,test accuracy is %g" %
              (training_steps, test_accuracy))
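
For context, this helper would typically be driven by a short launcher call; the hyperparameter values below are illustrative assumptions, not taken from the snippet:

# Hypothetical invocation; every value here is an assumption for illustration.
model(Input_node=784,           # 28*28 flattened MNIST pixels
      Layer1_node=500,          # hidden-layer width
      Output_node=10,           # ten digit classes
      Learning_rate_base=0.8,
      Learning_rate_decay=0.99,
      Batch_size=100)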
Code example #2
File: haixue.py Project: zsjohny/tensorflow
import tempfile

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


def main(_):
    # Import data
    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)  # data directory is an assumption

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x)

    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                                logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)

    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    graph_location = tempfile.mkdtemp()
    print('Saving graph to: %s' % graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(20000):
            batch = mnist.train.next_batch(50)
            if i % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    keep_prob: 1.0
                })
                print('step %d, training accuracy %g' % (i, train_accuracy))
            train_step.run(feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob: 0.5
            })

        print('test accuracy %g' % accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels,
            keep_prob: 1.0
        }))
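
The example relies on a deepnn(x) builder that is not shown. A minimal sketch of what such a function needs to return (logits plus a dropout keep_prob placeholder); the layer sizes follow the common two-conv-layer MNIST architecture and are assumptions, not taken from this snippet:

def deepnn(x):
    """Sketch of the assumed deepnn builder; sizes are illustrative."""
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # First conv layer: 32 5x5 filters, ReLU, then 2x2 max pooling.
    W1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
    b1 = tf.Variable(tf.constant(0.1, shape=[32]))
    h1 = tf.nn.relu(tf.nn.conv2d(x_image, W1, strides=[1, 1, 1, 1],
                                 padding='SAME') + b1)
    p1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                        padding='SAME')

    # Second conv layer: 64 5x5 filters, ReLU, then 2x2 max pooling.
    W2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
    b2 = tf.Variable(tf.constant(0.1, shape=[64]))
    h2 = tf.nn.relu(tf.nn.conv2d(p1, W2, strides=[1, 1, 1, 1],
                                 padding='SAME') + b2)
    p2 = tf.nn.max_pool(h2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                        padding='SAME')

    # Fully connected layer on the flattened 7x7x64 feature map.
    W_fc = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1))
    b_fc = tf.Variable(tf.constant(0.1, shape=[1024]))
    flat = tf.reshape(p2, [-1, 7 * 7 * 64])
    h_fc = tf.nn.relu(tf.matmul(flat, W_fc) + b_fc)

    # Dropout, controlled by a placeholder so it can be disabled at test time.
    keep_prob = tf.placeholder(tf.float32)
    h_drop = tf.nn.dropout(h_fc, keep_prob)

    # Readout layer producing unscaled logits for the 10 classes.
    W_out = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))
    b_out = tf.Variable(tf.constant(0.1, shape=[10]))
    return tf.matmul(h_drop, W_out) + b_out, keep_prob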
Code example #3
from tflearn.layers.core import input_data
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
# `mnist` is assumed to be an already-loaded MNIST dataset object, e.g. from
# tensorflow.examples.tutorials.mnist's read_data_sets.

image_rows = 28
image_cols = 28

# reshape the training and test images to 28 X 28 X 1
train_images = mnist.train.images.reshape(mnist.train.images.shape[0],
                                          image_rows, image_cols, 1)
test_images = mnist.test.images.reshape(mnist.test.images.shape[0], image_rows,
                                        image_cols, 1)

num_classes = 10
keep_prob = 0.5  # fraction to keep (0-1.0)

# Define the shape of the data coming into the NN
input = input_data(shape=[None, 28, 28, 1], name='input')

# Do convolution on images, add bias and push through RELU activation
network = conv_2d(input,
                  nb_filter=32,
                  filter_size=3,
                  activation='relu',
                  regularizer="L2")
#   Notice name was not specified.  The name defaults to "Conv2D" and will be suffixed with "_n",
#   where n is the number of the occurrence.  Nice!
# take results and run through max_pool
network = max_pool_2d(network, 2)

# 2nd Convolution layer
# Do convolution on images, add bias and push through RELU activation
network = conv_2d(network,
                  nb_filter=64,
                  filter_size=3,
                  activation='relu',
                  regularizer="L2")
network = max_pool_2d(network, 2)
Code example #4
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
# As above, `mnist` is assumed to be an already-loaded MNIST dataset object.


image_rows = 28
image_cols = 28

# reshape the training and test images to 28 x 28 x 1
train_images = mnist.train.images.reshape(mnist.train.images.shape[0], image_rows, image_cols, 1)
test_images = mnist.test.images.reshape(mnist.test.images.shape[0], image_rows, image_cols, 1)

num_classes = 10
keep_prob = .5

# 1st Conv Layer
input = input_data(shape=[None, 28, 28, 1], name="input")  # defines shape of data coming into NN
network = conv_2d(input, nb_filter=32, filter_size=3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)

# 2nd Conv Layer
network = conv_2d(network, nb_filter=64, filter_size=3, activation="relu", regularizer="L2")
network = max_pool_2d(network, 2)

# FC Layer
network = fully_connected(network, 128, activation="tanh")

# Dropout
network = dropout(network, keep_prob)

# Readout layer
network = fully_connected(network, 10, activation="softmax")
Code example #5
# victor
# Created 2017-12-23 18:36

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from tensorflow.python.framework import ops

ops.reset_default_graph()

sess = tf.Session()

mnist = input_data("/temp/MNIST/", one_hot=True)

print(mnist.shape)
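
matplotlib is imported but never used in the snippet; a small illustrative follow-up (an assumption, not part of the original) that plots the first training digit:

# Illustrative only: render the first training image as a 28x28 grayscale digit.
image = mnist.train.images[0].reshape(28, 28)
plt.imshow(image, cmap='gray')
plt.title('label: %d' % np.argmax(mnist.train.labels[0]))
plt.show()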
Code example #6
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data("MNIST_data/", one_hot=True)
# Parameters
learning_rate = 0.01  # empirically, 0.01 works well here
training_epochs = 10  # train for 10 epochs
batch_size = 256
display_step = 1
# hidden layer settings
n_input = 784
n_hidden_1 = 128
n_hidden_2 = 64
n_hidden_3 = 10
n_hidden_4 = 2

weights = {
    'encoder_h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2])),
    'encoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3])),
    'encoder_h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4])),
    'decoder_h1': tf.Variable(tf.truncated_normal([n_hidden_4, n_hidden_3])),
    'decoder_h2': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_2])),
    'decoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_1])),
    'decoder_h4': tf.Variable(tf.truncated_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])),
    'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b4': tf.Variable(tf.random_normal([n_input])),
}
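
The dictionaries describe a 784 -> 128 -> 64 -> 10 -> 2 autoencoder. A hedged sketch of the encoder/decoder functions these variables would plug into; the sigmoid activations and the linear 2-D code layer follow a common autoencoder tutorial pattern and are assumptions:

def encoder(x):
    # 784 -> 128 -> 64 -> 10 -> 2, sigmoid at each hidden layer (assumed).
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
                                   biases['encoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
                                   biases['encoder_b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']),
                                   biases['encoder_b3']))
    # The 2-D code layer is often left linear so the codes spread out for plotting.
    layer_4 = tf.add(tf.matmul(layer_3, weights['encoder_h4']),
                     biases['encoder_b4'])
    return layer_4

def decoder(x):
    # 2 -> 10 -> 64 -> 128 -> 784, mirroring the encoder.
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
                                   biases['decoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
                                   biases['decoder_b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']),
                                   biases['decoder_b3']))
    layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']),
                                   biases['decoder_b4']))
    return layer_4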
Code example #7
# This fragment assumes x, y, pred, cost, learning_rate, training_iters,
# batch_size, display_step and a `training` flag are defined earlier
# (a hedged sketch of such definitions follows the example).
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
#correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
#accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = input_data(step)  # assumed to be a project-specific batch loader
        # Reshape data to get 28 seq of 28 elements
        # batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        if training:
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
            # saver.save(sess,"./model/model.ckpt")
            if step % display_step == 0:
                # Calculate batch accuracy
                # acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
                # Calculate batch loss
                loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
                print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                      "{:.6f}".format(loss))
                #print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                #      "{:.6f}".format(loss) + ", Training Accuracy= " + \