Example 1
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# learning rate decay: the current value is fed in at each training step
learning_rate_decay = tf.placeholder(tf.float32)

# training step; the learning rate comes from the placeholder above
# train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)  # fixed-rate alternative
# train_step = tf.train.AdamOptimizer(0.005).minimize(cross_entropy)             # fixed-rate alternative
train_step = tf.train.AdamOptimizer(learning_rate_decay).minimize(cross_entropy)
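# One common way to drive this placeholder is an exponential schedule computed
# in the training loop and passed through feed_dict. A minimal sketch; the
# constants and the helper name decayed_lr are illustrative, not from the
# original script:
import math

lrmax, lrmin, decay_speed = 0.003, 0.0001, 2000.0  # assumed values

def decayed_lr(i):
    # decays from lrmax toward lrmin as the step counter i grows
    return lrmin + (lrmax - lrmin) * math.exp(-i / decay_speed)

# inside the training loop:
# sess.run(train_step, feed_dict={X: batch_X, Y_: batch_Y,
#                                 learning_rate_decay: decayed_lr(i)})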

# matplotlib visualisation
allweights = tf.reshape(W3, [-1])
allbiases = tf.reshape(b3, [-1])
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)  # assembles 10x10 images by default
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)  # 1000 images on 25 lines
datavis = tensorflowvisu.MnistDataVis()

# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)


Example 2

# cross-entropy loss: softmax_cross_entropy_with_logits works on the raw logits
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy) * 100  # normalized for batches of 100 images
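# Sanity check: the built-in op computes the same quantity as the manual
# softmax/log formula. A standalone sketch with made-up logits (TF1-style
# session API, matching the rest of this file; assumes tf is imported):
demo_logits = tf.constant([[2.0, 1.0, 0.1]])
demo_labels = tf.constant([[1.0, 0.0, 0.0]])
builtin_xent = tf.nn.softmax_cross_entropy_with_logits(logits=demo_logits, labels=demo_labels)
manual_xent = -tf.reduce_sum(demo_labels * tf.log(tf.nn.softmax(demo_logits)), 1)
with tf.Session() as demo_sess:
    print(demo_sess.run([builtin_xent, manual_xent]))  # both approx. [0.417]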

# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# matplotlib visualisation
allweights = tf.concat([tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1])], 0)
allbiases  = tf.concat([tf.reshape(B1, [-1]), tf.reshape(B2, [-1]), tf.reshape(B3, [-1])], 0)
# to use for sigmoid
#allactivations = tf.concat([tf.reshape(Y1, [-1]), tf.reshape(Y2, [-1]), tf.reshape(Y3, [-1]), tf.reshape(Y4, [-1])], 0)
# to use for RELU
allactivations = tf.concat([tf.reduce_max(Y1, [0]), tf.reduce_max(Y2, [0]), tf.reduce_max(Y3, [0]), tf.reduce_max(Y4, [0])], 0)
alllogits = tf.concat([tf.reshape(Y1l, [-1]), tf.reshape(Y2l, [-1]), tf.reshape(Y3l, [-1]), tf.reshape(Y4l, [-1])], 0)
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)
datavis = tensorflowvisu.MnistDataVis(title4="Logits", title5="Max activations across batch", histogram4colornum=2, histogram5colornum=2)
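# Why reduce_max for ReLU but reshape for sigmoid: a ReLU unit that outputs 0
# for every image in the batch is "dead", so the per-neuron maximum over the
# batch axis is the telling statistic to histogram. A shape-level sketch with
# hypothetical sizes:
Y1_demo = tf.zeros([100, 200])                 # batch of 100, 200 ReLU units (made up)
per_neuron_max = tf.reduce_max(Y1_demo, [0])   # shape [200]: one max per unit
every_activation = tf.reshape(Y1_demo, [-1])   # shape [20000]: all values individually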


# training step, the learning rate is a placeholder
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)


Example 3

# reduce_mean divides by all 100*10 = 1000 elements of the batch tensor,
# so multiplying by 1000.0 restores the total cross-entropy over the
# 100-image batch
cross_entropy = -tf.reduce_mean(Y_ * tf.log(Y)) * 1000.0

# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
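# Concretely, for a toy batch of three predictions (made-up numbers):
# argmax picks class 1, 0, 1 while the labels say 1, 0, 0, so two of three
# rows match and the accuracy is 2/3.
import numpy as np
demo_pred = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
demo_true = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 0.0]])
demo_correct = np.argmax(demo_pred, 1) == np.argmax(demo_true, 1)  # [True, True, False]
print(demo_correct.astype(np.float32).mean())                      # 0.6666667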

# training, learning rate = 0.005
train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)

# matplotlib visualisation
allweights = tf.reshape(W, [-1])
allbiases = tf.reshape(b, [-1])
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)  # assembles 10x10 images by default
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)  # 1000 images on 25 lines
datavis = tensorflowvisu.MnistDataVis()

# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)


Example 4

# matplotlib visualisation
allweights = tf.concat(values=[
    tf.reshape(tensor=W1, shape=[-1]),
    tf.reshape(tensor=W2, shape=[-1]),
    tf.reshape(tensor=W3, shape=[-1]),
    tf.reshape(tensor=W4, shape=[-1]),
    tf.reshape(tensor=W5, shape=[-1])
], axis=0)
allbiases = tf.concat(values=[
    tf.reshape(tensor=B1, shape=[-1]),
    tf.reshape(tensor=B2, shape=[-1]),
    tf.reshape(tensor=B3, shape=[-1]),
    tf.reshape(tensor=B4, shape=[-1]),
    tf.reshape(tensor=B5, shape=[-1])
], axis=0)
I = tensorflowvisu.tf_format_mnist_images(X=X, Y=Y, Y_=Y_)
It = tensorflowvisu.tf_format_mnist_images(X=X, Y=Y, Y_=Y_, n=1000, lines=25)
datavis = tensorflowvisu.MnistDataVis()

# training step, the learning rate is a placeholder
train_step = tf.train.AdamOptimizer(learning_rate=lr).minimize(
    loss=cross_entropy)

# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(fetches=init)


Example 5

# ...............................initialization...............................

inputs, labels, step, keep = init.get_placeholders([None, 28, 28, 1], [None, 10], True, True)
lr = init.get_lr(0.003, True)

# ....................................model...................................

logits = model.five_layers_conv_model(inputs, keep)

# .................................train step.................................

cross_entropy, accuracy, train_step = opt.train_step(lr, logits, labels, 100, tf.train.AdamOptimizer)
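# opt.train_step is a project-specific helper whose body is not shown here.
# Judging from the earlier examples it plausibly bundles the loss, accuracy
# and optimizer ops along these lines; a hypothetical reconstruction, not the
# actual module:
def train_step_sketch(lr, logits, labels, loss_scale, optimizer_cls):
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    xent = tf.reduce_mean(xent) * loss_scale
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    acc = tf.reduce_mean(tf.cast(correct, tf.float32))
    return xent, acc, optimizer_cls(lr).minimize(xent)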

# .................................visualization..............................

I = tensorflowvisu.tf_format_mnist_images(inputs, tf.nn.softmax(logits), labels)
It = tensorflowvisu.tf_format_mnist_images(inputs, tf.nn.softmax(logits), labels, 1000, lines=25)
datavis = tensorflowvisu.MnistDataVis()

# init (note: this rebinds `init`, which until here referred to the helper module)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

def training_step(i, update_test_data, update_train_data):
    # training on batches of 100 images with 100 labels
    batch_X, batch_Y = mnist.train.next_batch(100)

    if update_train_data:
        a, c, im, l = sess.run(
            [accuracy, cross_entropy, I, lr],
            feed_dict={inputs: batch_X, labels: batch_Y, step: i, keep: 1.0})
        print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c) + " (lr:" + str(l) + ")")
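# A typical driver for training_step: refresh train stats every 20 steps and
# test stats every 100 (the frequencies here are an assumption, not taken
# from the original script):
for i in range(10000 + 1):
    training_step(i, i % 100 == 0, i % 20 == 0)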