Example #1
learning_rate_decay = tf.placeholder(tf.float32)

# training step: fixed learning-rate (0.005) variants kept for reference,
# the active optimiser reads the learning rate from the placeholder
# train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)
# train_step = tf.train.AdamOptimizer(0.005).minimize(cross_entropy)
train_step = tf.train.AdamOptimizer(learning_rate_decay).minimize(
    cross_entropy)
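
# Hedged sketch (assumption, not in the original snippet): the placeholder is
# meant to be fed a fresh value at every training step, e.g. an exponential
# decay like the one in Example #6. The helper and constants below are
# illustrative only.
import math
max_lr, min_lr, decay_speed = 0.003, 0.0001, 2000.0  # illustrative values
def decayed_lr(i):
    return min_lr + (max_lr - min_lr) * math.exp(-i / decay_speed)
# sess.run(train_step, {X: batch_X, Y_: batch_Y, learning_rate_decay: decayed_lr(i)})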

# matplotlib visualisation
allweights = tf.reshape(W3, [-1])
allbiases = tf.reshape(b3, [-1])
I = tensorflowvisu.tf_format_mnist_images(
    X, Y, Y_)  # assembles 10x10 images by default
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000,
                                           lines=25)  # 1000 images on 25 lines
datavis = tensorflowvisu.MnistDataVis()

# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)


# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):

    # training on batches of 100 images with 100 labels
    batch_X, batch_Y = mnist.train.next_batch(100)

    # compute training values for visualisation
    if update_train_data:
Example #2

allactivations = tf.concat([
    tf.reduce_max(Y1, [0]),
    tf.reduce_max(Y2, [0]),
    tf.reduce_max(Y3, [0]),
    tf.reduce_max(Y4, [0])
], 0)
alllogits = tf.concat([
    tf.reshape(Y1l, [-1]),
    tf.reshape(Y2l, [-1]),
    tf.reshape(Y3l, [-1]),
    tf.reshape(Y4l, [-1])
], 0)
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)
datavis = tensorflowvisu.MnistDataVis(title4="Logits",
                                      title5="Max activations across batch",
                                      histogram4colornum=2,
                                      histogram5colornum=2)

# training step, the learning rate is a placeholder
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)


# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):

    # training on batches of 100 images with 100 labels
    batch_X, batch_Y = mnist.train.next_batch(100)
Example #3

allbiases = tf.concat([
    tf.reshape(B1, [-1]),
    tf.reshape(B2, [-1]),
    tf.reshape(B3, [-1]),
    tf.reshape(B4, [-1]),
    tf.reshape(B5, [-1])
], 0)
conv_activations = tf.concat([
    tf.reshape(tf.reduce_max(Y1r, [0]), [-1]),
    tf.reshape(tf.reduce_max(Y2r, [0]), [-1]),
    tf.reshape(tf.reduce_max(Y3r, [0]), [-1])
], 0)
dense_activations = tf.reduce_max(Y4r, [0])
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)
datavis = tensorflowvisu.MnistDataVis(title4="batch-max conv activation",
                                      title5="batch-max dense activations",
                                      histogram4colornum=2,
                                      histogram5colornum=2)

# training step, the learning rate is a placeholder
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

# init
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess = tf.Session()
sess.run(init)


# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):
Example #4
# cross-entropy loss; softmax_cross_entropy_with_logits avoids the numerical
# stability problems of log(0), which is NaN
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100  # normalised for batches of 100 images

# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
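
# Hedged illustration (assumption, not from the original): with 2 one-hot
# labels, argmax reduces each row to a class index, equal compares them, and
# the mean of the cast booleans is the accuracy, e.g.
#   Y  = [[0.9, 0.1], [0.2, 0.8]], Y_ = [[1, 0], [0, 1]]
#   -> argmax: [0, 1] vs [0, 1] -> equal: [True, True] -> accuracy = 1.0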

# matplotlib visualisation
allweights = tf.concat([tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1]), tf.reshape(W5, [-1])], 0)
allbiases  = tf.concat([tf.reshape(B1, [-1]), tf.reshape(B2, [-1]), tf.reshape(B3, [-1]), tf.reshape(B4, [-1]), tf.reshape(B5, [-1])], 0)
allactivations = tf.concat([tf.reshape(Y1, [-1]), tf.reshape(Y2, [-1]), tf.reshape(Y3, [-1]), tf.reshape(Y4, [-1])], 0)
alllogits = tf.concat([tf.reshape(Y1l, [-1]), tf.reshape(Y2l, [-1]), tf.reshape(Y3l, [-1]), tf.reshape(Y4l, [-1]), tf.reshape(Ylogits, [-1])], 0)
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)
datavis = tensorflowvisu.MnistDataVis(title4="Logits", title5="Activations", histogram4colornum=2, histogram5colornum=2)

# training step, the learning rate is a placeholder
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)


# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):

    # training on batches of 100 images with 100 labels
    batch_X, batch_Y = mnist.train.next_batch(100)
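
# Usage sketch (the update frequencies are illustrative assumptions, not from
# the original snippet): training_step is driven from a loop, refreshing the
# test and train visualisation data periodically.
for i in range(10001):
    training_step(i, i % 100 == 0, i % 20 == 0)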
Example #5
weights = tf.concat([
    tf.reshape(W1, [-1]),
    tf.reshape(W2, [-1]),
    tf.reshape(W3, [-1]),
    tf.reshape(W4, [-1]),
    tf.reshape(W5, [-1])
], 0)
biases = tf.concat([
    tf.reshape(B1, [-1]),
    tf.reshape(B2, [-1]),
    tf.reshape(B3, [-1]),
    tf.reshape(B4, [-1]),
    tf.reshape(B5, [-1])
], 0)
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, lines=25)
visualization = tensorflowvisu.MnistDataVis()

train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)


# training function
def training_step(i, update_test_data, update_train_data):
    batch_X, batch_Y = mnist.train.next_batch(100)
    train_data = {X: batch_X, Y_: batch_Y}

    # learning rate decay
    max_lr = 0.005
Example #6
# tensor containing only the labels of the negative images
labels_negatifs = tf.gather(Y_, indices_negatifs)
labels_negatifs = tf.reshape(labels_negatifs, [tf.size(indices_negatifs), 2])
# tensor containing only the predictions for the negative images
predictions_negatifs = tf.gather(Y, indices_negatifs)
predictions_negatifs = tf.reshape(predictions_negatifs, [tf.size(indices_negatifs), 2])
# accuracy on the negative images
correct_prediction_neg = tf.equal(tf.argmax(labels_negatifs, 1), tf.argmax(predictions_negatifs, 1))
accuracy_neg = tf.reduce_mean(tf.cast(correct_prediction_neg, tf.float32))
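
# Hedged sketch (hypothetical, not from the original snippet): indices_negatifs
# is assumed to select the rows whose one-hot label marks the negative class,
# for instance via tf.where on the label argmax:
# indices_negatifs = tf.reshape(tf.where(tf.equal(tf.argmax(Y_, 1), 0)), [-1])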

# matplotlib visualisation
allweights = tf.concat(
    [tf.reshape(W1, [-1]), tf.reshape(Wfc1, [-1])], 0)
allbiases = tf.concat(
    [tf.reshape(B1, [-1]), tf.reshape(Bfc1, [-1])], 0)
dense_activations = tf.reduce_max(Yfc1r, [0])
datavis = tensorflowvisu.MnistDataVis(histogram4colornum=2, histogram5colornum=2)

train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)  # choice of optimiser

# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

saver = tf.train.Saver()
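
# Usage sketch (the path is an illustrative assumption): checkpoints can be
# written during training with the saver created above, e.g.
# saver.save(sess, "checkpoints/model.ckpt", global_step=i)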

def training_step(i, update_test_data, update_train_data):
    batch_x, batch_y = mnist.train.next_batch(BATCH_SIZE)

    learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i / decay_speed)
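    # Hedged note (not from the original snippet): this is exponential decay
    # from max_learning_rate at i = 0 towards min_learning_rate as i grows;
    # at i = decay_speed the remaining gap is exp(-1), about 37% of the
    # initial gap. Assumes `import math` at the top of the script.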