Example #1
# (session setup assumed; the original snippet begins inside this block)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(total_batch):
        batch, label = mnist.train.next_batch(batch_size)
        err, _ = sess.run([rbm1.mse, rbm1.train_op], feed_dict={X: batch})

        if i % 100 == 0:
            print("RBM1", i, "[%d]" % total_batch, err)

    for i in range(total_batch):
        batch, label = mnist.train.next_batch(batch_size)
        err, _ = sess.run([rbm2.mse, rbm2.train_op], feed_dict={X: batch})

        if i % 100 == 0:
            print("RBM2", i, "[%d]" % total_batch, err)

    w1s = rbm1.W.eval()
    w2s = rbm2.W.eval()
    vb1s = rbm1.b_in.eval()
    hb1s = rbm1.b_out.eval()
    vr, h1s = sess.run([rbm1.rec, rbm1.out], feed_dict={X: teX[:20, :]})

# visualization of weights
draw_weights(w1s, v_shape, Nh)
plt.savefig("ex2_RBM_weights_1")
draw_weights(w2s, (10, 10), 121)
plt.savefig("ex2_RBM_weights_2")

# visualization of reconstructions and states
draw_reconstructions(teX, vr, h1s, v_shape, h1_shape, Nh)
plt.savefig("ex2_RBM_reconstruction")
Example #2
# (opening reconstructed from Example #1; the original snippet is truncated here)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(total_batch):
        batch, label = mnist.train.next_batch(batch_size)
        err, _ = sess.run([rbm1.mse, rbm1.train_op], feed_dict={X: batch})

        if i % 100 == 0:
            print("RBM1", i, "[%d]" % total_batch, err)

    for i in range(total_batch):
        batch, label = mnist.train.next_batch(batch_size)
        err, _ = sess.run([rbm2.mse, rbm2.train_op], feed_dict={X: batch})

        if i % 100 == 0:
            print("RBM2", i, "[%d]" % total_batch, err)

    w1s = rbm1.W.eval()
    w2s = rbm2.W.eval()
    vb1s = rbm1.b_in.eval()
    hb1s = rbm1.b_out.eval()
    vr, h1s, h2s = sess.run([rbm1.rec, rbm1.out, rbm2.out],
                            feed_dict={X: teX[:20, :]})

# visualization of weights
draw_weights(w1s, v_shape, Nh)
plt.savefig("ex2b_RBM_weights_1")
draw_weights(w2s, (10, 10), 784)
plt.savefig("ex2b_RBM_weights_2")

# visualization of reconstructions and states
draw_reconstructions(teX, vr, h1s, v_shape, h1_shape, Nh)
plt.savefig("ex2b_RBM_reconstruction_1")

draw_reconstructions(teX, vr, h2s, v_shape, (28, 28), 784)
plt.savefig("ex2b_RBM_reconstruction_2")
Example #3
# (loop header assumed; the original snippet begins inside the training loop)
for i in range(total_batch):
    batch, label = mnist.train.next_batch(batch_size)
    err, _ = sess.run([rbm1.err_sum, rbm1.update_all],
                      feed_dict={rbm1.x: batch})

    if i % (int(total_batch / 10)) == 0:
        print("Batch = ", i, "Error = ", err)

w1s = rbm1.w.eval(session=sess)
vb1s = rbm1.v_bias.eval(session=sess)
hb1s = rbm1.h_bias.eval(session=sess)
vr, h1s = sess.run([rbm1.v1_prob, rbm1.h1],
                   feed_dict={rbm1.x: teX[0:RECONSTRUCTION_DRAW_COUNT, :]})

# Visualization of weights
draw_weights(w1s, V_SHAPE, rbm1.h_dim)
# Visualization of reconstructions and states
draw_reconstructions(teX, vr, h1s, V_SHAPE, H1_SHAPE,
                     RECONSTRUCTION_DRAW_COUNT)
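
# `draw_weights` and `draw_reconstructions` are helpers defined elsewhere in the
# project. As a rough idea of what the first one does, the sketch below tiles each
# hidden unit's incoming weight vector as a small image; the signature is only
# inferred from the call sites above, not taken from the original code.
import math
import matplotlib.pyplot as plt


def draw_weights(W, v_shape, n_hidden):
    # Plot each column of W (one hidden unit's weights) as a v_shape image
    cols = int(math.ceil(math.sqrt(n_hidden)))
    rows = int(math.ceil(n_hidden / float(cols)))
    plt.figure(figsize=(cols, rows))
    for j in range(n_hidden):
        plt.subplot(rows, cols, j + 1)
        plt.imshow(W[:, j].reshape(v_shape), cmap="gray", interpolation="nearest")
        plt.axis("off")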

#
# SAMPLE
#

# Visualization of reconstructions built up by gradually adding the contributions
# of the active hidden units; the first argument is the index of the digit in the data matrix
reconstruct(0, h1s, teX, w1s, vb1s, V_SHAPE, H1_SHAPE, H1_DIM)
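
# `reconstruct` is also defined elsewhere in the project. Based on the comment and
# the call above, a plausible sketch is shown below; the signature, the 0.5 activity
# threshold and the plotting details are assumptions, and the unused arguments are
# kept only to match the call site.
import numpy as np
import matplotlib.pyplot as plt


def reconstruct(ind, h, X, W, vb, v_shape, h_shape, h_dim):
    # Rebuild one digit by summing, one at a time, the weight columns of its
    # active hidden units on top of the visible bias
    active = np.nonzero(h[ind] > 0.5)[0][:19]     # first few active hidden units
    partial = vb.copy()                           # start from the visible bias
    steps = [partial.copy()]
    for j in active:
        partial = partial + W[:, j]               # add one unit's contribution
        steps.append(partial.copy())

    plt.figure(figsize=(len(steps), 2))
    for k, v in enumerate(steps):
        plt.subplot(1, len(steps), k + 1)
        probs = 1.0 / (1.0 + np.exp(-v))          # sigmoid of the running sum
        plt.imshow(probs.reshape(v_shape), vmin=0, vmax=1, interpolation="nearest")
        plt.axis("off")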

# Probability of each hidden unit being active, averaged over the input samples
plt.figure()
tmp = (h1s.sum(0) / h1s.shape[0]).reshape(H1_SHAPE)
plt.imshow(tmp, vmin=0, vmax=1, interpolation="nearest")
plt.axis('off')
plt.colorbar()