Example #1
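# NOTE: this snippet assumes surrounding context not shown here:
# `import numpy as np`, `import tensorflow as tf`, an input tensor `x`,
# a cluster count `k`, and the helpers (Constant, qy_graph, qz_graph,
# px_graph, labeled_loss, cross_entropy_with_logits) defined elsewhere.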
# binarize data and create a y "placeholder"
with tf.name_scope('x_binarized'):
    xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)),
                 tf.float32)
with tf.name_scope('y_'):
    y_ = tf.fill(tf.stack([tf.shape(x)[0], k]), 0.0)
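# y_ is an all-zeros (batch, k) tensor; adding np.eye(k)[i] below turns it
# into a batch of one-hot vectors selecting class i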

# propose distribution over y
qy_logit, qy = qy_graph(xb, k)

# for each proposed y, infer z and reconstruct x
z, zm, zv, zm_prior, zv_prior, px_logit = [[None] * k for i in range(6)]
for i in range(k):
    with tf.name_scope('graphs/hot_at{:d}'.format(i)):
        y = tf.add(y_, Constant(np.eye(k)[i], name='hot_at_{:d}'.format(i)))
        z[i], zm[i], zv[i] = qz_graph(xb, y)
        zm_prior[i], zv_prior[i], px_logit[i] = px_graph(z[i], y)

# Aggressive name scoping for pretty graph visualization :P
with tf.name_scope('loss'):
    with tf.name_scope('neg_entropy'):
        nent = -cross_entropy_with_logits(qy_logit, qy)
    losses = [None] * k
    for i in range(k):
        with tf.name_scope('loss_at{:d}'.format(i)):
            losses[i] = labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i],
                                     zm_prior[i], zv_prior[i])
    with tf.name_scope('final_loss'):
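        # marginalize over y: sum_i q(y=i|x) * losses[i], plus nent
        # (the negative entropy of q(y|x))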
        loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in range(k)])
Example #2
x = Placeholder((None, 784), 'x')

# binarize data and create a y "placeholder"
with tf.name_scope('x_binarized'):
    xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)), tf.float32)
with tf.name_scope('y_'):
    y_ = tf.fill(tf.stack([tf.shape(x)[0], 10]), 0.0)

# propose distribution over y
qy_logit, qy = qy_graph(xb)

# for each proposed y, infer z and reconstruct x
z, zm, zv, px_logit = [[None] * 10 for i in range(4)]
for i in range(10):
    with tf.name_scope('graphs/hot_at{:d}'.format(i)):
        y = tf.add(y_, Constant(np.eye(10)[i], name='hot_at_{:d}'.format(i)))
        z[i], zm[i], zv[i] = qz_graph(xb, y)
        px_logit[i] = px_graph(z[i], y)

# Aggressive name scoping for pretty graph visualization :P
with tf.name_scope('loss'):
    with tf.name_scope('neg_entropy'):
        nent = -cross_entropy_with_logits(qy_logit, qy)
    losses = [None] * 10
    for i in range(10):
        with tf.name_scope('loss_at{:d}'.format(i)):
            losses[i] = labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], Constant(0), Constant(1))
    with tf.name_scope('final_loss'):
        loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in range(10)])

train_step = tf.train.AdamOptimizer().minimize(loss)
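
# Minimal training-loop sketch (an assumption, not part of the original
# example): `mnist` is assumed to be a tf.examples.tutorials.mnist-style
# dataset object exposing train.next_batch(batch_size).
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(10000):
    batch, _ = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch})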
Example #3
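# NOTE: unlike Example #1, the prior over z here is a fixed standard
# normal (mean Constant(0), variance Constant(1)) rather than the
# y-dependent prior returned by px_graph.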
# binarize data and create a y "placeholder"
with tf.name_scope('x_binarized'):
    xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)),
                 tf.float32)
with tf.name_scope('y_'):
    y_ = tf.fill(tf.stack([tf.shape(x)[0], 10]), 0.0)

# propose distribution over y
qy_logit, qy = qy_graph(xb)

# for each proposed y, infer z and reconstruct x
z, zm, zv, px_logit = [[None] * 10 for i in range(4)]
for i in range(10):
    with tf.name_scope('graphs/hot_at{:d}'.format(i)):
        y = tf.add(y_, Constant(np.eye(10)[i], name='hot_at_{:d}'.format(i)))
        z[i], zm[i], zv[i] = qz_graph(xb, y)
        px_logit[i] = px_graph(z[i], y)

# Aggressive name scoping for pretty graph visualization :P
with tf.name_scope('loss'):
    with tf.name_scope('neg_entropy'):
        nent = -cross_entropy_with_logits(qy_logit, qy)
    losses = [None] * 10
    for i in range(10):
        with tf.name_scope('loss_at{:d}'.format(i)):
            losses[i] = labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i],
                                     Constant(0), Constant(1))
    with tf.name_scope('final_loss'):
        loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in range(10)])
Example #4
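# NOTE: this snippet assumes `x` is already defined (e.g. as the
# (None, 784) placeholder from Example #2) along with np, tf, and the
# Constant/qz_graph helpers used below.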
# h1 = tf.layers.conv2d(inputs=x,filters=16,kernel_size=[3, 3],padding="same",activation=tf.nn.relu)
# pool1 = tf.layers.max_pooling2d(inputs=h1, pool_size=[2, 2], strides=2)
# h2 = tf.layers.conv2d(inputs=pool1,filters=32,kernel_size=[3, 3],padding="same",activation=tf.nn.relu)
# pool2 = tf.layers.max_pooling2d(inputs=h2, pool_size=[2, 2], strides=2)
# pool2_flat = tf.contrib.layers.flatten(pool2)
# dense = tf.layers.dense(inputs=pool2_flat, units=256, activation=tf.nn.relu)
# dense = tf.layers.dense(inputs=dense, units=14*14*28, activation=tf.nn.relu)
# dense = tf.reshape(dense,[-1,14,14,28])
# conv1 = tf.layers.conv2d_transpose(dense,28,[3,3],(1,1),padding="same",activation=tf.nn.relu)
# conv2 = tf.layers.conv2d_transpose(conv1,28,[3,3],(2,2),padding="same",activation=tf.nn.relu)
# conv3 = tf.layers.conv2d_transpose(conv2,1,[3,3],(1,1),padding="same",activation=tf.nn.relu)
# logits = tf.contrib.layers.flatten(conv3)
xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)), tf.float32)
y_ = tf.fill(tf.stack([tf.shape(x)[0], 10]), 0.0)

z, zm, zv, zm_prior, zv_prior, px_logit = [[None] * 10 for i in range(6)]

# for i in range(10):
# 	y = tf.add(y_, Constant(np.eye(10)[i]))
# 	z[i], zm[i], zv[i] = qz_graph(xb, y)
# 	zm_prior[i], zv_prior[i], px_logit[i] = px_graph(z[i], y)
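# build only components 1-3 by hand (the commented-out loop above would
# build all 10)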
y = tf.add(y_, Constant(np.eye(10)[1]))
z[1], zm[1], zv[1] = qz_graph(xb, y)
y = tf.add(y_, Constant(np.eye(10)[2]))
z[2], zm[2], zv[2] = qz_graph(xb, y)
y = tf.add(y_, Constant(np.eye(10)[3]))
z[3], zm[3], zv[3] = qz_graph(xb, y)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
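# NOTE: if x is a placeholder, the run below also needs a feed_dict,
# e.g. sess.run(z[2], feed_dict={x: batch})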
print(np.shape(sess.run(z[2])))