Example #1
def activate(self, X):
    # Wrap the sigmoid-squashed activations in a Bernoulli distribution
    # using the regular (mean) parameterization.
    return stats.Bernoulli(T.sigmoid(X), parameter_type='regular')
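
For context, a minimal sketch of how such a method could be exercised, assuming the deepx-style T backend and stats module used in the snippet; BernoulliOutput, logits, and targets are hypothetical names introduced for illustration:

import numpy as np
from deepx import T, stats

class BernoulliOutput:
    def activate(self, X):
        return stats.Bernoulli(T.sigmoid(X), parameter_type='regular')

# Placeholder inputs and binary targets, just to exercise the layer.
logits = T.constant(np.zeros((4, 1), dtype=np.float32))
targets = T.constant(np.array([[0.], [1.], [1.], [0.]], dtype=np.float32))

p = BernoulliOutput().activate(logits)
ll = T.sum(p.log_likelihood(targets))  # total log p(targets | logits)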
Example #2
import numpy as np
import tensorflow as tf

# Assumed imports for this excerpt (deepx-style API; the exact module
# paths may differ across versions or projects):
from deepx import T
from deepx.nn import Relu, GaussianLayer
from deepx.stats import Gaussian, Bernoulli, kl_divergence

# N, batch_size, D, x, y, p_w, q_w, lr, X, and Y are assumed to be
# defined earlier, outside this excerpt.
num_batches = T.to_float(N / batch_size)

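# Recognition network mapping each (x, y) pair to the natural-parameter
# sufficient statistics of a D-dimensional Gaussian; the commented line
# sketches a deeper alternative.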
with T.initialization('xavier'):
    # stats_net = Relu(D + 1, 20) >> Relu(20) >> GaussianLayer(D)
    stats_net = GaussianLayer(D + 1, D)
net_out = stats_net(T.concat([x, y[..., None]], -1))
stats = T.sum(net_out.get_parameters('natural'), 0)[None]

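# Natural-gradient step for q(w): prior natural parameters, plus the
# minibatch sufficient statistics rescaled to the full dataset, minus
# q's current natural parameters, divided by N for a per-datapoint scale.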
natural_gradient = (p_w.get_parameters('natural') + num_batches * stats -
                    q_w.get_parameters('natural')) / N
next_w = Gaussian(q_w.get_parameters('natural') + lr * natural_gradient,
                  parameter_type='natural')

# ELBO = E_q[log p(y | w)] - KL(q(w) || p(w)), so the KL term enters
# with a negative sign.
l_w = -kl_divergence(q_w, p_w)[0]

# Expected log-likelihood of the labels under the updated posterior mean.
p_y = Bernoulli(T.sigmoid(T.einsum('jw,iw->ij', next_w.expected_value(), x)))
l_y = T.sum(p_y.log_likelihood(y[..., None]))
elbo = l_w + l_y

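# Each training step combines two updates: assign the natural-gradient
# update to q(w), and take an RMSProp step on the recognition network.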
nat_op = T.assign(q_w.get_parameters('natural'),
                  next_w.get_parameters('natural'))
grad_op = tf.train.RMSPropOptimizer(1e-4).minimize(-elbo)
train_op = tf.group(nat_op, grad_op)
sess = T.interactive_session()

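# Evaluate on the full dataset: adding 0.5 before the integer cast
# rounds each predicted probability to the nearest {0, 1} label.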
predictions = T.cast(
    T.sigmoid(T.einsum('jw,iw->i', q_w.expected_value(), T.to_float(X))) + 0.5,
    np.int32)
accuracy = T.mean(
    T.to_float(T.equal(predictions, T.constant(Y.astype(np.int32)))))
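
A minimal sketch of a driver loop for the ops above, assuming x and y are TensorFlow placeholders fed from numpy arrays X and Y with N rows; the epoch count is illustrative, not from the original:

for epoch in range(10):  # illustrative epoch count
    for i in range(0, N, batch_size):
        sess.run(train_op, feed_dict={x: X[i:i + batch_size],
                                      y: Y[i:i + batch_size]})
    print('epoch %d, accuracy %.3f' % (epoch, sess.run(accuracy)))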