# Example 1: Bayesian CNN classifier (Flipout convolution)
import tensorflow as tf
import inferpy as inf
from inferpy.data import mnist
import tensorflow_probability as tfp
import numpy as np

N = 1000  # number of training instances to load

# Fetch MNIST restricted to digits 0 and 1, keeping the 2-D image
# shape (vectorize=False) so it can be fed to a convolutional layer.
(x_train, y_train), (x_test, y_test) = mnist.load_data(
    num_instances=N, digits=[0, 1], vectorize=False)

# Per-instance sample shape: everything after the leading batch axis.
S = np.shape(x_train)[1:]


@inf.probmodel
def cnn_flipout_classifier(S):
    """Bayesian CNN classifier over images with sample shape ``S``.

    The convolution uses the Flipout estimator, so its kernel weights
    are random variables; global max-pooling and a sigmoid dense head
    reduce the feature map to a single class probability.
    """
    with inf.datamodel():
        # Prior over the input images.
        x = inf.Normal(tf.ones(S), 1, name="x")

        # Build the network layer by layer, then stack them.
        conv = tfp.layers.Convolution2DFlipout(
            4, kernel_size=(10, 10), padding="same", activation="relu")
        pool = tf.keras.layers.GlobalMaxPool2D()
        head = tf.keras.layers.Dense(1, activation='sigmoid')
        nn = inf.layers.Sequential([conv, pool, head])

        # NOTE(review): the extra axis is inserted at position 1 — confirm
        # the convolution's data format expects a channels-first input.
        y = inf.Normal(nn(tf.expand_dims(x, 1)), 0.001, name="y")

# Example 2: non-linear PCA decoder network (shown mid-function) and fitting script
    beta0 = inf.Normal(tf.ones([k, d0]) * loc_init, scale_init, name="beta0")
    alpha0 = inf.Normal(tf.ones([d0]) * loc_init, scale_init, name="alpha0")

    h0 = tf.nn.relu(z @ beta0 + alpha0, name="h0")

    ######

    beta1 = inf.Normal(tf.ones([d0, 2*dx]) * loc_init, scale_init, name="beta1")
    alpha1 = inf.Normal(tf.ones([2*dx]) * loc_init, scale_init, name="alpha1")

    output = z @ beta0 + alpha0

    return output


# Load the MNIST digits selected in DIG for training.
(x_train, y_train), _ = mnist.load_data(num_instances=N, digits=DIG)

# Build the probabilistic model and pick the inference engine.
m = nlpca(k, d0, dx, decoder)
inf_method = inf.inference.MCMC()

# Fit the model to the observed images.
m.fit({"x": x_train}, inf_method)


# Visualise how the loss evolved over training.
# NOTE(review): loss curves are typical of variational inference;
# confirm that the MCMC engine actually exposes a `losses` attribute.

L = inf_method.losses
plt.plot(L)

plt.xlabel('epochs')
plt.ylabel('Loss')
# Example 3: variational autoencoder (P model, decoder, and Q model)
import tensorflow as tf
import inferpy as inf
from inferpy.data import mnist
import tensorflow_probability as tfp

N = 1000  # number of training instances
M = 100   # batch size
# Fetch MNIST restricted to digits 0, 1 and 2; labels are discarded.
(x_train, _), _ = mnist.load_data(num_instances=N, digits=[0, 1, 2])


# P model and the  decoder NN
@inf.probmodel
def vae(k, d0, d, decoder):
    """P model: a k-dimensional latent code decoded into a d-dimensional x.

    ``decoder`` maps (d0, d, z) to the location of the observation model.
    """
    with inf.datamodel():
        # Prior over the latent code.
        z = inf.Normal(tf.ones(k), 1, name="z")
        # Observation model: decoded location with unit scale.
        loc = decoder(d0, d, z)
        x = inf.Normal(loc, 1, name="x")


def decoder(d0, d, z):
    """Decode latent sample ``z`` through a Flipout hidden layer of size
    ``d0`` into a ``d``-dimensional output."""
    net = inf.layers.Sequential([
        tfp.layers.DenseFlipout(d0, activation=tf.nn.relu),
        tf.keras.layers.Dense(d),
    ])
    return net(z)


# Instantiate the P model for 28x28 images with a 2-D latent space.
p = vae(k=2, d0=100, d=28 * 28, decoder=decoder)


# Q model and the encoder NN
@inf.probmodel
def qmodel(k, d0, d, encoder):
    with inf.datamodel():