Example 1

import numpy as np
import matplotlib.pyplot as plt
from inferpy.data import mnist

# number of observed instances (the plate size)
N = 1000
# postz, y_train, DIG, markers, colors and the trained model m are assumed
# to be defined by the preceding training code (see Examples 2 and 3)
transp = [0.9, 0.9, 0.5]

fig = plt.figure()

for c in range(0, len(DIG)):
    col = colors[c]
    plt.scatter(postz[y_train == DIG[c], 0], postz[y_train == DIG[c], 1], color=col,
                label=DIG[c], marker=markers[c], alpha=transp[c], s=60)

plt.legend()
plt.show()


# Generate new images
x_gen = m.posterior_predictive('x').sample()
mnist.plot_digits(x_gen)


# Generate new images for a specific number
NUM = 0
postz_0 = postz[y_train == DIG[NUM]]
# Fewer instances than the plate size, so we tile them up to N (1000) instances
postz_0 = np.tile(postz_0, [int(np.ceil(N / postz_0.shape[0])), 1])[:N]
x_gen = m.posterior_predictive('x', data={"z": postz_0}).sample()
mnist.plot_digits(x_gen)


# Show how numbers are codified in the domain of the hidden variable z
# First define the range of the z domain
xaxis_min, yaxis_min = np.min(postz, axis=0)
xaxis_max, yaxis_max = np.max(postz, axis=0)
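
# A sketch of the natural next step: decode a grid of points spanning the
# z domain bounded above, to visualise what each region of the latent space
# encodes (the grid resolution and this continuation are assumptions, not
# part of the original snippet; the tiling trick mirrors the cell above)
gridx = np.linspace(xaxis_min, xaxis_max, 10)
gridy = np.linspace(yaxis_min, yaxis_max, 10)
zgrid = np.array([[gx, gy] for gy in gridy for gx in gridx])
# tile up to the plate size N, as done for postz_0 above
zgrid = np.tile(zgrid, [int(np.ceil(N / zgrid.shape[0])), 1])[:N]
x_gen = m.posterior_predictive('x', data={"z": zgrid}).sample()
mnist.plot_digits(x_gen[:100], grid=[10, 10])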
Example 2

import tensorflow as tf
import inferpy as inf

# number of observed instances
N = 1000
DIG = [0, 1, 2]
# minimum scale
scale_epsilon = 0.01
# inference parameters
num_epochs = 1000
learning_rate = 0.01

tf.reset_default_graph()
tf.set_random_seed(1234)
from inferpy.data import mnist

# load the data
(x_train, y_train), _ = mnist.load_data(num_instances=N, digits=DIG)

mnist.plot_digits(x_train, grid=[5, 5])

############## Inferpy ##############


# P model and the  decoder NN
@inf.probmodel
def vae(k, d0, dx):
    with inf.datamodel():
        z = inf.Normal(tf.ones(k), 1, name="z")

        decoder = inf.layers.Sequential([
            tf.keras.layers.Dense(d0, activation=tf.nn.relu),
            tf.keras.layers.Dense(dx)
        ])

        # the snippet is cut off here; the observed variable completes the
        # generative model as in the InferPy VAE example
        x = inf.Normal(decoder(z), 1, name="x")
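
# The matching Q model (encoder) needed for variational inference; a
# minimal sketch following the VAE example in the InferPy docs (the exact
# encoder architecture is an assumption, the snippet ends before it)
@inf.probmodel
def qmodel(k, d0, dx):
    with inf.datamodel():
        x = inf.Normal(tf.ones(dx), 1, name="x")

        encoder = inf.layers.Sequential([
            tf.keras.layers.Dense(d0, activation=tf.nn.relu),
            tf.keras.layers.Dense(2 * k)
        ])

        output = encoder(x)
        qz_loc = output[:, :k]
        # keep the scale strictly positive, bounded below by scale_epsilon
        qz_scale = tf.nn.softplus(output[:, k:]) + scale_epsilon
        qz = inf.Normal(qz_loc, qz_scale, name="z")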
Example 3

import numpy as np
import matplotlib.pyplot as plt
import inferpy as inf
from inferpy.data import mnist

# p (the trained model), M (batch size), N, x_train, y_train and DIG are
# assumed to be defined by the preceding (elided) code.
# Compute the posterior encoding of each batch and concatenate the results
postz = np.concatenate([
    p.posterior("z", data={
        "x": x_train[i:i + M, :]
    }).sample() for i in range(0, N, M)
])

# for each input instance, plot the hidden encoding coloured by the number that it represents
markers = ["x", "+", "o"]
colors = [
    plt.get_cmap("gist_rainbow")(0.05),
    plt.get_cmap("gnuplot2")(0.08),
    plt.get_cmap("gist_rainbow")(0.33)
]
transp = [0.9, 0.9, 0.5]

fig = plt.figure()

for c in range(0, len(DIG)):
    col = colors[c]
    plt.scatter(postz[y_train == DIG[c], 0],
                postz[y_train == DIG[c], 1],
                color=col,
                label=DIG[c],
                marker=markers[c],
                alpha=transp[c],
                s=60)

plt.legend()
plt.show()

# generate new images (x_gen is not defined in this snippet; sampling it
# from the posterior predictive as in Example 1 is an assumption)
x_gen = p.posterior_predictive('x').sample()
mnist.plot_digits(x_gen, grid=[5, 5])
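
# For reference, a minimal sketch of how a trained model like p above can
# be obtained with InferPy's variational inference API (this follows the
# VAE example in the InferPy docs; the dimensions k, d0, dx and the models
# vae and qmodel are assumed to be defined as in Example 2)
m = vae(k, d0, dx)
q = qmodel(k, d0, dx)
VI = inf.inference.VI(q, epochs=num_epochs)
m.fit({"x": x_train}, VI)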
Example 4

import torch
import tensorflow as tf

# number of observed instances
N = 1000
DIG = [0, 1, 2]
# minimum scale
scale_epsilon = 0.01
# inference parameters
num_epochs = 1000
learning_rate = 0.01

tf.reset_default_graph()
tf.set_random_seed(1234)
from inferpy.data import mnist

# load the data
(x_train, y_train), _ = mnist.load_data(num_instances=N, digits=DIG)

mnist.plot_digits(x_train, grid=[5, 5])


### Model definition


class Decoder(torch.nn.Module):
    def __init__(self, k, d0, dx):
        super(Decoder, self).__init__()
        # set up the two linear transformations used
        self.fc1 = torch.nn.Linear(k, d0)
        self.fc21 = torch.nn.Linear(d0, dx)
        # set up the non-linearities
        self.softplus = torch.nn.Softplus()
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, z):
        # the snippet is truncated here; this forward pass follows the
        # standard Pyro VAE tutorial decoder
        hidden = self.softplus(self.fc1(z))
        loc_img = self.sigmoid(self.fc21(hidden))
        return loc_img
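
# The matching encoder network; a minimal sketch following the standard
# Pyro VAE tutorial (the snippet is cut off before the encoder, so names
# and shapes here are assumptions)
class Encoder(torch.nn.Module):
    def __init__(self, dx, d0, k):
        super(Encoder, self).__init__()
        # linear transformations for the hidden layer and the two heads
        self.fc1 = torch.nn.Linear(dx, d0)
        self.fc21 = torch.nn.Linear(d0, k)
        self.fc22 = torch.nn.Linear(d0, k)
        self.softplus = torch.nn.Softplus()

    def forward(self, x):
        hidden = self.softplus(self.fc1(x))
        z_loc = self.fc21(hidden)
        # exponentiate to keep the scale positive
        z_scale = torch.exp(self.fc22(hidden))
        return z_loc, z_scale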
Example 5

import tensorflow as tf
import inferpy as inf
# the edward2 import path is an assumption (TFP releases that bundled it)
from tensorflow_probability import edward2 as ed

# number of observed instances
N = 1000
DIG = [0, 1, 2]
# minimum scale
scale_epsilon = 0.01
# inference parameters
num_epochs = 1000
learning_rate = 0.01

tf.reset_default_graph()
tf.set_random_seed(1234)
from inferpy.data import mnist

# load the data
(x_train, y_train), _ = mnist.load_data(num_instances=N, digits=DIG)

mnist.plot_digits(x_train, grid=[5, 5])


# Model definition
######################

############## Edward ##############


def vae(k, d0, dx, N):
    z = ed.Normal(loc=tf.ones(k), scale=1., sample_shape=N, name="z")

    decoder = inf.layers.Sequential([
        tf.keras.layers.Dense(d0, activation=tf.nn.relu, name="h0"),
        tf.keras.layers.Dense(dx, name="h1")
    ])

    # the snippet is cut off here; closing the model with the observed
    # variable follows the same pattern as the InferPy version
    x = ed.Normal(loc=decoder(z), scale=1., name="x")
    return x
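
# The corresponding variational model; a minimal sketch mirroring the
# InferPy Q model from Example 2 (an assumption, since the snippet is cut
# off before it)
def qmodel(k, d0, dx, x):
    encoder = inf.layers.Sequential([
        tf.keras.layers.Dense(d0, activation=tf.nn.relu),
        tf.keras.layers.Dense(2 * k)
    ])
    output = encoder(x)
    qz_loc = output[:, :k]
    # keep the scale strictly positive, bounded below by scale_epsilon
    qz_scale = tf.nn.softplus(output[:, k:]) + scale_epsilon
    qz = ed.Normal(loc=qz_loc, scale=qz_scale, name="qz")
    return qz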