Example #1
import inferpy as inf

def pca(K, d, N):
    # define the prior over the principal components (weights)
    with inf.replicate(size=K):
        w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        z = inf.models.Normal(0, 1, dim=K)
        x = inf.models.Normal(inf.matmul(z, w), 1.0, observed=True, dim=d)
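The function above only declares the model's variables. A minimal usage sketch, assuming the function is extended to return its observed variable x so data can be bound to it by name (the ProbModel/compile/fit pattern is taken from the full examples further down):

import inferpy as inf
from inferpy.models import Normal

K, d, N = 5, 10, 200

# assumption: pca() is modified to end with 'return x'
with inf.ProbModel() as m:
    x = pca(K, d, N)

# toy data generation and inference
x_train = Normal(loc=0, scale=1., dim=d).sample(N)
m.compile()
m.fit({x.name: x_train})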
Example #2
import inferpy as inf

def pca_with_ard_prior(K, d, N):
    # define the prior over the weights
    with inf.replicate(size=K):
        w = inf.models.Normal(0, 1, dim=d)

    # prior over the observation noise
    sigma = inf.models.InverseGamma(1.0, 1.0)

    # define the generative model
    with inf.replicate(size=N):
        z = inf.models.Normal(0, 1, dim=K)
        x = inf.models.Normal(inf.matmul(z, w), sigma, observed=True, dim=d)
Example #3
import inferpy as inf

def log_regression(K, d, N):

    # define the bias and the weights
    w0 = inf.models.Normal(0, 1, dim=K)

    with inf.replicate(size=K):
        w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = inf.models.Normal(0, 1, observed=True, dim=d)
        y = inf.models.Bernoulli(logits=w0 +
                                 inf.matmul(x, w, transpose_b=True),
                                 observed=True)
Example #4
import edward as ed
import inferpy as inf
from inferpy.models import Normal, InverseGamma

K, d, N = 5, 10, 200

# model definition
with inf.ProbModel() as m:
    # define the weights
    with inf.replicate(size=K):
        w = Normal(0, 1, dim=d)

    # prior over the observation noise
    sigma = InverseGamma(1.0, 1.0)

    # define the generative model
    with inf.replicate(size=N):
        z = Normal(0, 1, dim=K)
        x = Normal(inf.matmul(z, w), sigma, observed=True, dim=d)

# toy data generation
x_train = Normal(loc=0, scale=1., dim=d).sample(N)
data = {x.name: x_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

# extract the hidden representation from a set of observations
hidden_encoding = m.posterior(z)
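Since sigma is latent as well, its posterior can be queried with the same call; a one-line sketch, assuming m.posterior accepts any latent variable of the model, as it does for z above:

print(m.posterior(sigma))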
Example #5
import inferpy as inf
from inferpy.models import Normal, Bernoulli
import numpy as np

d, N = 10, 500

# number of classes
K = 3

# model definition
with inf.ProbModel() as m:

    # define the bias and the weights
    w0 = Normal(0, 1, dim=K)

    with inf.replicate(size=K):
        w = Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = Normal(0, 1, observed=True, dim=d)
        y = Bernoulli(logits=w0 + inf.matmul(x, w, transpose_b=True),
                      observed=True)

# toy data generation
x_train = Normal(loc=0, scale=1, dim=d).sample(N)
y_train = Bernoulli(probs=np.random.rand(K)).sample(N)
data = {x.name: x_train, y.name: y_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

print(m.posterior([w, w0]))
Example #6
import edward as ed
import inferpy as inf
from inferpy.models import Normal

K, d, N = 5, 10, 200

# model definition
with inf.ProbModel() as m:
    # define the weights
    with inf.replicate(size=K):
        w = Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        z = Normal(0, 1, dim=K)
        x = Normal(inf.matmul(z, w), 1.0, observed=True, dim=d)

# toy data generation
x_train = Normal(loc=0, scale=1., dim=d).sample(N)
data = {x.name: x_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

# extract the hidden representation from a set of observations
hidden_encoding = m.posterior(z)
Example #7
import inferpy as inf
from inferpy.models import Normal, Bernoulli, Categorical
import numpy as np

d, N = 10, 500

# model definition
with inf.ProbModel() as m:

    # define the bias and the weights
    w0 = Normal(0, 1)
    with inf.replicate(size=d):
        w = Normal(0, 1)

    # define the generative model
    with inf.replicate(size=N):
        x = Normal(0, 1, observed=True, dim=d)
        p = w0 + inf.matmul(x, w)
        y = Bernoulli(logits=p, observed=True)

# toy data generation
x_train = Normal(loc=0, scale=1, dim=d).sample(N)
y_train = Bernoulli(probs=[0.4]).sample(N)
data = {x.name: x_train, y.name: np.reshape(y_train, (N, 1))}  # reshape targets to [N,1]

# compile and fit the model with training data
m.compile()
m.fit(data)

print(m.posterior([w, w0]))
Example #8
######## Edward ###########

# variational approximation for w; the opening of this Edward snippet was
# truncated, so the loc argument below is reconstructed to mirror the scale
qw = Normal(loc=tf.Variable(tf.random_normal([K, d])),
            scale=tf.nn.softplus(
                tf.Variable(tf.random_normal([K, d]))))

inference = ed.KLqp({w: qw}, data={x: x_train})
inference.run()

# print the posterior distributions
print([qw.loc.eval()])

######## Inferpy ###########

# model definition
with inf.ProbModel() as m:
    # define the weights
    with inf.replicate(size=K):
        w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        z = inf.models.Normal(0, 1, dim=K)
        x = inf.models.Normal(inf.matmul(z, w), 1.0, observed=True, dim=d)

data = {x.name: x_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

# extract the hidden representation from a set of observations
hidden_encoding = m.posterior(z)
Example #9
###### Inferpy ########

# model definition
with inf.ProbModel() as m:

    # define the bias and the weights
    w0 = inf.models.Normal(0, 1, dim=K)

    with inf.replicate(size=K):
        w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = inf.models.Normal(0, 1, observed=True, dim=d)
        y = inf.models.Bernoulli(logits=w0 + inf.matmul(x, w, transpose_b=True),
                                 observed=True)

# toy data as generated in Example #5
data = {x.name: x_train, y.name: y_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

print(m.posterior([w, w0]))
Example #10
import inferpy as inf
from inferpy.models import Normal

N = 200
M = 50
K = 5

# gamma has shape [K,M]
with inf.replicate(size=K):
    gamma = Normal(0, 1, dim=M)

# w has shape [N,K]
with inf.replicate(size=N):
    w = Normal(0, 1, dim=K)

# x has shape [N,K] x [K,M] = [N,M]

with inf.replicate(size=N):
    x = Normal(inf.matmul(w, gamma), 1, observed=True)

m = inf.ProbModel([w, gamma, x])

data = m.sample(size=N)

log_prob = m.log_prob(data)

m.compile(infMethod='KLqp')

m.fit(data)

print(m.posterior([w, gamma]))
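After fitting, the same call can score new data; a minimal sketch, assuming m.log_prob accepts any dict in the format returned by m.sample:

test_data = m.sample(size=10)
print(m.log_prob(test_data))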
Example #11
import inferpy as inf
from inferpy.models import Normal, Beta, Categorical, Deterministic

d, N = 10, 200

# define the bias and the weights
w0 = Normal(0, 1)
with inf.replicate(size=d):
    w = Normal(0, 1)


# prior over the mixing probability
p = Beta(1, 1)

# define the generative model
with inf.replicate(size=N):
    x = Normal(0, 1, observed=True, dim=d)
    y0 = Normal(inf.matmul(x, w), 1.0, observed=True)

    h = Categorical(probs=[p, 1-p])
    y1 = Deterministic(1.)

    # not working until issue #58 is solved
    y = Deterministic(inf.case({h.equal(0): y0, h.equal(1): y1}), observed=True)

# standalone Categorical usage
h = Categorical(probs=[0.2, 0.8])
h.probs
h.sample()
Example #12
import inferpy as inf
from inferpy.models import Normal

# K is assumed here (the snippet uses it without defining it);
# 5 matches the other examples in this list
K = 5
d = 20

# Prior for the principal components
with inf.replicate(size=K):
    w = Normal(loc=0, scale=1, dim=d)  # w.shape = [K,d]


###

# Number of observations
N = 1000

# define the generative model
with inf.replicate(size=N):
    z = Normal(0, 1, dim=K)  # z.shape = [N,K]
    x = Normal(inf.matmul(z, w), 1.0, observed=True, dim=d)  # x.shape = [N,d]


###

from inferpy import ProbModel

# Define the model
pca = ProbModel(varlist=[w, z, x])

# Compile the model
pca.compile(infMethod='KLqp')

###
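Example #12 stops after compiling; a minimal continuation sketch, reusing the toy-data and inference pattern from the other examples in this list:

# toy data generation and inference
x_train = Normal(loc=0, scale=1., dim=d).sample(N)
pca.fit({x.name: x_train})
print(pca.posterior(z))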
Example #13
import edward as ed
import inferpy as inf
from inferpy.models import Normal

K, d, N = 5, 10, 200

# model definition
with inf.ProbModel() as m:
    # define the weights
    with inf.replicate(size=K):
        w = Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        z = Normal(0, 1, dim=K)
        x = Normal(inf.matmul(z, w),
                   1.0, observed=True, dim=d)

# data generation
x_train = Normal(loc=0, scale=1., dim=d).sample(N)
data = {x.name: x_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

# extract the hidden representation from a set of observations
hidden_encoding = m.posterior(z)
Example #14
import inferpy as inf
from inferpy.models import Normal

d, N = 10, 200

# model definition
with inf.ProbModel() as m:

    # define the bias and the weights
    w0 = Normal(0, 1)
    with inf.replicate(size=d):
        w = Normal(0, 1)

    # define the generative model
    with inf.replicate(size=N):
        x = Normal(0, 1, observed=True, dim=d)
        y = Normal(w0 + inf.matmul(x, w), 1.0, observed=True)

# toy data generation
x_train = Normal(loc=0, scale=1, dim=d).sample(N)
y_train = Normal(loc=5, scale=1, dim=1).sample(N)
data = {x.name: x_train, y.name: y_train}


# compile and fit the model with training data
m.compile()
m.fit(data)

print(m.posterior([w, w0]))