Example #1
import inferpy as inf


def log_regression(d, N):

    # define the weights
    w0 = inf.models.Normal(0, 1)
    w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = inf.models.Normal(0, 1, observed=True, dim=d)
        y = inf.models.Bernoulli(logits=w0 + inf.dot(x, w), observed=True)
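For intuition, the generative process this model encodes can be sketched in plain NumPy (a hypothetical stand-alone rewrite, not part of the InferPy API):

import numpy as np

def sample_log_regression(d, N, seed=0):
    rng = np.random.default_rng(seed)
    w0 = rng.normal()                # intercept ~ Normal(0, 1)
    w = rng.normal(size=d)           # weights ~ Normal(0, 1)
    x = rng.normal(size=(N, d))      # features ~ Normal(0, 1)
    p = 1.0 / (1.0 + np.exp(-(w0 + x @ w)))   # sigmoid of the logits
    y = rng.binomial(1, p)           # labels ~ Bernoulli(p)
    return x, y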
Example #2
import inferpy as inf


def linear_regression(d, N):

    # define the weights
    w0 = inf.models.Normal(0, 1)
    w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = inf.models.Normal(0, 1, observed=True, dim=d)
        y = inf.models.Normal(w0 + inf.dot(x, w), 1.0, observed=True)
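The corresponding generative process, sketched in plain NumPy for intuition (hypothetical code, independent of InferPy), together with a least-squares sanity check:

import numpy as np

rng = np.random.default_rng(0)
d, N = 5, 1000
w0, w = rng.normal(), rng.normal(size=d)
x = rng.normal(size=(N, d))
y = w0 + x @ w + rng.normal(size=N)    # y ~ Normal(w0 + x.w, 1)

# ordinary least squares should roughly recover w0 and w
X1 = np.hstack([np.ones((N, 1)), x])
w_hat, *_ = np.linalg.lstsq(X1, y, rcond=None)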
Example #3
import inferpy as inf
from inferpy.models import Normal
import numpy as np

d, N = 5, 20000

# model definition
with inf.ProbModel() as m:

    # define the weights
    w0 = Normal(0, 1)
    w = Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = Normal(0, 1, observed=True, dim=d)
        y = Normal(w0 + inf.dot(x, w), 1.0, observed=True)

# toy data generation
x_train = inf.models.Normal(loc=10, scale=5, dim=d).sample(N)
y_train = np.matmul(x_train, np.array([10, 10, 0.1, 0.5, 2]).reshape((d, 1))) \
          + inf.models.Normal(loc=0, scale=5, dim=1).sample(N)

data = {x.name: x_train, y.name: y_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

print(m.posterior([w, w0]))
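As a quick sanity check on the toy data (a hypothetical addition, assuming sample() returns an (N, d) NumPy array), ordinary least squares should land near the true weights [10, 10, 0.1, 0.5, 2] used above:

X1 = np.hstack([np.ones((N, 1)), x_train])
print(np.linalg.lstsq(X1, y_train, rcond=None)[0].ravel())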
Example #4
import edward as ed
import inferpy as inf
import numpy as np
import tensorflow as tf

d, N = 5, 20000

# toy data generation (x_train sampled as in the previous example)
x_train = inf.models.Normal(loc=10, scale=5, dim=d).sample(N)
y_train = np.matmul(x_train, np.array([10, 10, 0.1, 0.5, 2]).reshape((d, 1))) \
          + inf.models.Normal(loc=0, scale=5, dim=1).sample(N)

############################## InferPy #################################################

# model definition
with inf.ProbModel() as model:

    # define the weights
    w0 = inf.models.Normal(0, 1)
    w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = inf.models.Normal(0, 1, observed=True, dim=d)
        y = inf.models.Normal(w0 + inf.dot(x, w), 1.0, observed=True)

# compile and fit the model with training data
model.compile()
data = {x: x_train, y: y_train}
model.fit(data)

# print the posterior distributions
print(model.posterior([w, w0]))

############################## Edward ##################################################

# define the weights
w0 = ed.models.Normal(loc=tf.zeros(1), scale=tf.ones(1))
w = ed.models.Normal(loc=tf.zeros(d), scale=tf.ones(d))
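# (truncated here; in the standard Edward 1.x formulation, the likelihood
#  would presumably continue along these lines, using Edward's dot helper)
x = tf.placeholder(tf.float32, [N, d])
y = ed.models.Normal(loc=ed.dot(x, w) + w0, scale=tf.ones(N))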
Example #5
# mean-field variational approximations of the weights
# (the original lines are cut off here; this is the standard Edward pattern)
qw = ed.models.Normal(loc=tf.Variable(tf.zeros(d)),
                      scale=tf.nn.softplus(tf.Variable(tf.zeros(d))))
qw0 = ed.models.Normal(loc=tf.Variable(tf.zeros(1)),
                       scale=tf.nn.softplus(tf.Variable(tf.zeros(1))))

inference = ed.KLqp({w: qw, w0: qw0}, data={x: x_train, y: y_train.reshape(N)})
inference.run()

# print the posterior distributions
print([qw.loc.eval(), qw0.loc.eval()])
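For reference, ed.KLqp fits qw and qw0 by maximizing the evidence lower bound (ELBO), which for this model reads:

\mathrm{ELBO}(q) = \mathbb{E}_{q(w, w_0)}\!\left[\log p(y \mid x, w, w_0)\right] - \mathrm{KL}\!\left(q(w, w_0)\,\|\,p(w, w_0)\right)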

########### InferPy ###########

# model definition
with inf.ProbModel() as m:

    # define the weights
    w0 = inf.models.Normal(0, 1)
    w = inf.models.Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = inf.models.Normal(0, 1, observed=True, dim=d)
        y = inf.models.Bernoulli(logits=w0 + inf.dot(x, w), observed=True)

# training data (reusing the x_train and y_train arrays from above)
data = {x: x_train, y: y_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

print(m.posterior([w, w0]))
Example #6
import inferpy as inf
from inferpy.models import Normal, Bernoulli

d, N = 10, 500

# model definition
with inf.ProbModel() as m:

    # define the weights
    w0 = Normal(0, 1)
    w = Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = Normal(0, 1, observed=True, dim=d)
        y = Bernoulli(logits=w0 + inf.dot(x, w), observed=True)

# toy data generation
x_train = Normal(loc=0, scale=1, dim=d).sample(N)
y_train = Bernoulli(probs=0.4).sample(N)
data = {x.name: x_train, y.name: y_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

print(m.posterior([w, w0]))
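Note that the toy labels above are drawn independently of x_train, so there is no real relationship for the model to recover. A hedged NumPy sketch of label generation that actually follows the model's logistic link (hypothetical code, assuming x_train is an (N, d) array):

import numpy as np

true_w0, true_w = 1.0, np.linspace(-1, 1, d)   # arbitrary ground-truth weights
logits = true_w0 + np.asarray(x_train) @ true_w
y_train = np.random.default_rng(0).binomial(1, 1.0 / (1.0 + np.exp(-logits)))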