    def test(self):

        with inf.ProbModel() as m:
            x = Normal(loc=1., scale=1., name="x", observed=True)
            y = Normal(loc=x, scale=1., dim=3, name="y")

        # print the list of variables
        print(m.varlist)
        print(m.latent_vars)
        print(m.observed_vars)

        # get a sample

        m_sample = m.sample()

        # compute the log_prob for each element in the sample
        print(m.log_prob(m_sample))

        # compute the sum of the log_prob
        print(m.sum_log_prob(m_sample))

        self.assertTrue(len(m.varlist) == 2)
        self.assertTrue(len(m.latent_vars) == 1)
        self.assertTrue(len(m.observed_vars) == 1)

        self.assertFalse(m.is_compiled())

        m.compile()

        self.assertTrue(m.is_compiled())

        z = Normal(loc=1., scale=1., dim=3, name="z")
        m.add_var(z)

        self.assertFalse(m.is_compiled())
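# The test methods in these listings presumably live inside a
# unittest.TestCase subclass; a minimal harness sketch (the class and module
# names here are hypothetical, not taken from the original source):

import unittest

import inferpy as inf
from inferpy.models import Normal

class ProbModelTest(unittest.TestCase):

    def test(self):
        ...  # body as in the listing above

if __name__ == "__main__":
    unittest.main()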
Example 2
    def test(self):

        with inf.ProbModel() as m:
            x = Normal(loc=1., scale=100, name="x")

            with inf.replicate(size=100):
                y = Normal(loc=x, scale=0.0001, dim=3, name="y", observed=True)

        # print the list of variables
        print(m.varlist)
        print(m.latent_vars)
        print(m.observed_vars)

        # get a sample
        m_sample = m.sample()
        print("sample:")
        print(m_sample)

        self.assertTrue(
            np.abs(
                np.mean(
                    list(m_sample.values())[0] -
                    list(m_sample.values())[1])) < 1)

        # compute the log_prob for each element in the sample
        print(m.log_prob(m_sample))

        # compute the sum of the log_prob
        print(m.sum_log_prob(m_sample))

        self.assertTrue(len(m.varlist) == 2)
        self.assertTrue(len(m.latent_vars) == 1)
        self.assertTrue(len(m.observed_vars) == 1)

        self.assertFalse(m.is_compiled())

        m.compile()

        self.assertTrue(m.is_compiled())

        z = Normal(loc=1., scale=1., dim=3, name="z")
        m.add_var(z)

        self.assertFalse(m.is_compiled())
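        # A rough shape check (sketch): with replicate(size=100), the sampled
        # value for "y" should carry 100 replicated rows of dimension 3. This
        # assumes m.sample() returns arrays keyed by variable name, as the
        # name-keyed data dicts in the later examples suggest.
        print(np.shape(m_sample["y"]))  # expected: (100, 3)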
Example 3
import edward as ed
import inferpy as inf
from inferpy.models import Normal, InverseGamma

K, d, N = 5, 10, 200

# model definition
with inf.ProbModel() as m:
    #define the weights
    with inf.replicate(size=K):
        w = Normal(0, 1, dim=d)

    sigma = InverseGamma(1.0, 1.0)

    # define the generative model
    with inf.replicate(size=N):
        z = Normal(0, 1, dim=K)
        x = Normal(inf.matmul(z, w), sigma, observed=True, dim=d)

# toy data generation
x_train = Normal(loc=0, scale=1., dim=d).sample(N)
data = {x.name: x_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

#extract the hidden representation from a set of observations
hidden_encoding = m.posterior(z)
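# Quick sanity checks (sketch): the training data should have shape (N, d)
# for the matmul dimensions above to line up, and the posterior object can be
# printed directly, as the regression example further down does. Assumes
# sample(N) returns a numpy-compatible array.
import numpy as np
print(np.shape(x_train))  # expected: (200, 10)
print(hidden_encoding)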
Example 4

import edward as ed
import inferpy as inf
from inferpy.models import Normal, Bernoulli, Categorical
import numpy as np

d, N = 10, 500

#number of classes
K = 3

# model definition
with inf.ProbModel() as m:

    #define the weights
    w0 = Normal(0, 1, dim=K)

    with inf.replicate(size=K):
        w = Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = Normal(0, 1, observed=True, dim=d)
        y = Bernoulli(logits=w0 + inf.matmul(x, w, transpose_b=True),
                      observed=True)

# toy data generation
x_train = Normal(loc=0, scale=1, dim=d).sample(N)
y_train = Bernoulli(probs=np.random.rand(K)).sample(N)
data = {x.name: x_train, y.name: y_train}

# compile and fit the model with training data
m.compile()
m.fit(data)
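# After fitting, the weight posteriors can be inspected just as in the linear
# regression example below (sketch):
print(m.posterior([w, w0]))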
Example 5
import edward as ed
import inferpy as inf
from inferpy.models import Normal
import numpy as np

d, N = 5, 20000

# model definition
with inf.ProbModel() as m:

    #define the weights
    w0 = Normal(0, 1)
    w = Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        x = Normal(0, 1, observed=True, dim=d)
        y = Normal(w0 + inf.dot(x, w), 1.0, observed=True)

# toy data generation
x_train = inf.models.Normal(loc=10, scale=5, dim=d).sample(N)
y_train = np.matmul(x_train, np.array([10,10,0.1,0.5,2]).reshape((d,1))) \
          + inf.models.Normal(loc=0, scale=5, dim=1).sample(N)

data = {x.name: x_train, y.name: y_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

print(m.posterior([w, w0]))
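# A possible goodness-of-fit check (sketch): regenerate a dataset of the same
# size from the same process and score it with sum_log_prob, which the other
# examples apply to name-keyed dicts. The generating process uses true
# coefficients [10, 10, 0.1, 0.5, 2] with noise scale 5, so the posterior
# printed above should sit close to those values.
x_test = inf.models.Normal(loc=10, scale=5, dim=d).sample(N)
y_test = np.matmul(x_test, np.array([10, 10, 0.1, 0.5, 2]).reshape((d, 1))) \
         + inf.models.Normal(loc=0, scale=5, dim=1).sample(N)
print(m.sum_log_prob({x.name: x_test, y.name: y_test}))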
Example 6
import inferpy as inf
from inferpy.models import Normal

# K defines the number of components.
K = 10

# d defines the number of dimensions
d = 20

#Prior for the principal components
with inf.replicate(size=K):
    w = Normal(loc=0, scale=1, dim=d)

###

# Number of observations
N = 1000

# define the generative model
with inf.replicate(size=N):
    z = Normal(0, 1, dim=K)
    x = Normal(inf.matmul(z, w), 1.0, observed=True, dim=d)

###

from inferpy import ProbModel

# Define the model
pca = ProbModel(varlist=[w, z, x])

# Compile the model
pca.compile()
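# With the model compiled, an end-to-end sketch mirroring the matrix
# factorisation example that follows: draw synthetic data from the prior,
# fit, and inspect the posterior over the components.
data = pca.sample(size=N)
pca.fit(data)
print(pca.posterior(w))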
Example 7
import inferpy as inf
from inferpy.models import Normal

N = 200
M = 50
K = 5

# Shape [M,K]
with inf.replicate(size=K):
    gamma = Normal(0, 1, dim=M)

# Shape [N,K]
with inf.replicate(size=N):
    w = Normal(0, 1, dim=K)

# x has shape [N,K] x [K,M] = [N,M]

with inf.replicate(size=N):
    x = Normal(inf.matmul(w, gamma), 1, observed=True)

m = inf.ProbModel([w, gamma, x])

data = m.sample(size=N)

log_prob = m.log_prob(data)

m.compile(infMethod='KLqp')

m.fit(data)

print(m.posterior([w, gamma]))
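# The per-variable log-probabilities computed above can also be aggregated
# with sum_log_prob, as the other examples do (sketch):
print(m.sum_log_prob(data))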
Example 8
import inferpy as inf
from inferpy.models import Normal, Beta, Categorical, Deterministic



d, N = 10, 200


# define the weights
w0 = Normal(0, 1)
with inf.replicate(size=d):
    w = Normal(0, 1)

p = Beta(1, 1)

# define the generative model
with inf.replicate(size=N):
    x = Normal(0, 1, observed=True, dim=d)
    y0 = Normal(inf.matmul(x, w), 1.0, observed=True)

    h = Categorical(probs=[p, 1-p])
    y1 = Deterministic(1.)

    # not working until issue #58 is solved
    y = Deterministic(inf.case({h.equal(0): y0, h.equal(1): y1}), observed=True)


h = Categorical(probs=[0.2, 0.8])
print(h.probs)
print(h.sample())
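# A rough empirical check (sketch): with probs=[0.2, 0.8] the mean of many
# draws should approach 0.8, assuming Categorical.sample(n) returns an array
# of integer class indices.
import numpy as np
draws = h.sample(1000)
print(np.mean(np.asarray(draws) == 1))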
Example 9
import inferpy as inf
from inferpy.models import Normal

# K defines the number of components.
K=10

# d defines the number of dimensions
d=20

#Prior for the principal components
with inf.replicate(size=K):
    w = Normal(loc=0, scale=1, dim=d)  # w.shape = [K,d]


###

# Number of observations
N = 1000

# define the generative model
with inf.replicate(size=N):
    z = Normal(0, 1, dim=K)  # z.shape = [N,K]
    x = Normal(inf.matmul(z,w), 1.0, observed=True, dim=d)  # x.shape = [N,d]


###

from inferpy import ProbModel

# Define the model
pca = ProbModel(varlist=[w, z, x])
Example 10
import edward as ed
import inferpy as inf
from inferpy.models import Normal

K, d, N = 5, 10, 200

# model definition
with inf.ProbModel() as m:
    #define the weights
    with inf.replicate(size=K):
        w = Normal(0, 1, dim=d)

    # define the generative model
    with inf.replicate(size=N):
        z = Normal(0, 1, dim=K)
        x = Normal(inf.matmul(z, w), 1.0, observed=True, dim=d)

# data generation
x_train = Normal(loc=0, scale=1., dim=d).sample(N)
data = {x.name: x_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

#extract the hidden representation from a set of observations
hidden_encoding = m.posterior(z)


Example 11
import inferpy as inf
from inferpy.models import Normal

with inf.ProbModel() as m:

    x = Normal(loc=1., scale=1., name="x", observed=True)
    y = Normal(loc=x, scale=1., dim=3, name="y")

# print the list of variables
print(m.varlist)
print(m.latent_vars)
print(m.observed_vars)

# get a sample

m_sample = m.sample()

# compute the log_prob for each element in the sample
print(m.log_prob(m_sample))

# compute the sum of the log_prob
print(m.sum_log_prob(m_sample))

### alternative definition

x2 = Normal(loc=1., scale=1., name="x2", observed=True)
y2 = Normal(loc=x2, scale=1., dim=3, name="y2")

m2 = inf.ProbModel(varlist=[x2, y2])
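# The explicitly constructed model supports the same operations as the
# context-manager version above (sketch):
m2_sample = m2.sample()
print(m2.log_prob(m2_sample))
print(m2.sum_log_prob(m2_sample))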