Example #1
    def test(self):

        with inf.ProbModel() as m:
            x = Normal(loc=1., scale=1., name="x", observed=True)
            y = Normal(loc=x, scale=1., dim=3, name="y")

        # print the list of variables
        print(m.varlist)
        print(m.latent_vars)
        print(m.observed_vars)

        # get a sample

        m_sample = m.sample()

        # compute the log_prob for each element in the sample
        print(m.log_prob(m_sample))

        # compute the sum of the log_prob
        print(m.sum_log_prob(m_sample))

        self.assertTrue(len(m.varlist) == 2)
        self.assertTrue(len(m.latent_vars) == 1)
        self.assertTrue(len(m.observed_vars) == 1)

        self.assertFalse(m.is_compiled())

        m.compile()

        self.assertTrue(m.is_compiled())

        z = Normal(loc=1., scale=1., dim=3, name="z")
        m.add_var(z)

        self.assertFalse(m.is_compiled())
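
        # recompiling after add_var should restore the compiled state
        # (a sketch extending the test, based only on the asserts above):
        m.compile()
        self.assertTrue(m.is_compiled())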
Example #2
    def test(self):

        with inf.ProbModel() as m:
            x = Normal(loc=1., scale=100., name="x")

            with inf.replicate(size=100):
                y = Normal(loc=x, scale=0.0001, dim=3, name="y", observed=True)

        # print the list of variables
        print(m.varlist)
        print(m.latent_vars)
        print(m.observed_vars)

        # get a sample
        m_sample = m.sample()
        print("sample:")
        print(m_sample)

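        # y was sampled with loc=x and a very small scale, so the mean difference
        # between the sampled values of x and y should be close to zero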
        self.assertTrue(
            np.abs(
                np.mean(
                    list(m_sample.values())[0] -
                    list(m_sample.values())[1])) < 1)

        # compute the log_prob for each element in the sample
        print(m.log_prob(m_sample))

        # compute the sum of the log_prob
        print(m.sum_log_prob(m_sample))

        self.assertTrue(len(m.varlist) == 2)
        self.assertTrue(len(m.latent_vars) == 1)
        self.assertTrue(len(m.observed_vars) == 1)

        self.assertFalse(m.is_compiled())

        m.compile()

        self.assertTrue(m.is_compiled())

        z = Normal(loc=1., scale=1., dim=3, name="z")
        m.add_var(z)

        self.assertFalse(m.is_compiled())
Example #3
import edward as ed
import inferpy as inf
from inferpy.models import Normal, InverseGamma

K, d, N = 5, 10, 200

# model definition
with inf.ProbModel() as m:
    # define the weights
    with inf.replicate(size=K):
        w = Normal(0, 1, dim=d)

    sigma = InverseGamma(1.0, 1.0)

    # define the generative model
    with inf.replicate(size=N):
        z = Normal(0, 1, dim=K)
        x = Normal(inf.matmul(z, w), sigma, observed=True, dim=d)

# toy data generation
x_train = Normal(loc=0, scale=1., dim=d).sample(N)
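# observed data are keyed by the variable's name (x.name)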
data = {x.name: x_train}

# compile and fit the model with training data
m.compile()
m.fit(data)

# extract the hidden representation from a set of observations
hidden_encoding = m.posterior(z)
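
# hidden_encoding is itself a distribution object; as in the test snippets below,
# its parameters can be inspected (e.g. hidden_encoding.loc)
print(hidden_encoding)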
Example #4
    def wrapper(*args, **kwargs):

        with inf.ProbModel() as m:
            f(*args, **kwargs)
        return m
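
# a minimal sketch of the presumed enclosing decorator (the name 'probmodel' and
# the usage below are assumptions, not from the source): it runs the decorated
# function inside a ProbModel context and returns the resulting model
def probmodel(f):
    def wrapper(*args, **kwargs):
        with inf.ProbModel() as m:
            f(*args, **kwargs)
        return m
    return wrapper

# usage sketch:
# @probmodel
# def my_model():
#     inf.models.Normal(loc=0., scale=1.)
#
# m = my_model()  # m is the ProbModel built around the variables defined in my_model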
Example #5
    def test(self):

        #### learning a 1-dim parameter from 1-dim data

        N = 500
        sampling_mean = [30.]
        sampling_std = 0.000000001
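        # a near-zero sampling_std makes x_train effectively constant at
        # sampling_mean, so the posterior of theta should concentrate near 30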
        sess = ed.util.get_session()

        with inf.ProbModel() as m:
            theta = inf.models.Normal(loc=0., scale=1.)

            with inf.replicate(size=N):
                x = inf.models.Normal(loc=theta, scale=1., observed=True)

        m.compile()

        x_train = inf.models.Normal(loc=sampling_mean, scale=sampling_std).sample(N)
        data = {x.name: x_train}

        m.fit(data)

        p1 = m.posterior(theta).loc[0]

        #### learning two 1-dim parameters from 2-dim data

        sampling_mean = [30., 10.]
        sess = ed.util.get_session()

        with inf.ProbModel() as m:
            theta1 = inf.models.Normal(loc=0., scale=1., dim=1)
            theta2 = inf.models.Normal(loc=0., scale=1., dim=1)

            with inf.replicate(size=N):
                x = inf.models.Normal(loc=[theta1, theta2], scale=1., observed=True)

        m.compile()

        x_train = inf.models.Normal(loc=sampling_mean, scale=sampling_std).sample(N)
        data = {x.name: x_train}

        m.fit(data)

        p3_1 = m.posterior(theta1).loc[0]
        p3_2 = m.posterior(theta2).loc[0]

        #### learning two 1-dim parameters from 2-dim data (with qmodel)

        sampling_mean = [30., 10.]
        sess = ed.util.get_session()

        with inf.ProbModel() as m:
            theta1 = inf.models.Normal(loc=0., scale=1., dim=1)
            theta2 = inf.models.Normal(loc=0., scale=1., dim=1)

            with inf.replicate(size=N):
                x = inf.models.Normal(loc=[theta1, theta2], scale=1., observed=True)

        # define the Qmodel

        q_theta1 = inf.Qmodel.new_qvar(theta1)
        q_theta2 = inf.Qmodel.new_qvar(theta2, initializer="ones")
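        # initializer="ones" presumably starts the q-variable's parameters at one
        # instead of the default initialization (an assumption based on the name)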

        qmodel = inf.Qmodel([q_theta1, q_theta2])

        m.compile(Q=qmodel)

        x_train = inf.models.Normal(loc=sampling_mean, scale=sampling_std).sample(N)
        data = {x.name: x_train}

        m.fit(data)

        p4_1 = m.posterior(theta1).loc[0]
        p4_2 = m.posterior(theta2).loc[0]

        print(p4_1)
        print(p4_2)

        ## asserts
        self.assertTrue(abs(p1 - 29.66) < 0.1)

        self.assertTrue(abs(p3_1 - 29.66) < 0.1)
        self.assertTrue(abs(p3_2 - 9.98) < 0.1)

        self.assertTrue(abs(p4_1 - 29.66) < 0.1)
        self.assertTrue(abs(p4_2 - 9.98) < 0.1)
Example #6
    def test(self):
        # import the package for using it

        sess = ed.get_session()

        flag_y = True  # (unused in this test)
        f = 2  # slope used to generate the toy data below

        N = 1000  # number of observations
        # model definition
        with inf.ProbModel() as m:
            # prior (latent variable)
            beta = inf.models.Normal(loc=0, scale=1, name="beta")
            w = inf.models.Normal(loc=0, scale=1, name="w")
            b = inf.models.Normal(loc=0, scale=1, name="b")
            betaz = inf.models.Normal(loc=0, scale=1, name="betaz")

            # observed variable
            with inf.replicate(size=N):
                z = inf.models.Normal(loc=betaz,
                                      scale=1,
                                      observed=False,
                                      name="z")
                x = inf.models.Normal(loc=beta + z,
                                      scale=1,
                                      observed=True,
                                      name="x")
                y = inf.models.Normal(loc=w * x + b + z,
                                      scale=1,
                                      observed=True,
                                      name="y")

        # toy data generation

        x_train = inf.models.Normal(loc=10, scale=3, dim=1).sample(N)
        y_train = x_train * f + inf.models.Normal(loc=1, scale=0.1,
                                                  dim=1).sample(N)

        data = {x.name: x_train, y.name: y_train}

        m.compile()
        m.fit(data)

        qbeta = m.posterior(beta)
        qw = m.posterior(w)
        qb = m.posterior(b)
        qz = m.posterior(z)

        x_test = inf.models.Normal(loc=10, scale=3, dim=1).sample(N)
        y_test = x_test * f + inf.models.Normal(loc=1, scale=0.1,
                                                dim=1).sample(N)

        y_pred = m.predict(y, data={x: x_test})

        self.assertTrue(np.max(y_pred - y_test) < 0.5)

        inf.evaluate("log_lik",
                     data={
                         x: x_test,
                         y_pred: y_test
                     },
                     output_key=y_pred)
        inf.evaluate("mean_squared_error",
                     data={
                         x: x_test,
                         y_pred: y_test
                     },
                     output_key=y_pred)
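
        # inf.evaluate presumably returns the metric value (an assumption, based
        # on the ed.evaluate it wraps); a sketch of capturing and printing it:
        mse = inf.evaluate("mean_squared_error",
                           data={
                               x: x_test,
                               y_pred: y_test
                           },
                           output_key=y_pred)
        print(mse)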
Example #7
import inferpy as inf
from inferpy.models import Normal

N = 200
M = 50
K = 5

# Shape [K,M]
with inf.replicate(size=K):
    gamma = Normal(0, 1, dim=M)

# Shape [N,K]
with inf.replicate(size=N):
    w = Normal(0, 1, dim=K)

# x has shape [N,K] x [K,M] = [N,M]

with inf.replicate(size=N):
    x = Normal(inf.matmul(w, gamma), 1, observed=True)

m = inf.ProbModel([w, gamma, x])

data = m.sample(size=N)

log_prob = m.log_prob(data)

m.compile(infMethod='KLqp')

m.fit(data)

print(m.posterior([w, gamma]))
Example #8
import inferpy as inf

with inf.ProbModel() as m:
    theta = inf.models.Beta(0.5, 0.5)
    z = inf.models.Categorical(probs=[theta, 1 - theta], name="z")

m.sample()

# Categorical variable depending on another categorical variable

with inf.ProbModel() as m2:
    y = inf.models.Categorical(probs=[0.4, 0.6], name="y")
    x = inf.models.Categorical(probs=inf.case({
        y.equal(0): [0.0, 1.0],
        y.equal(1): [1.0, 0.0]
    }),
                               name="x")
m2.sample()

# Categorical variable depending on a Normal distributed variable

with inf.ProbModel() as m3:
    a = inf.models.Normal(0, 1, name="a")
    b = inf.models.Categorical(probs=inf.case({
        a > 0: [0.0, 1.0],
        a <= 0: [1.0, 0.0]
    }),
                               name="b")
m3.sample()

# Normal distributed variable depending on a Categorical variable
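
# a minimal sketch of what this truncated example presumably showed, mirroring the
# inf.case pattern above (the values 0. and 100. are illustrative assumptions):
with inf.ProbModel() as m4:
    c = inf.models.Categorical(probs=[0.4, 0.6], name="c")
    d = inf.models.Normal(loc=inf.case({
        c.equal(0): 0.,
        c.equal(1): 100.
    }),
                          scale=1.,
                          name="d")
m4.sample()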
Example #9
    def test(self):

        #### learning a 1-dim parameter from 1-dim data

        N = 500
        sampling_mean = [30.]
        sampling_std = 0.000000001
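        # a near-zero sampling_std makes x_train effectively constant at
        # sampling_mean, so the posteriors should concentrate near the sampling means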
        sess = ed.util.get_session()

        with inf.ProbModel() as m:
            theta = inf.models.Normal(loc=0., scale=1.)

            with inf.replicate(size=N):
                x = inf.models.Normal(loc=theta, scale=1., observed=True)

        m.compile()

        x_train = inf.models.Normal(loc=sampling_mean,
                                    scale=sampling_std).sample(N)
        data = {x.name: x_train}

        m.fit(data)

        p1 = m.posterior(theta).loc[0]

        #### learning a 2-dim parameter from 2-dim data

        sampling_mean = [30., 10.]
        sess = ed.util.get_session()

        with inf.ProbModel() as m:
            theta = inf.models.Normal(loc=0., scale=1., dim=2)

            with inf.replicate(size=N):
                x = inf.models.Normal(loc=theta, scale=1., observed=True)

        m.compile()

        x_train = inf.models.Normal(loc=sampling_mean,
                                    scale=sampling_std).sample(N)
        data = {x.name: x_train}

        m.fit(data)

        p2_1 = m.posterior(theta).loc[0]
        p2_2 = m.posterior(theta).loc[1]

        #### learning two 1-dim parameters from 2-dim data

        sampling_mean = [30., 10.]
        sess = ed.util.get_session()

        with inf.ProbModel() as m:
            theta1 = inf.models.Normal(loc=0., scale=1., dim=1)
            theta2 = inf.models.Normal(loc=0., scale=1., dim=1)

            with inf.replicate(size=N):
                x = inf.models.Normal(loc=[theta1, theta2],
                                      scale=1.,
                                      observed=True)

        m.compile()

        x_train = inf.models.Normal(loc=sampling_mean,
                                    scale=sampling_std).sample(N)
        data = {x.name: x_train}

        m.fit(data)

        p3_1 = m.posterior(theta1).loc[0]
        p3_2 = m.posterior(theta2).loc[0]

        ## asserts

        print(p1)
        print(p2_1)
        print(p2_2)
        print(p3_1)
        print(p3_2)

        self.assertTrue(abs(p1 - 29.66) < 0.1)

        self.assertTrue(abs(p2_1 - 29.66) < 0.1)
        self.assertTrue(abs(p2_2 - 9.98) < 0.1)

        self.assertTrue(abs(p3_1 - 29.66) < 0.1)
        self.assertTrue(abs(p3_2 - 9.98) < 0.1)
Example #10
import inferpy as inf
from inferpy.models import Normal

with inf.ProbModel() as m:

    x = Normal(loc=1., scale=1., name="x", observed=True)
    y = Normal(loc=x, scale=1., dim=3, name="y")

# print the list of variables
print(m.varlist)
print(m.latent_vars)
print(m.observed_vars)

# get a sample

m_sample = m.sample()

# compute the log_prob for each element in the sample
print(m.log_prob(m_sample))

# compute the sum of the log_prob
print(m.sum_log_prob(m_sample))

### alternative definition

x2 = Normal(loc=1., scale=1., name="x2", observed=True)
y2 = Normal(loc=x2, scale=1., dim=3, name="y2")

m2 = inf.ProbModel(varlist=[x2, y2])
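
# the explicitly constructed model supports the same operations as one built in a
# context manager (a sketch based on the calls shown above):
m2_sample = m2.sample()
print(m2.log_prob(m2_sample))
print(m2.sum_log_prob(m2_sample))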