Code Example #1
File: test_rv.py Project: gwtaylor/MonteTheano
def test_normal_nonscalar():
    s_rng = RandomStreams(234)
    n = s_rng.normal()

    data = numpy.asarray([1, 2, 3, 4, 5])
    p_data = rv.lpdf(n, data)

    f = theano.function([], [p_data])

    pvals = f()
    targets = numpy.log(numpy.exp(-0.5 * (data**2)) / numpy.sqrt(2*numpy.pi))

    assert numpy.allclose(pvals,targets), (pvals, targets)
Code Example #2
File: test_rv.py Project: gwtaylor/MonteTheano
def test_normal_simple():
    s_rng = RandomStreams(23)
    n = s_rng.normal()

    p0 = rv.lpdf(n, 0)
    p1 = rv.lpdf(n, 1)
    pn1 = rv.lpdf(n, -1)

    f = theano.function([], [p0, p1, pn1])

    pvals = f()
    targets = numpy.asarray([
                numpy.log(1.0 / numpy.sqrt(2*numpy.pi)),
                numpy.log(numpy.exp(-0.5) / numpy.sqrt(2*numpy.pi)),
                numpy.log(numpy.exp(-0.5) / numpy.sqrt(2*numpy.pi)),
                ])

    assert numpy.allclose(pvals,targets), (pvals, targets)
Code Example #3
File: test_rv.py Project: gwtaylor/MonteTheano
def test_normal_w_params():
    s_rng = RandomStreams(23)
    n = s_rng.normal(mu=2, sigma=3)

    p0 = rv.lpdf(n, 0)
    p1 = rv.lpdf(n, 2)
    pn1 = rv.lpdf(n, -1)

    f = theano.function([], [p0, p1, pn1])

    pvals = f()
    targets = numpy.asarray([
                numpy.log(numpy.exp(-0.5 * ((2.0/3.0)**2)) /
                    numpy.sqrt(2*numpy.pi*9.0)),
                numpy.log(numpy.exp(0) / numpy.sqrt(2*numpy.pi*9)),
                numpy.log(numpy.exp(-0.5 * ((3.0/3.0)**2)) /
                    numpy.sqrt(2*numpy.pi*9.0)),
                ])

    assert numpy.allclose(pvals,targets), (pvals, targets)
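
For reference, the hand-written targets in this and the two previous tests are just the Gaussian log-density evaluated at the query points. A quick cross-check outside Theano (not part of the original test file; it assumes scipy is available):

from scipy.stats import norm

print(norm.logpdf([0, 2, -1], loc=2, scale=3))
# -> approximately [-2.2398, -2.0176, -2.5176], matching the targets above
print(norm.logpdf([1, 2, 3, 4, 5]))  # mu=0, sigma=1, as in Code Example #1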
Code Example #4
class Fitting1D(unittest.TestCase):
    def setUp(self):
        self.obs = tensor.as_tensor_variable(
                numpy.asarray([0.0, 1.01, 0.7, 0.65, 0.3]))
        self.rstream = RandomStreams(234)
        self.n = self.rstream.normal()
        self.u = self.rstream.uniform()

    def test_normal_ml(self):
        up = self.rstream.ml(self.n, self.obs)
        p = self.rstream.params(self.n)
        f = theano.function([], [up[p[0]], up[p[1]]])
        m,v = f()
        assert numpy.allclose([m,v], [.532, 0.34856276335])

    def test_uniform_ml(self):
        up = self.rstream.ml(self.u, self.obs)
        p = self.rstream.params(self.u)
        f = theano.function([], [up[p[0]], up[p[1]]])
        l,h = f()
        assert numpy.allclose([l,h], [0.0, 1.01])
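
The expected values here are the closed-form maximum-likelihood estimates: the sample mean and the (biased) sample standard deviation of the observations for the normal, and their minimum and maximum for the uniform. A quick check with plain numpy (not part of the original test file):

obs = numpy.asarray([0.0, 1.01, 0.7, 0.65, 0.3])
print(obs.mean(), obs.std())  # 0.532, 0.34856276...
print(obs.min(), obs.max())   # 0.0, 1.01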
Code Example #5
import numpy
import theano
from theano import tensor
from rstreams import RandomStreams
import distributions
from sample import hybridmc_sample
from rv import full_log_likelihood

from max_lik import likelihood_gradient 

s_rng = RandomStreams(3424)

# Weight prior:
w = s_rng.normal(0, 2, draw_shape=(3,))

# Linear model:
x = tensor.matrix('x')
y = tensor.nnet.sigmoid(tensor.dot(x, w))

# Bernoulli observation model:
t = s_rng.binomial(p=y, draw_shape=(4,))

# Some data:
X_data = numpy.asarray([[-1.5, -0.4, 1.3, 2.2], [-1.1, -2.2, 1.3, 0], [1., 1., 1., 1.]],
                       dtype=theano.config.floatX).T
Y_data = numpy.asarray([1., 1., 0., 0.], dtype=theano.config.floatX)

# Compute gradient updates:
observations = dict([(t, Y_data)])
params, updates, log_likelihood = likelihood_gradient(observations)

# Compile training function and assign input data as givens:
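# The example is truncated at this point. The lines below are only a sketch of
# one plausible continuation (not the original code): compile a training
# function that applies the updates returned by likelihood_gradient, feeding
# the inputs in through givens, then iterate it to fit the weights.
train = theano.function([], log_likelihood,
                        givens=dict([(x, X_data)]),
                        updates=updates,
                        allow_input_downcast=True)
for i in range(100):
    print(train())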
Code Example #6
import numpy, pylab
import theano
from theano import tensor
from rstreams import RandomStreams
import distributions
from sample import hybridmc_sample
from rv import full_log_likelihood

s_rng = RandomStreams(3424)

# Define model
w = s_rng.normal(0, 4, draw_shape=(2, ))

x = tensor.matrix('x')
y = tensor.nnet.sigmoid(tensor.dot(x, w))

t = s_rng.binomial(p=y, draw_shape=(4, ))

# Define data
X_data = numpy.asarray([[-1.5, -0.4, 1.3, 2.2], [-1.1, -2.2, 1.3, 0]],
                       dtype=theano.config.floatX).T
Y_data = numpy.asarray([1., 1., 0., 0.], dtype=theano.config.floatX)

# Plot full likelihood function
RVs = dict([(t, Y_data)])
lik = full_log_likelihood(RVs)

givens = dict([(x, X_data)])
lik_func = theano.function([w], lik, givens=givens, allow_input_downcast=True)

delta = .1
x_range = numpy.arange(-10.0, 10.0, delta)
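
# The example is truncated here. The lines below are only a sketch (not the
# original code): evaluate the compiled likelihood on a grid of weight values
# and contour-plot it with pylab.
y_range = numpy.arange(-10.0, 10.0, delta)
lik_grid = numpy.asarray([[lik_func(numpy.asarray([wx, wy]))
                           for wx in x_range] for wy in y_range])
pylab.contour(x_range, y_range, numpy.exp(lik_grid))
pylab.show()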
Code Example #7
import numpy, pylab
import theano
from theano import tensor
from rstreams import RandomStreams
import distributions
from sample import mh2_sample
from rv import full_log_likelihood

s_rng = RandomStreams(3424)
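
# Mixture of two Gaussians: p is the mixing weight (flat Dirichlet prior),
# m1 and m2 are the component means, v is the shared noise parameter, and C
# picks a component for each of the four observed values in D.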

p = s_rng.dirichlet(numpy.asarray([1, 1]))[0]
m1 = s_rng.uniform(low=-5, high=5)
m2 = s_rng.uniform(low=-5, high=5)
v = s_rng.uniform(low=0, high=1)

C = s_rng.binomial(1, p, draw_shape=(4,))
m = tensor.switch(C, m1, m2)
D = s_rng.normal(m, v, draw_shape=(4,))        

D_data = numpy.asarray([1, 1.2, 3, 3.4], dtype=theano.config.floatX)

givens = dict([(D, D_data)])
sampler = mh2_sample(s_rng, [p, m1, m2, v], givens)            

samples = sampler(200, 1000, 100)
print samples[0].mean(), samples[1].mean(), samples[2].mean(), samples[3].mean()
Code Example #8
# (The snippet starts inside a helper function; its signature is reconstructed
#  here from the call poly_expansion(x_in, m) below. The module-level imports
#  and the s_rng = RandomStreams(...) setup from the earlier examples are
#  assumed.)
def poly_expansion(x, order):
    x = x.T
    # Build the powers x, x**2, ..., x**order by repeatedly multiplying the
    # running result by x.
    result, updates = theano.scan(fn=lambda prior_result, x: prior_result * x,
                                  outputs_info=tensor.ones_like(x),
                                  non_sequences=x,
                                  n_steps=order)

    return tensor.concatenate([
        tensor.ones([x.shape[1], 1]),
        tensor.reshape(result.T, (x.shape[1], x.shape[0] * order))
    ], axis=1)
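
# As a rough plain-numpy illustration (not the library code) of what
# poly_expansion produces for a single input column: a bias column of ones
# followed by the powers x, x**2, ..., x**order.
def poly_expansion_np(x_col, order):
    x_col = numpy.asarray(x_col, dtype=float).reshape(-1, 1)
    return numpy.hstack([numpy.ones_like(x_col)] +
                        [x_col ** k for k in range(1, order + 1)])

# poly_expansion_np([0.5, -1.0], 3)
# -> [[ 1.     0.5    0.25   0.125]
#     [ 1.    -1.     1.    -1.   ]]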


# Define priors to be inverse gamma distributions
alpha = 1 / s_rng.gamma(1., 2.)
beta = 1 / s_rng.gamma(1., .1)

# Order of the model
# TODO: this currently has to be fixed, would be nice if this could also be a RV!
m = 7  #s_rng.random_integers(1, 10)
w = s_rng.normal(0, beta, draw_shape=(m + 1, ))

# Input variable used for training
x = tensor.matrix('x')
# Input variable used for testing
xn = tensor.matrix('xn')

# Actual linear model
y = lambda x_in: tensor.dot(poly_expansion(x_in, m), w)

# Observation model
t = s_rng.normal(y(x), alpha, draw_shape=(10, ))

# Generate some noisy training data (sine + noise)
X_data = numpy.arange(-1, 1, 0.3)
Y_data = numpy.sin(numpy.pi * X_data) + 0.1 * numpy.random.randn(*X_data.shape)