コード例 #1
0
# LDA-style generative model over a tiny corpus: per-document topic
# mixtures and per-topic word distributions, built as a Theano random-
# variable graph.  NOTE(review): `RandomStreams`, `memoized`, `tensor`,
# `theano` and `numpy` are provided by code outside this snippet — verify.
s_rng = RandomStreams(123)

# Model sizes and symmetric-Dirichlet hyperparameters.
nr_words = 4
nr_topics = 2
alpha = 0.8
beta = 1.

# Topic distribution per document
# (presumably memoized so repeated references to one doc_id share a single
# Dirichlet draw — confirm against the `memoized` implementation).
doc_mixture = memoized(lambda doc_id: s_rng.dirichlet([alpha/nr_topics]*nr_topics))

# Word distribution per topic
topic_mixture = memoized(lambda top_id: s_rng.dirichlet([beta/nr_words]*nr_words))

# For each word in the document, draw a topic according to multinomial with document specific prior
# TODO, see comment below: topics = memoized(lambda doc_id, nr: s_rng.multinomial(1, doc_mixture[doc_id], draw_shape=(nr,)))
# A binomial on the first mixture component stands in for the multinomial;
# this only works because nr_topics == 2.
topics = memoized(lambda doc_id, nr: s_rng.binomial(1, doc_mixture(doc_id)[0], draw_shape=(nr,)))

# Draw words for a specific topic
word_topic = lambda top_id: s_rng.multinomial(1, topic_mixture(top_id))

# TODO: memoized only works on the pre-compiled graph. This makes it fail in the case where we have to map 
# a vector of topics to individual multinomials with as priors the different topics. In the case of two topics
# we can hack around this by using a binomial topic distribution and using a switch statement here:
word_topic_mapper = lambda top_id: tensor.switch(top_id, word_topic(0), word_topic(1))

# Maps topics to words
# TODO, see comment above: get_words = memoized(lambda doc_id, nr: theano.map(word_topic, topics(doc_id, nr))[0])
get_words = memoized(lambda doc_id, nr: theano.map(word_topic_mapper, topics(doc_id, nr))[0])

# Define training 'documents'
document_1 = numpy.asarray([[1,0,0,0],
コード例 #2
0
from sample import hybridmc_sample
from rv import full_log_likelihood

from max_lik import likelihood_gradient

s_rng = RandomStreams(3424)

# Gaussian prior over the three regression weights (mean 0, std 2).
w = s_rng.normal(0, 2, draw_shape=(3,))

# Linear model squashed through a sigmoid: per-example class probability.
x = tensor.matrix('x')
y = tensor.nnet.sigmoid(tensor.dot(x, w))

# Bernoulli observation model: one binary label per training example.
t = s_rng.binomial(p=y, draw_shape=(4,))

# Toy data: 4 examples x 3 features (last row is a bias column of ones).
X_data = numpy.asarray(
    [[-1.5, -0.4, 1.3, 2.2],
     [-1.1, -2.2, 1.3, 0.0],
     [1.0, 1.0, 1.0, 1.0]],
    dtype=theano.config.floatX).T
Y_data = numpy.asarray([1.0, 1.0, 0.0, 0.0], dtype=theano.config.floatX)

# Gradient updates for the likelihood of the observed labels.
observations = {t: Y_data}
params, updates, log_likelihood = likelihood_gradient(observations)

# Compile the training step, binding the input matrix via givens.
givens = {x: X_data}
train = theano.function([], [log_likelihood], givens=givens, updates=updates)

# Run 100 epochs of training:
for i in range(100):
コード例 #3
0
# LDA-style generative model: per-document topic mixtures and per-topic
# word distributions over a 4-word vocabulary with 2 topics.
# NOTE(review): `s_rng`, `memoized`, `tensor` and `theano` are not defined
# in this snippet — presumably supplied by surrounding code; verify.
nr_words = 4
nr_topics = 2
alpha = 0.8
beta = 1.

# Topic distribution per document
# (symmetric Dirichlet, concentration alpha/nr_topics per component).
doc_mixture = memoized(
    lambda doc_id: s_rng.dirichlet([alpha / nr_topics] * nr_topics))

# Word distribution per topic
topic_mixture = memoized(
    lambda top_id: s_rng.dirichlet([beta / nr_words] * nr_words))

# For each word in the document, draw a topic according to multinomial with document specific prior
# TODO, see comment below: topics = memoized(lambda doc_id, nr: s_rng.multinomial(1, doc_mixture[doc_id], draw_shape=(nr,)))
# A binomial stands in for the multinomial; valid only since nr_topics == 2.
topics = memoized(lambda doc_id, nr: s_rng.binomial(
    1, doc_mixture(doc_id)[0], draw_shape=(nr, )))

# Draw words for a specific topic
word_topic = lambda top_id: s_rng.multinomial(1, topic_mixture(top_id))

# TODO: memoized only works on the pre-compiled graph. This makes it fail in the case where we have to map
# a vector of topics to individual multinomials with as priors the different topics. In the case of two topics
# we can hack around this by using a binomial topic distribution and using a switch statement here:
word_topic_mapper = lambda top_id: tensor.switch(top_id, word_topic(0),
                                                 word_topic(1))

# Maps topics to words
# TODO, see comment above: get_words = memoized(lambda doc_id, nr: theano.map(word_topic, topics(doc_id, nr))[0])
get_words = memoized(
    lambda doc_id, nr: theano.map(word_topic_mapper, topics(doc_id, nr))[0])
コード例 #4
0
import numpy, pylab
import theano
from theano import tensor
from rstreams import RandomStreams
import distributions
from sample import mh2_sample
from rv import full_log_likelihood

s_rng = RandomStreams(3424)

p = s_rng.dirichlet(numpy.asarray([1, 1]))[0]
m1 = s_rng.uniform(low=-5, high=5)
m2 = s_rng.uniform(low=-5, high=5)
v = s_rng.uniform(low=0, high=1)

C = s_rng.binomial(1, p, draw_shape=(4,))
m = tensor.switch(C, m1, m2)
D = s_rng.normal(m, v, draw_shape=(4,))        

D_data = numpy.asarray([1, 1.2, 3, 3.4], dtype=theano.config.floatX)

givens = dict([(D, D_data)])
sampler = mh2_sample(s_rng, [p, m1, m2, v], givens)            

samples = sampler(200, 1000, 100)
print samples[0].mean(), samples[1].mean(), samples[2].mean(), samples[3].mean()
コード例 #5
0
import numpy, pylab
import theano
from theano import tensor
from rstreams import RandomStreams
import distributions
from sample import mh2_sample
from for_theano import evaluate
from rv import full_log_likelihood

s_rng = RandomStreams(23424)

fair_prior = 0.999

coin_weight = tensor.switch(
    s_rng.binomial(1, fair_prior) > 0.5, 0.5,
    s_rng.dirichlet([1, 1])[0])

make_coin = lambda p, size: s_rng.binomial(1, p, draw_shape=(size, ))
coin = lambda size: make_coin(coin_weight, size)

for size in [1, 3, 6, 10, 20, 30, 50, 70, 100]:
    data = evaluate(make_coin(0.9, size))

    sampler = mh2_sample(s_rng, [coin_weight], {coin(size): data})

    print "nr of examples", size, ", estimated probability", sampler(
        nr_samples=400, burnin=20000, lag=10)[0].mean()
コード例 #6
0
import numpy, pylab
import theano
from theano import tensor
from rstreams import RandomStreams
import distributions
from sample import mh2_sample
from for_theano import evaluate
from rv import full_log_likelihood

s_rng = RandomStreams(23424)

fair_prior = 0.999

coin_weight = tensor.switch(s_rng.binomial(1, fair_prior) > 0.5, 0.5, s_rng.dirichlet([1, 1])[0])

make_coin = lambda p, size: s_rng.binomial(1, p, draw_shape=(size,))    
coin = lambda size: make_coin(coin_weight, size)
            
for size in [1, 3, 6, 10, 20, 30, 50, 70, 100]:
    data = evaluate(make_coin(0.9, size))
            
    sampler = mh2_sample(s_rng, [coin_weight], {coin(size) : data})            
    
    print "nr of examples", size, ", estimated probability", sampler(nr_samples=400, burnin=20000, lag=10)[0].mean()
コード例 #7
0
import numpy, pylab
import theano
from theano import tensor
from rstreams import RandomStreams
import distributions
from sample import mh2_sample
from rv import full_log_likelihood

s_rng = RandomStreams(3424)

p = s_rng.dirichlet(numpy.asarray([1, 1]))[0]
m1 = s_rng.uniform(low=-5, high=5)
m2 = s_rng.uniform(low=-5, high=5)
v = s_rng.uniform(low=0, high=1)

C = s_rng.binomial(1, p, draw_shape=(4, ))
m = tensor.switch(C, m1, m2)
D = s_rng.normal(m, v, draw_shape=(4, ))

D_data = numpy.asarray([1, 1.2, 3, 3.4], dtype=theano.config.floatX)

givens = dict([(D, D_data)])
sampler = mh2_sample(s_rng, [p, m1, m2, v], givens)

samples = sampler(200, 1000, 100)
print samples[0].mean(), samples[1].mean(), samples[2].mean(), samples[3].mean(
)