Exemplo n.º 1
0
    def mcmc(ll, *frvs):
        """One Hamiltonian Monte Carlo (leapfrog) step, written as a
        theano.scan inner function.

        ll is the negative log-likelihood carried from the previous
        iteration; frvs are the current values of the free random
        variables (aligned with ``free_RVs`` from the enclosing scope).

        Returns ([new_ll] + new_states, {}, until(accept)) so the scan
        stops as soon as a proposal is accepted.
        """
        # Bind the current samples to their RVs on top of the fixed
        # observations, then evaluate the negated joint log-likelihood.
        full_observations = dict(observations)
        full_observations.update(dict([(rv, s) for rv, s in zip(free_RVs, frvs)]))
        
        loglik = -full_log_likelihood(full_observations)

        # Total energy H: kinetic term from the momenta (free_RVs_prop)
        # plus the potential term (negative log-likelihood).
        proposals = free_RVs_prop
        H = tensor.add(*[tensor.sum(tensor.sqr(p)) for p in proposals])/2. + loglik

# -- this should be an inner loop
        g = []
        g.append(tensor.grad(loglik, frvs))
        
        # NOTE(review): tensor.grad(cost, frvs) returns a *list* of
        # gradients and g wraps it in another one-element list, so
        # zip(proposals, g) pairs only the first proposal with the whole
        # gradient list -- with more than one free RV everything past the
        # first is silently dropped.  Confirm whether a single free RV is
        # assumed here.
        proposals = [(p - epsilon*gg[0]/2.) for p, gg in zip(proposals, g)]

        # Full-step position update.
        rvsp = [(rvs + epsilon*rvp) for rvs,rvp in zip(frvs, proposals)]
        
        # Re-evaluate the likelihood at the proposed positions.
        full_observations = dict(observations)
        full_observations.update(dict([(rv, s) for rv, s in zip(free_RVs, rvsp)]))
        new_loglik = -full_log_likelihood(full_observations)
        
        # Second half-step momentum update using the new gradient.
        gnew = []
        gnew.append(tensor.grad(new_loglik, rvsp))
        proposals = [(p - epsilon*gn[0]/2.) for p, gn in zip(proposals, gnew)]
# --
        
        Hnew = tensor.add(*[tensor.sum(tensor.sqr(p)) for p in proposals])/2. + new_loglik

        # Metropolis accept/reject on the energy difference; U is a
        # uniform(0, 1) draw from the enclosing scope.
        dH = Hnew - H
        accept = tensor.or_(dH < 0., U < tensor.exp(-dH))

        return [tensor.switch(accept, -new_loglik, ll)] + \
            [tensor.switch(accept, p, f) for p, f in zip(rvsp, frvs)], \
            {}, theano.scan_module.until(accept)
Exemplo n.º 2
0
def likelihood_gradient(observations=None, learning_rate=0.1):
    """Build gradient-ascent update equations for maximum-likelihood
    estimation of the free random variables.

    Parameters
    ----------
    observations : dict or None
        Maps observed raw random variables to their observed values.
        Every key must be a raw RV appearing in the graph.  Defaults to
        no observations.
    learning_rate : float
        Step size for the gradient-ascent updates.

    Returns
    -------
    (params, updates, log_likelihood)
        ``params`` maps each free RV to a shared variable holding its
        current value; ``updates`` maps each shared variable to its
        gradient-ascent update expression; ``log_likelihood`` is the
        symbolic full joint log-likelihood.
    """
    # Use a None sentinel: a mutable {} default would be one dict shared
    # across all calls.
    if observations is None:
        observations = {}

    all_vars = ancestors(list(observations.keys()))

    # Every observed variable must be a raw RV present in the graph.
    for o in observations:
        assert o in all_vars
        if not is_raw_rv(o):
            raise TypeError(o)

    RVs = [v for v in all_vars if is_raw_rv(v)]
    free_RVs = [v for v in RVs if v not in observations]

    # Instantiate actual values for the free random variables by drawing
    # one sample from each (python linker, no optimizer: cheap one-shot
    # compilation).
    params = dict()
    for v in free_RVs:
        f = theano.function([], v, mode=theano.Mode(linker='py', optimizer=None))
        params[v] = theano.shared(f())

    # Compute the full log likelihood with the free RVs clamped to their
    # shared-variable values.
    full_observations = dict(observations)
    full_observations.update(params)
    log_likelihood = full_log_likelihood(full_observations)

    # Gradient *ascent*: move each parameter uphill on the
    # log-likelihood (note the plus sign).
    updates = dict()
    for frvs in params.values():
        updates[frvs] = frvs + learning_rate * tensor.grad(log_likelihood, frvs)

    return params, updates, log_likelihood
Exemplo n.º 3
0
 def test_tt(self):
     RVs = dict([(self.D, self.D_data)])
     lik = full_log_likelihood(RVs)
     
     lf = theano.function([self.m1, self.m2, self.C], lik)
     
     print lf(1,3,0)
     print lf(1,3,1)
Exemplo n.º 4
0
    def test_tt(self):
        RVs = dict([(self.D, self.D_data)])
        lik = full_log_likelihood(RVs)

        lf = theano.function([self.m1, self.m2, self.C], lik)

        print lf(1, 3, 0)
        print lf(1, 3, 1)
Exemplo n.º 5
0
    def mcmc(ll, *frvs):
        """One Hamiltonian Monte Carlo (leapfrog) step, written as a
        theano.scan inner function.

        ll is the negative log-likelihood carried from the previous
        iteration; frvs are the current values of the free random
        variables (aligned with ``free_RVs`` from the enclosing scope).

        Returns ([new_ll] + new_states, {}, until(accept)) so the scan
        stops as soon as a proposal is accepted.
        """
        # Bind the current samples to their RVs on top of the fixed
        # observations, then evaluate the negated joint log-likelihood.
        full_observations = dict(observations)
        full_observations.update(
            dict([(rv, s) for rv, s in zip(free_RVs, frvs)]))

        loglik = -full_log_likelihood(full_observations)

        # Total energy H: kinetic term from the momenta (free_RVs_prop)
        # plus the potential term (negative log-likelihood).
        proposals = free_RVs_prop
        H = tensor.add(*[tensor.sum(tensor.sqr(p))
                         for p in proposals]) / 2. + loglik

        # -- this should be an inner loop
        g = []
        g.append(tensor.grad(loglik, frvs))

        # NOTE(review): tensor.grad(cost, frvs) returns a *list* of
        # gradients and g wraps it in another one-element list, so
        # zip(proposals, g) pairs only the first proposal with the whole
        # gradient list -- with more than one free RV everything past the
        # first is silently dropped.  Confirm whether a single free RV is
        # assumed here.
        proposals = [(p - epsilon * gg[0] / 2.) for p, gg in zip(proposals, g)]

        # Full-step position update.
        rvsp = [(rvs + epsilon * rvp) for rvs, rvp in zip(frvs, proposals)]

        # Re-evaluate the likelihood at the proposed positions.
        full_observations = dict(observations)
        full_observations.update(
            dict([(rv, s) for rv, s in zip(free_RVs, rvsp)]))
        new_loglik = -full_log_likelihood(full_observations)

        # Second half-step momentum update using the new gradient.
        gnew = []
        gnew.append(tensor.grad(new_loglik, rvsp))
        proposals = [(p - epsilon * gn[0] / 2.)
                     for p, gn in zip(proposals, gnew)]
        # --

        Hnew = tensor.add(*[tensor.sum(tensor.sqr(p))
                            for p in proposals]) / 2. + new_loglik

        # Metropolis accept/reject on the energy difference; U is a
        # uniform(0, 1) draw from the enclosing scope.
        dH = Hnew - H
        accept = tensor.or_(dH < 0., U < tensor.exp(-dH))

        return [tensor.switch(accept, -new_loglik, ll)] + \
            [tensor.switch(accept, p, f) for p, f in zip(rvsp, frvs)], \
            {}, theano.scan_module.until(accept)
Exemplo n.º 6
0
    def mcmc(ll, *frvs):
        """One Metropolis-Hastings step over all free RVs jointly, written
        as a theano.scan inner function.

        ll is the log-likelihood of the current state; frvs are the
        current values of the free random variables (aligned with
        ``free_RVs`` from the enclosing scope).
        """
        # Forward proposals for every free RV, plus reverse-direction
        # proposals used for the Hastings correction.
        proposals = [s_rng.local_proposal(v, rvs) for v, rvs in zip(free_RVs, frvs)]
        proposals_rev = [s_rng.local_proposal(v, rvs) for v, rvs in zip(free_RVs, proposals)]

        # Joint likelihood with the proposals substituted for the free RVs.
        full_observations = dict(observations)
        full_observations.update(dict([(rv, s) for rv, s in zip(free_RVs, proposals)]))
        new_log_likelihood = full_log_likelihood(full_observations)

        # log acceptance ratio = likelihood ratio + Hastings correction.
        # lpdf(p, r) is presumably the log-density of proposal RV p
        # evaluated at r, so zip(proposals, proposals) scores each
        # proposal at its own sampled value -- TODO confirm against
        # lpdf's contract.
        logratio = new_log_likelihood - ll \
            + tensor.add(*[tensor.sum(lpdf(p, r)) for p, r in zip(proposals_rev, frvs)]) \
            - tensor.add(*[tensor.sum(lpdf(p, r)) for p, r in zip(proposals, proposals)])
                   
        # Accept when the log-ratio beats a fresh uniform draw U.
        accept = tensor.gt(logratio, tensor.log(U))
        
        return [tensor.switch(accept, new_log_likelihood, ll)] + \
               [tensor.switch(accept, p, f) for p, f in zip(proposals, frvs)], \
               {}, theano.scan_module.until(accept)
Exemplo n.º 7
0
    def mcmc(ll, *frvs):
        """One Metropolis-Hastings step over all free RVs jointly, written
        as a theano.scan inner function.

        ll is the log-likelihood of the current state; frvs are the
        current values of the free random variables (aligned with
        ``free_RVs`` from the enclosing scope).
        """
        # Forward proposals for every free RV, plus reverse-direction
        # proposals used for the Hastings correction.
        proposals = [
            s_rng.local_proposal(v, rvs) for v, rvs in zip(free_RVs, frvs)
        ]
        proposals_rev = [
            s_rng.local_proposal(v, rvs)
            for v, rvs in zip(free_RVs, proposals)
        ]

        # Joint likelihood with the proposals substituted for the free RVs.
        full_observations = dict(observations)
        full_observations.update(
            dict([(rv, s) for rv, s in zip(free_RVs, proposals)]))
        new_log_likelihood = full_log_likelihood(full_observations)

        # log acceptance ratio = likelihood ratio + Hastings correction.
        # lpdf(p, r) is presumably the log-density of proposal RV p
        # evaluated at r, so zip(proposals, proposals) scores each
        # proposal at its own sampled value -- TODO confirm against
        # lpdf's contract.
        logratio = new_log_likelihood - ll \
            + tensor.add(*[tensor.sum(lpdf(p, r)) for p, r in zip(proposals_rev, frvs)]) \
            - tensor.add(*[tensor.sum(lpdf(p, r)) for p, r in zip(proposals, proposals)])

        # Accept when the log-ratio beats a fresh uniform draw U.
        accept = tensor.gt(logratio, tensor.log(U))

        return [tensor.switch(accept, new_log_likelihood, ll)] + \
               [tensor.switch(accept, p, f) for p, f in zip(proposals, frvs)], \
               {}, theano.scan_module.until(accept)
Exemplo n.º 8
0
def mh2_sample(s_rng, outputs, observations=None, givens=None):
    """Build a component-wise Metropolis-Hastings sampler.

    Compiles one accept/reject update function per free random variable
    and returns a ``sampler(nr_samples, burnin, lag)`` closure that
    draws samples of ``outputs`` by repeatedly updating one randomly
    chosen free RV at a time.

    Parameters
    ----------
    s_rng : random stream providing uniform() and local_proposal()
    outputs : list of variables whose samples should be recorded
    observations : dict or None
        Observed raw RVs mapped to their values (default: none).
    givens : dict or None
        Extra substitutions forwarded to theano.function (default: none).
    """
    # Use None sentinels: mutable {} defaults would be shared between
    # calls to this function.
    if observations is None:
        observations = {}
    if givens is None:
        givens = {}

    all_vars = ancestors(list(observations.keys()) + list(outputs))
        
    # Every observed variable must be a raw RV present in the graph.
    for o in observations:
        assert o in all_vars
        if not is_raw_rv(o):
            raise TypeError(o)
    
    RVs = [v for v in all_vars if is_raw_rv(v)]
    free_RVs = [v for v in RVs if v not in observations]
    
    # Draw an initial value for each free RV into a shared variable;
    # these shared variables hold the chain's current state.
    free_RVs_state = []
    for v in free_RVs:
        f = theano.function([], v,
                mode=theano.Mode(linker='py', optimizer=None))
        free_RVs_state.append(theano.shared(f()))
    
    U = s_rng.uniform(low=0.0, high=1.0)
    
    # One compiled accept/reject step per free RV.
    rr = []
    for index in range(len(free_RVs)):
        # TODO: why does the compiler crash when we try to expose the likelihood ?
        full_observations = dict(observations)
        full_observations.update(dict([(rv, s) for rv, s in zip(free_RVs, free_RVs_state)]))
        log_likelihood = full_log_likelihood(full_observations)
        
        proposal = s_rng.local_proposal(free_RVs[index], free_RVs_state[index])
        proposal_rev = s_rng.local_proposal(free_RVs[index], proposal)

        # Likelihood with only this RV replaced by its proposal.
        full_observations = dict(observations)
        full_observations.update(dict([(rv, s) for rv, s in zip(free_RVs, free_RVs_state)]))
        full_observations.update(dict([(free_RVs[index], proposal)]))
        new_log_likelihood = full_log_likelihood(full_observations)

        # Hastings correction: backward and forward proposal log-densities.
        bw = tensor.sum(lpdf(proposal_rev, free_RVs_state[index]))
        fw = tensor.sum(lpdf(proposal, proposal))

        lr = new_log_likelihood - log_likelihood + bw - fw

        accept = tensor.gt(lr, tensor.log(U))

        # On acceptance, commit the proposal into the chain state.
        updates = {free_RVs_state[index]: tensor.switch(accept, proposal, free_RVs_state[index])}
        rr.append(theano.function([], [accept], updates=updates, givens=givens))
    
    # TODO: the exact amount of samples given back is still wrong
    def sampler(nr_samples, burnin=100, lag=100):
        """Run the chain and return one array of samples per output."""
        data = [[] for o in outputs]
        for i in range(nr_samples * lag + burnin):
            accept = False
            while not accept:
                # Pick one free RV uniformly at random and try to update it.
                index = numpy.random.randint(len(free_RVs))

                accept = rr[index]()
                # Record only post-burnin, thinned iterations.
                if accept and i > burnin and (i - burnin) % lag == 0:
                    for d, o in zip(data, outputs):
                        # TODO: this can be optimized
                        if is_raw_rv(o):
                            d.append(free_RVs_state[free_RVs.index(o)].get_value())
                        else:
                            # Derived quantity: evaluate it under the
                            # current chain state.
                            full_observations = dict(observations)
                            full_observations.update(dict([(rv, s) for rv, s in zip(free_RVs, free_RVs_state)]))
                            d.append(evaluate(evaluate_with_assignments(o, full_observations), givens=givens))
        data = [numpy.asarray(d).squeeze() for d in data]
        
        return data
    
    return sampler
# Define model
# Bayesian logistic regression: a 2-vector of weights with a normal
# prior.  NOTE(review): whether the second argument of s_rng.normal is
# a variance or a standard deviation is not visible here -- check its
# definition.
w = s_rng.normal(0, 4, draw_shape=(2, ))

# Class probabilities: sigmoid of the linear response x @ w.
x = tensor.matrix('x')
y = tensor.nnet.sigmoid(tensor.dot(x, w))

# Bernoulli-distributed labels for the 4 data points.
t = s_rng.binomial(p=y, draw_shape=(4, ))

# Define data
# Two feature rows transposed to shape (4, 2): four examples, two
# features each.
X_data = numpy.asarray([[-1.5, -0.4, 1.3, 2.2], [-1.1, -2.2, 1.3, 0]],
                       dtype=theano.config.floatX).T
Y_data = numpy.asarray([1., 1., 0., 0.], dtype=theano.config.floatX)

# Plot full likelihood function
# Clamp the labels to their observed values and compile the joint
# log-likelihood as a function of the weight vector.
RVs = dict([(t, Y_data)])
lik = full_log_likelihood(RVs)

givens = dict([(x, X_data)])
lik_func = theano.function([w], lik, givens=givens, allow_input_downcast=True)

# Evaluate the likelihood on a 200x200 grid over weight space
# ([-10, 10) in each dimension, step 0.1).
delta = .1
x_range = numpy.arange(-10.0, 10.0, delta)
y_range = numpy.arange(-10.0, 10.0, delta)
X, Y = numpy.meshgrid(x_range, y_range)

response = []
for xl, yl in zip(X.flatten(), Y.flatten()):
    response.append(lik_func([xl, yl]))

# Contour plot of the (exponentiated) likelihood surface.
pylab.figure(1)
pylab.contour(X, Y, numpy.exp(numpy.asarray(response)).reshape(X.shape), 20)
Exemplo n.º 10
0
def mh2_sample(s_rng, outputs, observations=None, givens=None):
    """Build a component-wise Metropolis-Hastings sampler.

    Compiles one accept/reject update function per free random variable
    and returns a ``sampler(nr_samples, burnin, lag)`` closure that
    draws samples of ``outputs`` by repeatedly updating one randomly
    chosen free RV at a time.

    Parameters
    ----------
    s_rng : random stream providing uniform() and local_proposal()
    outputs : list of variables whose samples should be recorded
    observations : dict or None
        Observed raw RVs mapped to their values (default: none).
    givens : dict or None
        Extra substitutions forwarded to theano.function (default: none).
    """
    # Use None sentinels: mutable {} defaults would be shared between
    # calls to this function.
    if observations is None:
        observations = {}
    if givens is None:
        givens = {}

    all_vars = ancestors(list(observations.keys()) + list(outputs))

    # Every observed variable must be a raw RV present in the graph.
    for o in observations:
        assert o in all_vars
        if not is_raw_rv(o):
            raise TypeError(o)

    RVs = [v for v in all_vars if is_raw_rv(v)]
    free_RVs = [v for v in RVs if v not in observations]

    # Draw an initial value for each free RV into a shared variable;
    # these shared variables hold the chain's current state.
    free_RVs_state = []
    for v in free_RVs:
        f = theano.function([],
                            v,
                            mode=theano.Mode(linker='py', optimizer=None))
        free_RVs_state.append(theano.shared(f()))

    U = s_rng.uniform(low=0.0, high=1.0)

    # One compiled accept/reject step per free RV.
    rr = []
    for index in range(len(free_RVs)):
        # TODO: why does the compiler crash when we try to expose the likelihood ?
        full_observations = dict(observations)
        full_observations.update(
            dict([(rv, s) for rv, s in zip(free_RVs, free_RVs_state)]))
        log_likelihood = full_log_likelihood(full_observations)

        proposal = s_rng.local_proposal(free_RVs[index], free_RVs_state[index])
        proposal_rev = s_rng.local_proposal(free_RVs[index], proposal)

        # Likelihood with only this RV replaced by its proposal.
        full_observations = dict(observations)
        full_observations.update(
            dict([(rv, s) for rv, s in zip(free_RVs, free_RVs_state)]))
        full_observations.update(dict([(free_RVs[index], proposal)]))
        new_log_likelihood = full_log_likelihood(full_observations)

        # Hastings correction: backward and forward proposal log-densities.
        bw = tensor.sum(lpdf(proposal_rev, free_RVs_state[index]))
        fw = tensor.sum(lpdf(proposal, proposal))

        lr = new_log_likelihood - log_likelihood + bw - fw

        accept = tensor.gt(lr, tensor.log(U))

        # On acceptance, commit the proposal into the chain state.
        updates = {
            free_RVs_state[index]:
            tensor.switch(accept, proposal, free_RVs_state[index])
        }
        rr.append(theano.function([], [accept], updates=updates,
                                  givens=givens))

    # TODO: the exact amount of samples given back is still wrong
    def sampler(nr_samples, burnin=100, lag=100):
        """Run the chain and return one array of samples per output."""
        data = [[] for o in outputs]
        for i in range(nr_samples * lag + burnin):
            accept = False
            while not accept:
                # Pick one free RV uniformly at random and try to update it.
                index = numpy.random.randint(len(free_RVs))

                accept = rr[index]()
                # Record only post-burnin, thinned iterations.
                if accept and i > burnin and (i - burnin) % lag == 0:
                    for d, o in zip(data, outputs):
                        # TODO: this can be optimized
                        if is_raw_rv(o):
                            d.append(
                                free_RVs_state[free_RVs.index(o)].get_value())
                        else:
                            # Derived quantity: evaluate it under the
                            # current chain state.
                            full_observations = dict(observations)
                            full_observations.update(
                                dict([
                                    (rv, s)
                                    for rv, s in zip(free_RVs, free_RVs_state)
                                ]))
                            d.append(
                                evaluate(evaluate_with_assignments(
                                    o, full_observations),
                                         givens=givens))
        data = [numpy.asarray(d).squeeze() for d in data]

        return data

    return sampler
# Define model
# Bayesian logistic regression: a 2-vector of weights with a normal
# prior.  NOTE(review): whether the second argument of s_rng.normal is
# a variance or a standard deviation is not visible here -- check its
# definition.
w = s_rng.normal(0, 4, draw_shape=(2,))

# Class probabilities: sigmoid of the linear response x @ w.
x = tensor.matrix("x")
y = tensor.nnet.sigmoid(tensor.dot(x, w))

# Bernoulli-distributed labels for the 4 data points.
t = s_rng.binomial(p=y, draw_shape=(4,))

# Define data
# Two feature rows transposed to shape (4, 2): four examples, two
# features each.
X_data = numpy.asarray([[-1.5, -0.4, 1.3, 2.2], [-1.1, -2.2, 1.3, 0]], dtype=theano.config.floatX).T
Y_data = numpy.asarray([1.0, 1.0, 0.0, 0.0], dtype=theano.config.floatX)

# Plot full likelihood function
# Clamp the labels to their observed values and compile the joint
# log-likelihood as a function of the weight vector.
RVs = dict([(t, Y_data)])
lik = full_log_likelihood(RVs)

givens = dict([(x, X_data)])
lik_func = theano.function([w], lik, givens=givens, allow_input_downcast=True)

# Evaluate the likelihood on a 200x200 grid over weight space
# ([-10, 10) in each dimension, step 0.1).
delta = 0.1
x_range = numpy.arange(-10.0, 10.0, delta)
y_range = numpy.arange(-10.0, 10.0, delta)
X, Y = numpy.meshgrid(x_range, y_range)

response = []
for xl, yl in zip(X.flatten(), Y.flatten()):
    response.append(lik_func([xl, yl]))

# Contour plot of the (exponentiated) likelihood surface.
pylab.figure(1)
pylab.contour(X, Y, numpy.exp(numpy.asarray(response)).reshape(X.shape), 20)