Example #1
def test_uniform_w_params():
    s_rng = RandomStreams(234)
    u = s_rng.uniform(low=-0.999, high=9.001)

    p0 = rv.lpdf(u, 0)
    p1 = rv.lpdf(u, 2)
    p05 = rv.lpdf(u, -1.5)
    pn1 = rv.lpdf(u, 10)

    f = theano.function([], [p0, p1, p05, pn1])

    pvals = f()
    targets = numpy.log(numpy.asarray([.1, .1, 0, 0]))
    assert numpy.allclose(pvals, targets), (pvals, targets)
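These snippets are excerpts and assume the surrounding project's imports (theano, numpy, an rv module providing lpdf, and RandomStreams); they are not self-contained. The expected values follow from the uniform density: for U(low, high) the density is 1/(high - low) = 1/10 on the support and 0 outside, so the log-density is log(0.1) inside and -inf outside. Below is a minimal NumPy reference for what the test checks, an illustrative sketch rather than the library's implementation (how the interval boundaries are handled is an assumption):

import numpy

def uniform_logpdf(x, low, high):
    # Reference log-density of U(low, high): log(1 / (high - low)) inside
    # the support, log(0) = -inf outside, mirroring `targets` above.
    x = numpy.asarray(x, dtype=float)
    dens = numpy.where((x >= low) & (x <= high), 1.0 / (high - low), 0.0)
    with numpy.errstate(divide='ignore'):  # allow log(0) -> -inf
        return numpy.log(dens)

# Same points as the test: densities [.1, .1, 0, 0] for low=-0.999, high=9.001.
print(uniform_logpdf([0, 2, -1.5, 10], -0.999, 9.001))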
Example #2
def test_uniform_simple():
    s_rng = RandomStreams(234)
    u = s_rng.uniform()

    p0 = rv.lpdf(u, 0)
    p1 = rv.lpdf(u, 1)
    p05 = rv.lpdf(u, 0.5)
    pn1 = rv.lpdf(u, -1)

    f = theano.function([], [p0, p1, p05, pn1])

    pvals = f()
    targets = numpy.log(numpy.asarray([1.0, 1.0, 1.0, 0.0]))

    assert numpy.allclose(pvals, targets), (pvals, targets)
Example #3
    def test_lpdf_matrix_N_components(self):
        xval = tensor.dmatrix()
        ll = lpdf(self.xmat, xval)
        assert ll.ndim == 2, ll.type
        f = theano.function([xval, self.weights, self.mus, self.sigmas], ll)
        llval = f(
            [[1.0, 0.0, 0.0], [0, 0, 1]],  # x
            [0.25, 0.25, .5],  # weights
            [0.0, 1.0, 2.0],  # mu
            [1.0, 2.0, 5.0],  # sigma
        )

        a = (.25 / numpy.sqrt(2 * numpy.pi * 1.0**2) *
             numpy.exp(-.5 * (1.0)**2))
        a += (.25 / numpy.sqrt(2 * numpy.pi * 2.0**2))
        a += (.5 / numpy.sqrt(2 * numpy.pi * 5.0**2) *
              numpy.exp(-.5 * (1.0 / 5.0)**2))

        assert llval.shape == (2, 3)
        assert numpy.allclose(llval[0, 0], numpy.log(a))
        assert numpy.allclose(llval[1, 2], numpy.log(a))

        a = (.25 / numpy.sqrt(2 * numpy.pi * 1.0**2))
        a += (.25 / numpy.sqrt(2 * numpy.pi * 2.0**2) *
              numpy.exp(-.5 * (1.0 / 2.0)**2))
        a += (.5 / numpy.sqrt(2 * numpy.pi * 5.0**2) *
              numpy.exp(-.5 * (2.0 / 5.0)**2))

        assert numpy.allclose(llval[0, 1], numpy.log(a))
        assert numpy.allclose(llval[0, 2], numpy.log(a))
        assert numpy.allclose(llval[1, 0], numpy.log(a))
        assert numpy.allclose(llval[1, 1], numpy.log(a))
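The hand-computed `a` values above are the Gaussian-mixture density sum_k w_k * N(x; mu_k, sigma_k) evaluated at x = 1 and x = 0. The following is a plain NumPy reference for that quantity, an illustrative sketch under the same weights/mus/sigmas rather than the library's implementation:

import numpy

def gmm_logpdf(x, weights, mus, sigmas):
    # Reference log-density of a 1-D Gaussian mixture:
    # log sum_k w_k * N(x; mu_k, sigma_k), matching the sums above.
    x = numpy.asarray(x, dtype=float)[..., None]  # broadcast over components
    w, mu, sigma = map(numpy.asarray, (weights, mus, sigmas))
    comp = (w / numpy.sqrt(2 * numpy.pi * sigma ** 2)
            * numpy.exp(-0.5 * ((x - mu) / sigma) ** 2))
    return numpy.log(comp.sum(axis=-1))

# Same expected values as llval[0, 0] (x = 1) and llval[0, 1] (x = 0):
print(gmm_logpdf([1.0, 0.0], [0.25, 0.25, .5], [0.0, 1.0, 2.0], [1.0, 2.0, 5.0]))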
Example #4
    def test_lpdf_vector_N_components(self):
        xval = tensor.dvector()
        ll = lpdf(self.xvec, xval)
        assert ll.ndim == 1, ll.type
        f = theano.function([xval, self.weights, self.mus, self.sigmas], ll)
        llval = f(
            [1.0, 0.0],  # x
            [0.25, 0.25, .5],  # weights
            [0.0, 1.0, 2.0],  # mu
            [1.0, 2.0, 5.0],  # sigma
        )

        # case x = 1.0
        a = (.25 / numpy.sqrt(2 * numpy.pi * 1.0**2) *
             numpy.exp(-.5 * (1.0)**2))
        a += (.25 / numpy.sqrt(2 * numpy.pi * 2.0**2))
        a += (.5 / numpy.sqrt(2 * numpy.pi * 5.0**2) *
              numpy.exp(-.5 * (1.0 / 5.0)**2))

        assert llval.shape == (2, )
        assert numpy.allclose(llval[0], numpy.log(a))

        # case x = 0.0
        a = (.25 / numpy.sqrt(2 * numpy.pi * 1.0**2))
        a += (.25 / numpy.sqrt(2 * numpy.pi * 2.0**2) *
              numpy.exp(-.5 * (1.0 / 2.0)**2))
        a += (.5 / numpy.sqrt(2 * numpy.pi * 5.0**2) *
              numpy.exp(-.5 * (2.0 / 5.0)**2))
        assert numpy.allclose(llval[1], numpy.log(a))
Example #5
def test_normal_simple():
    s_rng = RandomStreams(23)
    n = s_rng.normal()

    p0 = rv.lpdf(n, 0)
    p1 = rv.lpdf(n, 1)
    pn1 = rv.lpdf(n, -1)

    f = theano.function([], [p0, p1, pn1])

    pvals = f()
    targets = numpy.asarray([
        numpy.log(1.0 / numpy.sqrt(2 * numpy.pi)),
        numpy.log(numpy.exp(-0.5) / numpy.sqrt(2 * numpy.pi)),
        numpy.log(numpy.exp(-0.5) / numpy.sqrt(2 * numpy.pi)),
    ])

    assert numpy.allclose(pvals, targets), (pvals, targets)
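The `targets` above are the standard normal log-density log N(x; 0, 1) = -x**2 / 2 - log(sqrt(2 * pi)) at x = 0, 1, -1. A short NumPy check of the same values (illustrative only):

import numpy

def std_normal_logpdf(x):
    # log N(x; 0, 1) = -x**2 / 2 - 0.5 * log(2 * pi); same values as `targets`.
    x = numpy.asarray(x, dtype=float)
    return -0.5 * x ** 2 - 0.5 * numpy.log(2 * numpy.pi)

print(std_normal_logpdf([0, 1, -1]))  # approx. [-0.9189, -1.4189, -1.4189]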
Example #6
def test_normal_w_params():
    s_rng = RandomStreams(23)
    n = s_rng.normal(mu=2, sigma=3)

    p0 = rv.lpdf(n, 0)
    p1 = rv.lpdf(n, 2)
    pn1 = rv.lpdf(n, -1)

    f = theano.function([], [p0, p1, pn1])

    pvals = f()
    targets = numpy.asarray([
        numpy.log(
            numpy.exp(-0.5 * ((2.0 / 3.0)**2)) /
            numpy.sqrt(2 * numpy.pi * 9.0)),
        numpy.log(numpy.exp(0) / numpy.sqrt(2 * numpy.pi * 9)),
        numpy.log(
            numpy.exp(-0.5 * ((3.0 / 3.0)**2)) /
            numpy.sqrt(2 * numpy.pi * 9.0)),
    ])

    assert numpy.allclose(pvals, targets), (pvals, targets)
Example #7
def test_normal_nonscalar():
    s_rng = RandomStreams(234)
    n = s_rng.normal()

    data = numpy.asarray([1, 2, 3, 4, 5])
    p_data = rv.lpdf(n, data)

    f = theano.function([], [p_data])

    pvals = f()
    targets = numpy.log(numpy.exp(-0.5 * (data**2)) / numpy.sqrt(2 * numpy.pi))

    assert numpy.allclose(pvals, targets), (pvals, targets)
Example #8
    def mcmc(ll, *frvs):
        proposals = [
            s_rng.local_proposal(v, rvs) for v, rvs in zip(free_RVs, frvs)
        ]
        proposals_rev = [
            s_rng.local_proposal(v, rvs)
            for v, rvs in zip(free_RVs, proposals)
        ]

        full_observations = dict(observations)
        full_observations.update(
            dict([(rv, s) for rv, s in zip(free_RVs, proposals)]))
        new_log_likelihood = full_log_likelihood(full_observations)

        # Metropolis-Hastings log acceptance ratio, corrected for the
        # reverse (old | proposed) and forward (proposed | old) proposal densities
        logratio = new_log_likelihood - ll \
            + tensor.add(*[tensor.sum(lpdf(p, r)) for p, r in zip(proposals_rev, frvs)]) \
            - tensor.add(*[tensor.sum(lpdf(p, r)) for p, r in zip(proposals, proposals)])

        # accept iff U < exp(logratio), i.e. with probability min(1, exp(logratio))
        accept = tensor.gt(logratio, tensor.log(U))

        return [tensor.switch(accept, new_log_likelihood, ll)] + \
               [tensor.switch(accept, p, f) for p, f in zip(proposals, frvs)], \
               {}, theano.scan_module.until(accept)
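This mcmc function is written as the body of a theano.scan loop: it returns the updated log-likelihood and free-variable values, an empty updates dict, and theano.scan_module.until(accept), so the loop keeps re-proposing until a move is accepted. Below is a hedged sketch of how such a step could be driven; the initial values ll0 and frvs0, like the enclosing s_rng, free_RVs, observations and U, belong to surrounding code that is not shown here, so these names are assumptions:

# Hypothetical driver, assuming ll0 is the current log-likelihood and frvs0
# holds the current values of the free random variables.
outputs, updates = theano.scan(
    mcmc,
    outputs_info=[ll0] + list(frvs0),
    n_steps=100)                       # upper bound; until(accept) stops earlier
new_ll = outputs[0][-1]                # log-likelihood after the accepted move
new_frvs = [o[-1] for o in outputs[1:]]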
Example #9
    def test_lpdf_scalar_one_component(self):
        xval = tensor.dscalar()
        ll = lpdf(self.xsca, xval)
        assert ll.ndim == 0, ll.type
        f = theano.function(
            [xval, self.weights, self.mus, self.sigmas, self.draw_shape], ll)
        llval = f(
            1.0,  # x
            [1.],  # weights
            [1.0],  # mu
            [2.0],  # sigma
            []  # shape
        )
        assert llval.shape == ()
        assert numpy.allclose(
            llval, numpy.log(1.0 / numpy.sqrt(2 * numpy.pi * 2.0**2)))
Example #10
    def test_lpdf_scalar_N_components(self):
        xval = tensor.dscalar()
        ll = lpdf(self.xsca, xval)
        assert ll.ndim == 0, ll.type
        f = theano.function(
            [xval, self.weights, self.mus, self.sigmas, self.draw_shape], ll)
        llval = f(
            1.0,  # x
            [0.25, 0.25, .5],  # weights
            [0.0, 1.0, 2.0],  # mu
            [1.0, 2.0, 5.0],  # sigma
            []  # shape
        )

        a = (.25 / numpy.sqrt(2 * numpy.pi * 1.0**2) *
             numpy.exp(-.5 * (1.0)**2))
        a += (.25 / numpy.sqrt(2 * numpy.pi * 2.0**2))
        a += (.5 / numpy.sqrt(2 * numpy.pi * 5.0**2) *
              numpy.exp(-.5 * (1.0 / 5.0)**2))
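This snippet breaks off after computing the mixture density a. By analogy with the matrix and vector variants above, the missing tail would presumably check the scalar result against log(a), along these lines (an assumption; the remainder of the test is not shown in the source):

        assert llval.shape == ()
        assert numpy.allclose(llval, numpy.log(a))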
Example #11
def mh2_sample(s_rng, outputs, observations={}, givens={}):
    all_vars = ancestors(list(observations.keys()) + list(outputs))

    for o in observations:
        assert o in all_vars
        if not is_raw_rv(o):
            raise TypeError(o)

    RVs = [v for v in all_vars if is_raw_rv(v)]
    free_RVs = [v for v in RVs if v not in observations]

    free_RVs_state = []
    for v in free_RVs:
        f = theano.function([],
                            v,
                            mode=theano.Mode(linker='py', optimizer=None))
        free_RVs_state.append(theano.shared(f()))

    U = s_rng.uniform(low=0.0, high=1.0)

    rr = []
    for index in range(len(free_RVs)):
        # TODO: why does the compiler crash when we try to expose the likelihood?
        full_observations = dict(observations)
        full_observations.update(
            dict([(rv, s) for rv, s in zip(free_RVs, free_RVs_state)]))
        log_likelihood = full_log_likelihood(full_observations)

        proposal = s_rng.local_proposal(free_RVs[index], free_RVs_state[index])
        proposal_rev = s_rng.local_proposal(free_RVs[index], proposal)

        full_observations = dict(observations)
        full_observations.update(
            dict([(rv, s) for rv, s in zip(free_RVs, free_RVs_state)]))
        full_observations.update(dict([(free_RVs[index], proposal)]))
        new_log_likelihood = full_log_likelihood(full_observations)

        # bw: log q(current | proposed), the reverse proposal log-density;
        # fw: log q(proposed | current), the forward proposal log-density
        bw = tensor.sum(lpdf(proposal_rev, free_RVs_state[index]))
        fw = tensor.sum(lpdf(proposal, proposal))

        # Metropolis-Hastings log acceptance ratio
        lr = new_log_likelihood - log_likelihood + bw - fw

        # accept iff U < exp(lr), i.e. with probability min(1, exp(lr))
        accept = tensor.gt(lr, tensor.log(U))

        updates = {
            free_RVs_state[index]:
            tensor.switch(accept, proposal, free_RVs_state[index])
        }
        rr.append(theano.function([], [accept], updates=updates,
                                  givens=givens))

    # TODO: the exact number of samples returned is still wrong
    def sampler(nr_samples, burnin=100, lag=100):
        data = [[] for o in outputs]
        for i in range(nr_samples * lag + burnin):
            accept = False
            while not accept:
                index = numpy.random.randint(len(free_RVs))

                accept = rr[index]()
                if accept and i > burnin and (i - burnin) % lag == 0:
                    for d, o in zip(data, outputs):
                        # TODO: this can be optimized
                        if is_raw_rv(o):
                            d.append(
                                free_RVs_state[free_RVs.index(o)].get_value())
                        else:
                            full_observations = dict(observations)
                            full_observations.update(
                                dict([
                                    (rv, s)
                                    for rv, s in zip(free_RVs, free_RVs_state)
                                ]))
                            d.append(
                                evaluate(evaluate_with_assignments(
                                    o, full_observations),
                                         givens=givens))
        data = [numpy.asarray(d).squeeze() for d in data]

        return data

    return sampler
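mh2_sample returns a sampler(nr_samples, burnin=100, lag=100) closure that runs the per-variable Metropolis-Hastings kernels and collects the requested outputs. A hedged usage sketch follows; the model variables x and y and the data y_obs are placeholders, not taken from the source:

# Hypothetical usage: x is a latent random variable of the model, y an
# observed one with data y_obs (both placeholders).
sampler = mh2_sample(s_rng, [x], observations={y: y_obs})
samples, = sampler(100, burnin=200, lag=10)  # roughly 100 thinned draws of x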