Example #1
0
 def random(self, point=None, size=None):
     """Sample from the bounded distribution.

     Unbounded sides are filled with +/-inf before delegating to the
     shared sampling helper; with no bounds at all the wrapped
     distribution samples directly.
     """
     if self.lower is None and self.upper is None:
         return self._wrapped.random(point=point, size=size)

     # Resolve whichever bounds exist (draw order matches the original
     # per-branch calls); a missing bound becomes an open interval side.
     if self.lower is not None and self.upper is not None:
         lower, upper = draw_values([self.lower, self.upper], point=point, size=size)
     elif self.lower is not None:
         lower = draw_values([self.lower], point=point, size=size)
         upper = np.inf
     else:
         upper = draw_values([self.upper], point=point, size=size)
         lower = -np.inf

     return generate_samples(
         self._random,
         lower,
         upper,
         dist_shape=self.shape,
         size=size,
         not_broadcast_kwargs={'point': point},
     )
Example #2
0
    def test_simple_model(self):
        """Repeated draw_values calls resample; a point overrides sampling."""
        with pm.Model():
            mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(np.array(5))
            a = pm.Normal('a', mu=mu, sd=5, shape=2)

        # Two independent calls must not reproduce the same sample.
        first = draw_values([a])
        second = draw_values([a])
        assert np.all(first[0] != second[0])

        # Supplying a point short-circuits sampling entirely.
        point = {'a': np.array([3., 4.])}
        npt.assert_equal(draw_values([a], point=point), [point['a']])
Example #3
0
    def test_joint_distribution(self):
        """A single draw_values call on dependent RVs must return one
        coherent joint sample, while separate calls give marginal draws.

        b and c are tied to a with tiny noise (sigma=1e-8), so within a
        joint draw A, B and C are nearly equal; marginal draws are not.
        """
        with pm.Model() as model:
            a = pm.Normal('a', mu=0, sigma=100)
            b = pm.Normal('b', mu=a, sigma=1e-8)
            c = pm.Normal('c', mu=a, sigma=1e-8)
            d = pm.Deterministic('d', b + c)

        # Expected RVs
        N = 1000
        norm = np.random.randn(3, N)
        eA = norm[0] * 100
        eB = eA + norm[1] * 1e-8
        eC = eA + norm[2] * 1e-8
        eD = eB + eC

        # Drawn RVs
        # Re-seeding makes the joint draw reuse the same underlying normals
        # as the expected values computed above.
        nr.seed(self.random_seed)
#        A, B, C, D = list(zip(*[draw_values([a, b, c, d]) for i in range(N)]))
        A, B, C, D = draw_values([a, b, c, d], size=N)
        A = np.array(A).flatten()
        B = np.array(B).flatten()
        C = np.array(C).flatten()
        D = np.array(D).flatten()

        # Assert that the drawn samples match the expected values
        assert np.allclose(eA, A)
        assert np.allclose(eB, B)
        assert np.allclose(eC, C)
        assert np.allclose(eD, D)

        # Assert that A, B and C have the expected difference
        assert np.all(np.abs(A - B) < 1e-6)
        assert np.all(np.abs(A - C) < 1e-6)
        assert np.all(np.abs(B - C) < 1e-6)

        # Marginal draws
        mA = np.array([draw_values([a]) for i in range(N)]).flatten()
        mB = np.array([draw_values([b]) for i in range(N)]).flatten()
        mC = np.array([draw_values([c]) for i in range(N)]).flatten()
        # Also test the with model context of draw_values
        with model:
            mD = np.array([draw_values([d]) for i in range(N)]).flatten()

        # Assert that the marginal distributions have different sample values
        assert not np.all(np.abs(B - mB) < 1e-2)
        assert not np.all(np.abs(C - mC) < 1e-2)
        assert not np.all(np.abs(D - mD) < 1e-2)

        # Assert that the marginal distributions do not have high cross
        # correlation
        assert np.abs(np.corrcoef(mA, mB)[0, 1]) < 0.1
        assert np.abs(np.corrcoef(mA, mC)[0, 1]) < 0.1
        assert np.abs(np.corrcoef(mB, mC)[0, 1]) < 0.1
    def test_draw_dependencies(self):
        """A Deterministic is computed from the draw of its parent RV."""
        with pm.Model():
            x = pm.Normal('x', mu=0., sd=1.)
            exp_x = pm.Deterministic('exp_x', pm.math.exp(x))

        drawn_x, drawn_exp = draw_values([x, exp_x])
        npt.assert_almost_equal(np.exp(drawn_x), drawn_exp)
Example #5
0
    def random(self, point=None, size=None):
        """Sample the constant distribution: every entry equals ``c``."""
        (c,) = draw_values([self.c], point=point, size=size)

        def _fill(c, dtype=self.dtype, size=None):
            # A "sample" is just the constant broadcast to the target size.
            return np.full(size, fill_value=c, dtype=dtype)

        samples = generate_samples(_fill, c=c, dist_shape=self.shape, size=size)
        return samples.astype(self.dtype)
 def test_random_sample_returns_nd_array(self):
     """Drawn distribution parameters come back as numpy arrays."""
     with pm.Model():
         mu = pm.Normal('mu', mu=0., tau=1e-3)
         sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)
         y = pm.Normal('y', mu=mu, sd=sigma)
         drawn = draw_values([y.distribution.mu, y.distribution.tau])
     mu, tau = drawn
     assert isinstance(mu, np.ndarray)
     assert isinstance(tau, np.ndarray)
Example #7
0
 def test_random_sample_returns_nd_array(self):
     """Both drawn parameters must be ndarrays, not scalars or graph nodes."""
     with pm.Model():
         mu = pm.Normal('mu', mu=0., tau=1e-3)
         sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)
         y = pm.Normal('y', mu=mu, sd=sigma)
         mu, tau = draw_values([y.distribution.mu, y.distribution.tau])
     for value in (mu, tau):
         assert isinstance(value, np.ndarray)
Example #8
0
    def test_draw_order(self):
        """A Deterministic may precede its parent in the requested list."""
        with pm.Model():
            x = pm.Normal('x', mu=0., sigma=1.)
            exp_x = pm.Deterministic('exp_x', pm.math.exp(x))

        # exp_x is requested first but must still match the draw of x.
        drawn_exp, drawn_x = draw_values([exp_x, x])
        npt.assert_almost_equal(np.exp(drawn_x), drawn_exp)
def test_draw_values():
    """draw_values: constants pass through, points substitute, RVs sample."""
    with Model():
        mu = Normal('mu', mu=0., tau=1e-3)
        sigma = Gamma('sigma', alpha=1., beta=1., transform=None)
        y1 = Normal('y1', mu=0., sd=1.)
        y2 = Normal('y2', mu=mu, sd=sigma)

        # Constant parameters are returned unchanged (tau = 1/sd**2 = 1).
        mu1, tau1 = draw_values([y1.distribution.mu, y1.distribution.tau])
        assert mu1 == 0. and tau1 == 1, "Draw values failed with scalar parameters"

        # Values from the point dict replace sampling; tau derives from sigma.
        mu2, tau2 = draw_values([y2.distribution.mu, y2.distribution.tau],
                                point={'mu': 5., 'sigma': 2.})
        assert mu2 == 5. and tau2 == 0.25, "Draw values failed using point replacement"

        # Without a point, parameters are sampled and come back as arrays.
        mu3, tau3 = draw_values([y2.distribution.mu, y2.distribution.tau])
        assert isinstance(mu3, np.ndarray) and isinstance(tau3, np.ndarray), \
            "Draw values did not return np.ndarray with random sampling"
 def random(self, point=None, size=None):
     """Sample the distribution given drawn values of ``n`` and ``p``."""
     n, p = draw_values([self.n, self.p], point=point)
     return generate_samples(self._random,
                             n,
                             p,
                             dist_shape=self.shape,
                             size=size)
    def test_draw_order(self):
        """Requesting [exp_x, x] still draws x before its Deterministic."""
        with pm.Model():
            x = pm.Normal('x', mu=0., sd=1.)
            exp_x = pm.Deterministic('exp_x', pm.math.exp(x))

        out = draw_values([exp_x, x])
        npt.assert_almost_equal(np.exp(out[1]), out[0])
Example #12
0
 def random(self, point=None, size=None):
     """Draw random samples from the Dirichlet distribution.

     Fix: ``draw_values`` returns a *list* of drawn values, so the
     concentration parameter must be unpacked with ``[0]`` before use
     (as done elsewhere in this file, e.g. the constant distribution's
     ``random``); otherwise the ``size == a.shape`` check below operates
     on a list, which has no ``.shape``.
     """
     a = draw_values([self.a], point=point)[0]

     def _random(a, size=None):
         # scipy expects size=None for a single draw matching a's shape.
         return st.dirichlet.rvs(a, None if size == a.shape else size)

     return generate_samples(_random,
                             a,
                             dist_shape=self.shape,
                             size=size)
def test_draw_values():
    """Exercise the three behaviours of draw_values.

    1. Scalar/constant distribution parameters are returned as-is.
    2. A ``point`` dict overrides sampling and feeds derived parameters.
    3. Absent a point, parameters are sampled and returned as ndarrays.
    """
    with Model():
        mu = Normal('mu', mu=0., tau=1e-3)
        sigma = Gamma('sigma', alpha=1., beta=1., transform=None)
        y1 = Normal('y1', mu=0., sd=1.)
        y2 = Normal('y2', mu=mu, sd=sigma)

        mu1, tau1 = draw_values([y1.distribution.mu, y1.distribution.tau])
        assert mu1 == 0. and tau1 == 1, "Draw values failed with scalar parameters"

        # tau2 = 1 / sigma**2 with sigma = 2 from the point dict.
        mu2, tau2 = draw_values([y2.distribution.mu, y2.distribution.tau],
                                point={'mu': 5., 'sigma': 2.})
        assert mu2 == 5. and tau2 == 0.25, "Draw values failed using point replacement"

        mu3, tau3 = draw_values([y2.distribution.mu, y2.distribution.tau])
        assert isinstance(mu3, np.ndarray) and isinstance(tau3, np.ndarray), \
            "Draw values did not return np.ndarray with random sampling"
Example #14
0
 def random(self, point=None, size=None):
     """Draw random samples from the Dirichlet distribution.

     Fix: unpack the single drawn value from the list returned by
     ``draw_values`` -- the size-comparison lambda needs an array with a
     ``.shape`` attribute, not a one-element list.
     """
     a = draw_values([self.a], point=point)[0]
     samples = generate_samples(lambda a, size=None: \
                                 st.dirichlet.rvs(a, None if size == a.shape else size),
                                a,
                                dist_shape=self.shape,
                                size=size)
     return samples
Example #15
0
 def random(self, point=None, size=None):
     """Sample from the multivariate normal given drawn mu and cov."""
     mu, cov = draw_values([self.mu, self.cov], point=point)

     def _random(mean, cov, size=None):
         # size=None yields a single draw matching the mean's shape.
         return stats.multivariate_normal.rvs(
             mean, cov, None if size == mean.shape else size)

     return generate_samples(_random, mean=mu, cov=cov,
                             dist_shape=self.shape,
                             broadcast_shape=mu.shape,
                             size=size)
Example #16
0
 def random(self, point=None, size=None):
     """Sample from the multivariate normal parameterised by mu and tau."""
     mu, tau = draw_values([self.mu, self.tau], point=point)

     def _random(mean, cov, size=None):
         return st.multivariate_normal.rvs(
             mean, cov, None if size == mean.shape else size)

     # NOTE(review): tau is handed to scipy as the covariance argument; if
     # tau is a precision matrix this looks inverted -- confirm upstream.
     return generate_samples(_random,
                             mean=mu, cov=tau,
                             dist_shape=self.shape,
                             broadcast_shape=mu.shape,
                             size=size)
 def test_draw_point_replacement(self):
     """Point values override sampling and propagate to derived parameters."""
     with pm.Model():
         mu = pm.Normal('mu', mu=0., tau=1e-3)
         sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)
         y = pm.Normal('y', mu=mu, sd=sigma)
         drawn_mu, drawn_tau = draw_values(
             [y.distribution.mu, y.distribution.tau],
             point={'mu': 5., 'sigma': 2.})
     npt.assert_almost_equal(drawn_mu, 5)
     npt.assert_almost_equal(drawn_tau, 1 / 2.**2)
Example #18
0
 def random(self, point=None, size=None):
     """Delegate sampling to ``st_random`` with drawn parameter values."""
     params = draw_values(
         [self.mu, self.std, self.corr, self.clust], point=point)
     mu, std, corr, clust = params
     return self.st_random(mu, std, corr, clust,
                           size=size,
                           _dist_shape=self.shape)
Example #19
0
 def test_draw_point_replacement(self):
     """tau must be derived from the sigma supplied in the point dict."""
     with pm.Model():
         mu = pm.Normal('mu', mu=0., tau=1e-3)
         sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)
         y = pm.Normal('y', mu=mu, sigma=sigma)
         mu2, tau2 = draw_values([y.distribution.mu, y.distribution.tau],
                                 point={'mu': 5., 'sigma': 2.})
     npt.assert_almost_equal(mu2, 5)
     # tau = 1 / sigma**2 with sigma = 2.
     npt.assert_almost_equal(tau2, 0.25)
Example #20
0
    def test_dep_vars(self):
        """Draws of ``a`` react to points on sd (raw or transformed)."""
        with pm.Model():
            mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(np.array(5))
            sd = pm.HalfNormal('sd', shape=2)
            tau = 1 / sd ** 2
            a = pm.Normal('a', mu=mu, tau=tau, shape=2)

        # An explicit point for a itself is returned verbatim.
        point = {'a': np.array([1., 2.])}
        npt.assert_equal(draw_values([a], point=point), [point['a']])

        # Four different calls (same draw order as before) must all differ.
        draws = [
            draw_values([a])[0],
            draw_values([a], point={'sd': np.array([2., 3.])})[0],
            draw_values([a], point={'sd_log__': np.array([2., 3.])})[0],
            draw_values([a], point={'sd_log__': np.array([2., 3.])})[0],
        ]
        assert all(np.all(x != y)
                   for i, x in enumerate(draws)
                   for y in draws[i + 1:])
Example #21
0
 def random(self, point=None, size=None):
     """Sample via scipy's multivariate normal given drawn mu and tau."""
     mu, tau = draw_values([self.mu, self.tau], point=point)
     sample_one = lambda mean, cov, size=None: st.multivariate_normal.rvs(
         mean, cov, None if size == mean.shape else size)
     # NOTE(review): tau is passed as cov= here; verify it is a covariance.
     return generate_samples(sample_one,
                             mean=mu, cov=tau,
                             dist_shape=self.shape,
                             broadcast_shape=mu.shape,
                             size=size)
Example #22
0
 def random(self, point=None, size=None):
     """Sample the bounded distribution, substituting +/-inf for open sides."""
     if self.lower is None and self.upper is None:
         # No bounds: plain sampling from the wrapped distribution.
         return self._wrapped.random(point=point, size=size)

     if self.lower is not None and self.upper is not None:
         lower, upper = draw_values([self.lower, self.upper], point=point, size=size)
     elif self.lower is not None:
         lower = draw_values([self.lower], point=point, size=size)
         upper = np.inf
     else:
         upper = draw_values([self.upper], point=point, size=size)
         lower = -np.inf

     return generate_samples(self._random, lower, upper, point,
                             dist_shape=self.shape,
                             size=size)
Example #23
0
 def random(self, point=None, size=None, repeat=None):
     """Sample the bounded distribution; ``repeat`` is accepted but unused."""
     if self.lower is None and self.upper is None:
         return self._wrapped.random(point=point, size=size)

     # Draw the bounds that exist; an absent bound is an open interval side.
     if self.lower is not None and self.upper is not None:
         lower, upper = draw_values([self.lower, self.upper], point=point)
     elif self.lower is not None:
         lower = draw_values([self.lower], point=point)
         upper = np.inf
     else:
         upper = draw_values([self.upper], point=point)
         lower = -np.inf

     return generate_samples(self._random, lower, upper, point,
                             dist_shape=self.shape,
                             size=size)
Example #24
0
def test_mixture_random_shape_fast():
    """Shape broadcasting of Mixture draws via the fast predictive path."""
    # test the shape broadcasting in mixture random
    y = np.concatenate([nr.poisson(5, size=10),
                        nr.poisson(9, size=10)])
    with pm.Model() as m:
        # Implicitly-shaped components and weights.
        comp0 = pm.Poisson.dist(mu=np.ones(2))
        w0 = pm.Dirichlet('w0', a=np.ones(2))
        like0 = pm.Mixture('like0',
                           w=w0,
                           comp_dists=comp0,
                           observed=y)

        # Components carry an explicit (20, 2) shape.
        comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),
                                shape=(20, 2))
        w1 = pm.Dirichlet('w1', a=np.ones(2))
        like1 = pm.Mixture('like1',
                           w=w1,
                           comp_dists=comp1,
                           observed=y)

        # Weights carry an explicit (20, 2) shape.
        comp2 = pm.Poisson.dist(mu=np.ones(2))
        w2 = pm.Dirichlet('w2',
                          a=np.ones(2),
                          shape=(20, 2))
        like2 = pm.Mixture('like2',
                           w=w2,
                           comp_dists=comp2,
                           observed=y)

        # Both weights and components are explicitly shaped.
        comp3 = pm.Poisson.dist(mu=np.ones(2),
                                shape=(20, 2))
        w3 = pm.Dirichlet('w3',
                          a=np.ones(2),
                          shape=(20, 2))
        like3 = pm.Mixture('like3',
                           w=w3,
                           comp_dists=comp3,
                           observed=y)

    # Every case must broadcast to (size, n_observations).
    rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],
                                             point=m.test_point,
                                             size=100)
    assert rand0.shape == (100, 20)
    assert rand1.shape == (100, 20)
    assert rand2.shape == (100, 20)
    assert rand3.shape == (100, 20)

    # I *think* that the mixture means that this is not going to work,
    # but I could be wrong. [2019/08/22:rpg]
    with m:
        ppc = pm.fast_sample_posterior_predictive([m.test_point], samples=200)
    assert ppc['like0'].shape == (200, 20)
    assert ppc['like1'].shape == (200, 20)
    assert ppc['like2'].shape == (200, 20)
    assert ppc['like3'].shape == (200, 20)
Example #25
0
 def random(self, point=None, size=None):
     # Sampling is not implemented yet; everything after the raise is an
     # unreachable template kept as a sketch of the intended implementation.
     raise NotImplementedError
     # This needs to give Ai(X\beta + (Bi)\epsilon)
     mu, cov = draw_values([self.mu, self.cov], point=point)
     def _random(mean, cov, size=None):
         return stats.multivariate_normal.rvs(mean, cov, 
                                              None if size==mean.shape else size)
     samples = generate_samples(_random, mean=mu, cov=cov, 
                                dist_shape=self.shape, broadcast_shape=mu.shape,
                                size=size)
     return samples
Example #26
0
 def random(self, point=None, size=None):
     """Sample the base MvNormal, then scale by scale_sd and shift by mu."""
     base = super(ScaledSdMvNormalNonZero, self).random(point=point, size=size)
     original_shape = base.shape
     scale_sd, mu = draw_values([self.scale_sd, self._mu], point=point)
     # Promote scale_sd so it broadcasts over the trailing sample axis.
     if scale_sd.ndim == 0:
         scale_sd = np.repeat(scale_sd, base.shape[-1])
     if scale_sd.ndim == 1:
         scale_sd = scale_sd[None, :]
     base *= scale_sd
     base += mu
     # Reshape back just in case broadcasting altered the shape.
     return base.reshape(original_shape)
Example #27
0
def test_mixture_random_shape():
    """Shape broadcasting of Mixture draws and posterior predictive sampling.

    NOTE(review): uses `pm.sample_ppc`, presumably an older spelling of
    `sample_posterior_predictive` (cf. the variant of this test below) --
    confirm against the PyMC3 version targeted.
    """
    # test the shape broadcasting in mixture random
    y = np.concatenate([nr.poisson(5, size=10),
                        nr.poisson(9, size=10)])
    with pm.Model() as m:
        # Implicitly-shaped components and weights.
        comp0 = pm.Poisson.dist(mu=np.ones(2))
        w0 = pm.Dirichlet('w0', a=np.ones(2))
        like0 = pm.Mixture('like0',
                           w=w0,
                           comp_dists=comp0,
                           observed=y)

        # Components with an explicit (20, 2) shape.
        comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),
                                shape=(20, 2))
        w1 = pm.Dirichlet('w1', a=np.ones(2))
        like1 = pm.Mixture('like1',
                           w=w1,
                           comp_dists=comp1,
                           observed=y)

        # Weights with an explicit (20, 2) shape.
        comp2 = pm.Poisson.dist(mu=np.ones(2))
        w2 = pm.Dirichlet('w2',
                          a=np.ones(2),
                          shape=(20, 2))
        like2 = pm.Mixture('like2',
                           w=w2,
                           comp_dists=comp2,
                           observed=y)

        # Both explicitly shaped.
        comp3 = pm.Poisson.dist(mu=np.ones(2),
                                shape=(20, 2))
        w3 = pm.Dirichlet('w3',
                          a=np.ones(2),
                          shape=(20, 2))
        like3 = pm.Mixture('like3',
                           w=w3,
                           comp_dists=comp3,
                           observed=y)

    # Every case must broadcast to (size, n_observations).
    rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],
                                             point=m.test_point,
                                             size=100)
    assert rand0.shape == (100, 20)
    assert rand1.shape == (100, 20)
    assert rand2.shape == (100, 20)
    assert rand3.shape == (100, 20)

    with m:
        ppc = pm.sample_ppc([m.test_point], samples=200)
    assert ppc['like0'].shape == (200, 20)
    assert ppc['like1'].shape == (200, 20)
    assert ppc['like2'].shape == (200, 20)
    assert ppc['like3'].shape == (200, 20)
def test_mixture_random_shape():
    """Shape broadcasting of Mixture draws and sample_posterior_predictive."""
    # test the shape broadcasting in mixture random
    y = np.concatenate([nr.poisson(5, size=10),
                        nr.poisson(9, size=10)])
    with pm.Model() as m:
        # Implicitly-shaped components and weights.
        comp0 = pm.Poisson.dist(mu=np.ones(2))
        w0 = pm.Dirichlet('w0', a=np.ones(2))
        like0 = pm.Mixture('like0',
                           w=w0,
                           comp_dists=comp0,
                           observed=y)

        # Components with an explicit (20, 2) shape.
        comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),
                                shape=(20, 2))
        w1 = pm.Dirichlet('w1', a=np.ones(2))
        like1 = pm.Mixture('like1',
                           w=w1,
                           comp_dists=comp1,
                           observed=y)

        # Weights with an explicit (20, 2) shape.
        comp2 = pm.Poisson.dist(mu=np.ones(2))
        w2 = pm.Dirichlet('w2',
                          a=np.ones(2),
                          shape=(20, 2))
        like2 = pm.Mixture('like2',
                           w=w2,
                           comp_dists=comp2,
                           observed=y)

        # Both explicitly shaped.
        comp3 = pm.Poisson.dist(mu=np.ones(2),
                                shape=(20, 2))
        w3 = pm.Dirichlet('w3',
                          a=np.ones(2),
                          shape=(20, 2))
        like3 = pm.Mixture('like3',
                           w=w3,
                           comp_dists=comp3,
                           observed=y)

    # Every case must broadcast to (size, n_observations).
    rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],
                                             point=m.test_point,
                                             size=100)
    assert rand0.shape == (100, 20)
    assert rand1.shape == (100, 20)
    assert rand2.shape == (100, 20)
    assert rand3.shape == (100, 20)

    with m:
        ppc = pm.sample_posterior_predictive([m.test_point], samples=200)
    assert ppc['like0'].shape == (200, 20)
    assert ppc['like1'].shape == (200, 20)
    assert ppc['like2'].shape == (200, 20)
    assert ppc['like3'].shape == (200, 20)
Example #29
0
    def random(self, point=None, size=None):
        """Sample from this distribution conditional on a given set of values.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        with _DrawValuesContext() as draw_context:

            # TODO FIXME: Very, very lame...
            # NOTE(review): reusing an already-drawn states sample mutates the
            # caller's `point` dict in place -- confirm this is intended.
            term_smpl = draw_context.drawn_vars.get((self.states, 1), None)
            if term_smpl is not None:
                point[self.states.name] = term_smpl

            # `draw_values` is inconsistent and will not use the `size`
            # parameter if the variables aren't random variables.
            if hasattr(self.states, "distribution"):
                (states,) = draw_values([self.states], point=point, size=size)
            else:
                states = pm.Constant.dist(self.states).random(point=point, size=size)

            # states = states.T

            # One output slot per element of the drawn state sequence.
            samples = np.empty(states.shape)

            for i, dist in enumerate(self.comp_dists):
                # We want to sample from only the parts of our component
                # distributions that are active given the states.
                # This is only really relevant when the component distributions
                # change over the state space (e.g. Poisson means that change
                # over time).
                # We could always sample such components over the entire space
                # (e.g. time), but, for spaces with large dimension, that would
                # be extremely costly and wasteful.
                i_idx = np.where(states == i)
                i_size = len(i_idx[0])
                if i_size > 0:
                    # Restrict component parameters to the active indices,
                    # then sample only into those positions.
                    subset_args = distribution_subset_args(
                        dist, states.shape, i_idx, point=point
                    )
                    samples[i_idx] = dist.dist(*subset_args).random(point=point)

        return samples
Example #30
0
    def test_vals(self):
        """Plain arrays, constants and shared variables all pass through."""
        npt.assert_equal(draw_values([np.array([5, 6])])[0], [5, 6])
        npt.assert_equal(draw_values([np.array(5.)])[0], 5)

        npt.assert_equal(draw_values([tt.constant([5., 6.])])[0], [5, 6])
        assert draw_values([tt.constant(5)])[0] == 5
        npt.assert_equal(draw_values([2 * tt.constant([5., 6.])])[0], [10, 12])

        shared_val = theano.shared(np.array([5., 6.]))
        npt.assert_equal(draw_values([shared_val])[0], [5, 6])
        npt.assert_equal(draw_values([2 * shared_val])[0], [10, 12])
Example #31
0
    def test_vals(self):
        """draw_values evaluates raw arrays, constants and expressions."""
        for expr, expected in [
            (np.array([5, 6]), [5, 6]),
            (np.array(5.), 5),
            (tt.constant([5., 6.]), [5, 6]),
            (2 * tt.constant([5., 6.]), [10, 12]),
        ]:
            npt.assert_equal(draw_values([expr])[0], expected)

        assert draw_values([tt.constant(5)])[0] == 5

        val = theano.shared(np.array([5., 6.]))
        npt.assert_equal(draw_values([val])[0], [5, 6])
        npt.assert_equal(draw_values([2 * val])[0], [10, 12])
Example #32
0
    def test_vals(self):
        """Arrays, aesara constants and shared variables evaluate correctly."""
        npt.assert_equal(draw_values([np.array([5, 6])])[0], [5, 6])
        npt.assert_equal(draw_values([np.array(5.0)])[0], 5)

        npt.assert_equal(draw_values([aet.constant([5.0, 6.0])])[0], [5, 6])
        assert draw_values([aet.constant(5)])[0] == 5
        doubled = 2 * aet.constant([5.0, 6.0])
        npt.assert_equal(draw_values([doubled])[0], [10, 12])

        val = aesara.shared(np.array([5.0, 6.0]))
        npt.assert_equal(draw_values([val])[0], [5, 6])
        npt.assert_equal(draw_values([2 * val])[0], [10, 12])
Example #33
0
    def test_dep_vars(self):
        """Each choice of point (or none) must produce a distinct draw."""
        with pm.Model():
            mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(
                np.array(5))
            sd = pm.HalfNormal('sd', shape=2)
            tau = 1 / sd**2
            a = pm.Normal('a', mu=mu, tau=tau, shape=2)

        # An explicit point for a itself is returned verbatim.
        point = {'a': np.array([1., 2.])}
        npt.assert_equal(draw_values([a], point=point), [point['a']])

        samples = [draw_values([a])[0],
                   draw_values([a], point={'sd': np.array([2., 3.])})[0],
                   draw_values([a], point={'sd_log__': np.array([2., 3.])})[0],
                   draw_values([a], point={'sd_log__': np.array([2., 3.])})[0]]

        # All pairwise combinations must differ elementwise.
        for i in range(len(samples)):
            for j in range(i + 1, len(samples)):
                assert np.all(samples[i] != samples[j])
Example #34
0
    def test_dep_vars(self):
        """Distinct points on sd (raw or log-transformed) give distinct draws."""
        with pm.Model():
            mu = 2 * aet.constant(np.array([5.0, 6.0])) + aesara.shared(
                np.array(5))
            sd = pm.HalfNormal("sd", shape=2)
            tau = 1 / sd**2
            a = pm.Normal("a", mu=mu, tau=tau, shape=2)

        point = {"a": np.array([1.0, 2.0])}
        npt.assert_equal(draw_values([a], point=point), [point["a"]])

        val1 = draw_values([a])[0]
        val2 = draw_values([a], point={"sd": np.array([2.0, 3.0])})[0]
        val3 = draw_values([a], point={"sd_log__": np.array([2.0, 3.0])})[0]
        val4 = draw_values([a], point={"sd_log__": np.array([2.0, 3.0])})[0]

        pairs = [(val1, val2), (val1, val3), (val1, val4),
                 (val2, val3), (val2, val4), (val3, val4)]
        assert all(np.all(x != y) for x, y in pairs)
Example #35
0
    def __init__(self, vars, values=None, model=None):
        """Step sampler for a single `DiscreteMarkovChain` variable.

        Parameters
        ----------
        vars: list
            A one-element list holding the chain variable to sample.
        values: optional
            Unused; kept for step-method interface compatibility.
        model: optional
            Model to operate on (defaults to the current model context).
        """

        if len(vars) > 1:
            raise ValueError("This sampler only takes one variable.")

        (var, ) = pm.inputvars(vars)

        if not isinstance(var.distribution, DiscreteMarkovChain):
            raise TypeError(
                "This sampler only samples `DiscreteMarkovChain`s.")

        model = pm.modelcontext(model)

        self.vars = [var]

        # RVs whose log-probability graph takes the chain variable as input.
        self.dependent_rvs = [
            v for v in model.basic_RVs
            if v is not var and var in graph_inputs([v.logpt])
        ]

        dep_comps_logp_stacked = []
        for i, dependent_rv in enumerate(self.dependent_rvs):
            if isinstance(dependent_rv.distribution, SwitchingProcess):
                comp_logps = []

                # Get the log-likelihoood sequences for each state in this
                # `SwitchingProcess` observations distribution
                for comp_dist in dependent_rv.distribution.comp_dists:
                    comp_logps.append(comp_dist.logp(dependent_rv))

                comp_logp_stacked = at.stack(comp_logps)
            else:
                raise TypeError(
                    "This sampler only supports `SwitchingProcess` observations"
                )

            dep_comps_logp_stacked.append(comp_logp_stacked)

        # Sum per-state log-likelihood contributions over all dependent RVs.
        comp_logp_stacked = at.sum(dep_comps_logp_stacked, axis=0)

        # M: number of chain states; N: length of the state sequence.
        (M, ) = draw_values([var.distribution.gamma_0.shape[-1]],
                            point=model.test_point)
        N = model.test_point[var.name].shape[-1]
        self.alphas = np.empty((M, N), dtype=float)

        # Compiled model functions reused at every step.
        self.log_lik_states = model.fn(comp_logp_stacked)
        self.gamma_0_fn = model.fn(var.distribution.gamma_0)
        self.Gammas_fn = model.fn(var.distribution.Gammas)
Example #36
0
    def random(self, point=None, size=None):
        """Sample from this distribution conditional on a given set of values.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        with _DrawValuesContext() as draw_context:
            terms = [self.gamma_0, self.Gammas]

            gamma_0, Gamma = draw_values(terms, point=point)

            # Sample state 0 in each state sequence
            state_n = pm.Categorical.dist(gamma_0, shape=self.shape[:-1]).random(
                point=point, size=size
            )
            state_shape = state_n.shape

            # N is the length of each state sequence.
            N = self.shape[-1]

            states = np.empty(state_shape + (N,), dtype=self.dtype)

            # One uniform per transition; consumed by the inverse-CDF step below.
            unif_samples = np.random.uniform(size=states.shape)

            # Make sure we have a transition matrix for each element in a state
            # sequence
            Gamma = np.broadcast_to(Gamma, tuple(states.shape) + Gamma.shape[-2:])

            # Slices across each independent/replication dimension
            slices = [slice(None, d) for d in state_shape]
            slices = tuple(np.ogrid[slices])

            for n in range(0, N):
                # Pick the transition-matrix row for the current state, then
                # invert its cumulative distribution with the uniform draw.
                gamma_t = Gamma[..., n, :, :]
                gamma_t = gamma_t[slices + (state_n,)]
                state_n = vsearchsorted(gamma_t.cumsum(axis=-1), unif_samples[..., n])
                states[..., n] = state_n

            return states
Example #37
0
 def test_gof_constant(self):
     """Regression test for issue 3595: slice(None) used to introduce
     theano.gof.graph.Constant nodes that draw_values mishandled."""
     n_d = 500
     n_x = 2
     n_y = 1
     n_g = 10
     groups = np.random.randint(0, n_g, (n_d, ))  # group index per datum
     factors = np.random.randint(0, n_x, (n_d, ))  # x-factor index per datum
     with pm.Model():
         multi_dim_rv = pm.Normal('multi_dim_rv',
                                  mu=0,
                                  sd=1,
                                  shape=(n_x, n_g, n_y))
         indexed_rv = multi_dim_rv[factors, groups, :]
         drawn = draw_values([indexed_rv])
         assert drawn is not None
Example #38
0
    def random(self, point=None, size=None):
        """Draw samples by transforming standard normal noise with
        ``driver.dot_tril`` and shifting by the drawn mean."""
        # Normalise size to a tuple so it can be concatenated below.
        if size is None:
            size = tuple()
        elif not isinstance(size, tuple):
            try:
                size = tuple(size)
            except TypeError:
                size = (size, )

        mu, U, P, d, W = draw_values(
            [self.mean, self.gp._U, self.gp._P, self.gp._d, self.gp._W],
            point=point,
            size=size,
        )
        noise = np.random.randn(*(size + tuple([d.shape[-1]])))

        # Vectorise dot_tril over the leading size axes.
        func = np.vectorize(driver.dot_tril,
                            signature="(n,j),(m,j),(n),(n,j),(n)->(n)")
        return func(U, P, d, W, noise) + mu
Example #39
0
    def random(self, point=None, size=None):
        """Draw random values from the EDSD (exponentially decreasing space
        density) distribution.

        Parameters
        ----------
        point : dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size : int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        (scale, ) = draw_values([self.scale], point=point)
        return generate_samples(edsd.rvs,
                                L=scale,
                                dist_shape=self.shape,
                                size=size)
Example #40
0
    def test_dep_vars(self):
        """draw_values must require the *transformed* dependency values and
        must be stochastic when those values are supplied."""
        with pm.Model():
            shared_mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(np.array(5))
            sd = pm.HalfNormal('sd', shape=2)
            a = pm.Normal('a', mu=shared_mu, tau=1 / sd ** 2, shape=2)

        # A value given directly for `a` is returned verbatim.
        point = {'a': np.array([1., 2.])}
        npt.assert_equal(draw_values([a], point=point), [point['a']])

        # With no point at all, the dependency on `sd` cannot be resolved.
        with pytest.raises(theano.gof.MissingInputError):
            draw_values([a])

        # The untransformed var is not enough — we need the transformed one.
        with pytest.raises(theano.gof.MissingInputError):
            draw_values([a], point={'sd': np.array([2., 3.])})

        draws = [draw_values([a], point={'sd_log__': np.array([2., 3.])})[0]
                 for _ in range(2)]
        assert np.all(draws[0] != draws[1])
Example #41
0
    def random(self, point=None, size=None):
        """
        Draw random values from Simulator.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be conditioned (uses default
            point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not specified).

        Returns
        -------
        array
        """
        size = to_tuple(size)
        params = draw_values(list(self.params), point=point, size=size)
        # With an explicit size, run the simulator once per requested sample.
        if size:
            return np.array([self.function(*params) for _ in range(size[0])])
        return self.function(*params)
Example #42
0
    def test_dep_vars(self):
        """Drawing `a` requires values for its transformed dependencies."""
        with pm.Model():
            mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(
                np.array(5))
            sd = pm.HalfNormal('sd', shape=2)
            a = pm.Normal('a', mu=mu, tau=1 / sd ** 2, shape=2)

        # An explicit value for `a` is returned verbatim.
        npt.assert_equal(draw_values([a], point={'a': np.array([1., 2.])}),
                         [np.array([1., 2.])])

        # Without any point, the dependency on `sd` is unresolved.
        with pytest.raises(theano.gof.MissingInputError):
            draw_values([a])
        # The *untransformed* variable is not enough either.
        with pytest.raises(theano.gof.MissingInputError):
            draw_values([a], point={'sd': np.array([2., 3.])})

        # With the transformed value given, draws succeed and are stochastic.
        sd_point = {'sd_log__': np.array([2., 3.])}
        first = draw_values([a], point=sd_point)[0]
        second = draw_values([a], point=sd_point)[0]
        assert np.all(first != second)
Example #43
0
    def random(self, point=None, size=None):
        """
        Draw random values (weights) from the Stick-Breaking Process

        Parameters
        ----------
        point : dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size : int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        concentration = draw_values([self.a], point=point, size=size)
        return generate_samples(self._random,
                                wts=concentration,
                                dist_shape=self.shape,
                                size=size)
Example #44
0
    def random(self, point=None, size=None):
        """Draw random values from King's distribution.

        Parameters
        ----------
        point : dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size : int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        # Resolve the distribution parameters, then delegate to king.rvs.
        params = draw_values([self.location, self.scale, self.rt],
                             point=point,
                             size=size)
        location, scale, rt = params
        return generate_samples(king.rvs,
                                loc=location,
                                scale=scale,
                                rt=rt,
                                dist_shape=self.shape,
                                size=size)
 def test_draw_scalar_parameters(self):
     """Scalar distribution parameters are drawn back exactly."""
     with pm.Model():
         rv = pm.Normal('y1', mu=0., sd=1.)
         drawn_mu, drawn_tau = draw_values([rv.distribution.mu,
                                            rv.distribution.tau])
     npt.assert_almost_equal(drawn_mu, 0)
     npt.assert_almost_equal(drawn_tau, 1)
Example #46
0
 def random(self, point=None, size=None):
     """Draw random values from the distribution.

     Parameters
     ----------
     point : dict, optional
         Dict of variable values on which random values are to be
         conditioned (uses default point if not specified).
     size : int, optional
         Desired size of random sample (returns one sample if not
         specified).

     Returns
     -------
     array
     """
     n, p = draw_values([self.n, self.p], point=point)
     return generate_samples(self._random, n, p,
                             dist_shape=self.shape, size=size)
Example #47
0
 def forward_val(self, x, point=None):
     """Non-symbolic forward transform: ``log(b - x)``.

     2017-06-19: the ``self.b - 0.`` below is important for the testval to
     propagate; for an explanation see pull/2328#issuecomment-309303811.
     """
     upper = draw_values([self.b - 0.0], point=point)[0]
     return floatX(np.log(upper - x))
Example #48
0
 def test_empty(self):
     """An empty list of variables draws to an empty list."""
     drawn = draw_values([])
     assert drawn == []
Example #49
0
def init_nuts(init='auto', njobs=1, n_init=500000, model=None,
              random_seed=-1, progressbar=True, **kwargs):
    """Set up the mass matrix initialization for NUTS.

    NUTS convergence and sampling speed is extremely dependent on the
    choice of mass/scaling matrix. This function implements different
    methods for choosing or adapting the mass matrix.

    Parameters
    ----------
    init : str
        Initialization method to use.

        * auto : Choose a default initialization method automatically.
          Currently, this is `'advi+adapt_diag'`, but this can change in
          the future. If you depend on the exact behaviour, choose an
          initialization method explicitly.
        * adapt_diag : Start with a identity mass matrix and then adapt
          a diagonal based on the variance of the tuning samples.
        * advi+adapt_diag : Run ADVI and then adapt the resulting diagonal
          mass matrix based on the sample variance of the tuning samples.
        * advi+adapt_diag_grad : Run ADVI and then adapt the resulting
          diagonal mass matrix based on the variance of the gradients
          during tuning. This is **experimental** and might be removed
          in a future release.
        * advi : Run ADVI to estimate posterior mean and diagonal mass
          matrix.
        * advi_map: Initialize ADVI with MAP and use MAP as starting point.
        * map : Use the MAP as starting point. This is discouraged.
        * nuts : Run NUTS and estimate posterior mean and mass matrix from
          the trace.
    njobs : int
        Number of parallel jobs to start.
    n_init : int
        Number of iterations of initializer
        If 'ADVI', number of iterations, if 'nuts', number of draws.
    model : Model (optional if in `with` context)
    random_seed : int or array-like of int
        Seed for the initializer; only the first element of an array-like
        value is used.
    progressbar : bool
        Whether or not to display a progressbar for advi sampling.
    **kwargs : keyword arguments
        Extra keyword arguments are forwarded to pymc3.NUTS.

    Returns
    -------
    start : pymc3.model.Point
        Starting point for sampler
    nuts_sampler : pymc3.step_methods.NUTS
        Instantiated and initialized NUTS sampler object
    """
    model = pm.modelcontext(model)

    vars = kwargs.get('vars', model.vars)
    if set(vars) != set(model.vars):
        raise ValueError('Must use init_nuts on all variables of a model.')
    if not pm.model.all_continuous(vars):
        raise ValueError('init_nuts can only be used for models with only '
                         'continuous variables.')

    if not isinstance(init, str):
        raise TypeError('init must be a string.')

    # `init` is guaranteed to be a str at this point (None would already
    # have raised TypeError above), so it can be normalized unconditionally.
    init = init.lower()

    if init == 'auto':
        init = 'advi+adapt_diag'

    pm._log.info('Initializing NUTS using {}...'.format(init))

    random_seed = int(np.atleast_1d(random_seed)[0])

    cb = [
        pm.callbacks.CheckParametersConvergence(tolerance=1e-2, diff='absolute'),
        pm.callbacks.CheckParametersConvergence(tolerance=1e-2, diff='relative'),
    ]

    def fit_advi():
        # Run ADVI with the settings shared by all advi-based initializers.
        return pm.fit(
            random_seed=random_seed,
            n=n_init, method='advi', model=model,
            callbacks=cb,
            progressbar=progressbar,
            obj_optimizer=pm.adagrad_window,
        )

    def diag_cov(approx):
        # Diagonal covariance estimate (elementwise std**2) of the fit.
        stds = approx.gbij.rmap(approx.std.eval())
        return model.dict_to_array(stds) ** 2

    def approx_mean(approx):
        # Posterior mean of the fit, flattened to the model's array layout.
        mean = approx.gbij.rmap(approx.mean.get_value())
        return model.dict_to_array(mean)

    if init == 'adapt_diag':
        start = []
        for _ in range(njobs):
            vals = distribution.draw_values(model.free_RVs)
            point = {var.name: vals[i] for i, var in enumerate(model.free_RVs)}
            start.append(point)
        mean = np.mean([model.dict_to_array(vals) for vals in start], axis=0)
        var = np.ones_like(mean)
        potential = quadpotential.QuadPotentialDiagAdapt(model.ndim, mean, var, 10)
    elif init == 'advi+adapt_diag_grad':
        approx = fit_advi()
        start = list(approx.sample(draws=njobs))
        # weight=50: initial pseudo-observations for the adaptation window.
        potential = quadpotential.QuadPotentialDiagAdaptGrad(
            model.ndim, approx_mean(approx), diag_cov(approx), 50)
    elif init == 'advi+adapt_diag':
        approx = fit_advi()
        start = list(approx.sample(draws=njobs))
        potential = quadpotential.QuadPotentialDiagAdapt(
            model.ndim, approx_mean(approx), diag_cov(approx), 50)
    elif init == 'advi':
        approx = fit_advi()  # type: pm.MeanField
        start = list(approx.sample(draws=njobs))
        potential = quadpotential.QuadPotentialDiag(diag_cov(approx))
    elif init == 'advi_map':
        # Warm-start the mean-field approximation at the MAP estimate.
        start = pm.find_MAP()
        approx = pm.MeanField(model=model, start=start)
        pm.fit(
            random_seed=random_seed,
            n=n_init, method=pm.ADVI.from_mean_field(approx),
            callbacks=cb,
            progressbar=progressbar,
            obj_optimizer=pm.adagrad_window
        )
        start = list(approx.sample(draws=njobs))
        potential = quadpotential.QuadPotentialDiag(diag_cov(approx))
    elif init == 'map':
        start = pm.find_MAP()
        cov = pm.find_hessian(point=start)
        start = [start] * njobs
        potential = quadpotential.QuadPotentialFull(cov)
    elif init == 'nuts':
        init_trace = pm.sample(draws=n_init, step=pm.NUTS(),
                               tune=n_init // 2,
                               random_seed=random_seed)
        cov = np.atleast_1d(pm.trace_cov(init_trace))
        start = list(np.random.choice(init_trace, njobs))
        potential = quadpotential.QuadPotentialFull(cov)
    else:
        raise NotImplementedError('Initializer {} is not supported.'.format(init))

    # Every branch builds a list of njobs starting points; unwrap the
    # single-job case (previously duplicated in each branch).
    if njobs == 1:
        start = start[0]

    step = pm.NUTS(potential=potential, **kwargs)

    return start, step