Example #1
    def comp_dists(self, comp_dists):
        self._comp_dists = comp_dists
        if isinstance(comp_dists, Distribution):
            self._comp_dist_shapes = to_tuple(comp_dists.shape)
            self._broadcast_shape = self._comp_dist_shapes
            self.comp_is_distribution = True
        else:
            # Check each comp_dists distribution shape and compute their
            # common broadcast shape. This will be the dist_shape used by
            # generate_samples (the shape of a single random sample from
            # the mixture).
            self._comp_dist_shapes = [to_tuple(d.shape) for d in comp_dists]
            # All component distributions must broadcast with each other
            try:
                self._broadcast_shape = np.broadcast(
                    *(np.empty(shape)
                      for shape in self._comp_dist_shapes)).shape
            except Exception:
                raise TypeError("Supplied comp_dists shapes do not broadcast "
                                "with each other. comp_dists shapes are: "
                                "{}".format(self._comp_dist_shapes))

            # We wrap each comp_dist.random method, adding the kwarg raw_size_,
            # which will be the size attribute passed to _comp_samples.
            # _comp_samples then calls generate_samples, which may change the
            # size value to make it compatible with scipy.stats.*.rvs
            self._generators = []
            for comp_dist in comp_dists:
                generator = Mixture._comp_dist_random_wrapper(comp_dist.random)
                self._generators.append(generator)
            self.comp_is_distribution = False
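The key step above is finding a common broadcast shape for all component distributions. A minimal standalone sketch of that computation with plain NumPy (the component shapes here are made up for illustration):

import numpy as np

# Hypothetical component distribution shapes; they must broadcast together.
comp_dist_shapes = [(3,), (4, 3), (1, 3)]
try:
    broadcast_shape = np.broadcast(
        *(np.empty(shape) for shape in comp_dist_shapes)).shape
except ValueError:
    raise TypeError("Supplied comp_dists shapes do not broadcast with each "
                    "other: {}".format(comp_dist_shapes))
print(broadcast_shape)  # (4, 3)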
Example #2
    def _set_values(cls, lower, upper, size, shape, initval):
        if size is None:
            size = shape

        lower = np.asarray(lower)
        lower = floatX(np.where(lower == None, -np.inf, lower))
        upper = np.asarray(upper)
        upper = floatX(np.where(upper == None, np.inf, upper))

        if initval is None:
            _size = np.broadcast_shapes(to_tuple(size), np.shape(lower),
                                        np.shape(upper))
            _lower = np.broadcast_to(lower, _size)
            _upper = np.broadcast_to(upper, _size)
            initval = np.where(
                (_lower == -np.inf) & (_upper == np.inf),
                0,
                np.where(
                    _lower == -np.inf,
                    _upper - 1,
                    np.where(_upper == np.inf, _lower + 1,
                             (_lower + _upper) / 2),
                ),
            )

        lower = as_tensor_variable(floatX(lower))
        upper = as_tensor_variable(floatX(upper))
        return lower, upper, initval
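The nested np.where above picks a default initial value for each combination of bounds: 0 when both bounds are infinite, one step inside the single finite bound when only one is finite, and the midpoint otherwise. A hedged NumPy-only sketch of that rule (bound values are made up):

import numpy as np

lower = np.array([-np.inf, -np.inf, 0.0, 0.0])
upper = np.array([np.inf, 5.0, np.inf, 10.0])
with np.errstate(invalid="ignore"):  # inf + -inf occurs in the unselected midpoint branch
    initval = np.where(
        (lower == -np.inf) & (upper == np.inf), 0,      # unbounded        -> 0
        np.where(lower == -np.inf, upper - 1,           # upper bound only -> upper - 1
                 np.where(upper == np.inf, lower + 1,   # lower bound only -> lower + 1
                          (lower + upper) / 2)))        # both bounds      -> midpoint
print(initval)  # [0. 4. 1. 5.]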
Example #3
def test_sample_generate_values(fixture_model, fixture_sizes):
    model, RVs = fixture_model
    size = to_tuple(fixture_sizes)
    with model:
        prior = pm.sample_prior_predictive(samples=fixture_sizes)
        for rv in RVs:
            assert prior[rv.name].shape == size + tuple(rv.distribution.shape)
Example #4
def test_density_dist_default_moment_multivariate(with_random, size):
    def _random(mu, rng=None, size=None):
        return rng.normal(mu, scale=1, size=to_tuple(size) + mu.shape)

    if with_random:
        random = _random
    else:
        random = None

    mu_val = np.random.normal(loc=2, scale=1,
                              size=5).astype(aesara.config.floatX)
    with pm.Model():
        mu = pm.Normal("mu", size=5)
        a = pm.DensityDist("a",
                           mu,
                           random=random,
                           ndims_params=[1],
                           ndim_supp=1,
                           size=size)
    if with_random:
        evaled_moment = get_moment(a).eval({mu: mu_val})
        assert evaled_moment.shape == to_tuple(size) + (5, )
        assert np.all(evaled_moment == 0)
    else:
        with pytest.raises(
                TypeError,
                match=
                "Cannot safely infer the size of a multivariate random variable's moment.",
        ):
            evaled_moment = get_moment(a).eval({mu: mu_val})
Example #5
def random_choice(p, size):
    """Return draws from categorical probability functions

    Args:
        p: array
           Probability of each class. If p.ndim > 1, the last axis is
           interpreted as the probability of each class, and numpy.random.choice
           is iterated for every other axis element.
        size: int or tuple
            Shape of the desired output array. If p is multidimensional, size
            should broadcast with p.shape[:-1].

    Returns:
        random sample: array

    """
    k = p.shape[-1]

    if p.ndim > 1:
        # If p is an nd-array, the last axis is interpreted as the class
        # probability. We must iterate over the elements of all the other
        # dimensions.
        # We first ensure that p is broadcasted to the output's shape
        size = to_tuple(size) + (1, )
        p = np.broadcast_arrays(p, np.empty(size))[0]
        out_shape = p.shape[:-1]
        # np.random.choice accepts 1D p arrays, so we semiflatten p to
        # iterate calls using the last axis as the category probabilities
        p = np.reshape(p, (-1, p.shape[-1]))
        samples = np.array([np.random.choice(k, p=p_) for p_ in p])
        # We reshape to the desired output shape
        samples = np.reshape(samples, out_shape)
    else:
        samples = np.random.choice(k, p=p, size=size)
    return samples
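A small usage sketch of random_choice with a multidimensional p (all values are made up): the last axis holds the class probabilities and size broadcasts with p.shape[:-1].

import numpy as np

p = np.array([[0.1, 0.9],
              [0.8, 0.2],
              [0.5, 0.5]])               # shape (3, 2): three rows of class probabilities
draws = random_choice(p=p, size=(4, 3))  # p is broadcast over the leading size axes
print(draws.shape)  # (4, 3)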
Example #6
def generate_poisson_mixture_data(w, mu, size=1000):
    component = np.random.choice(w.size, size=size, p=w)
    mu = np.atleast_1d(mu)
    out_size = to_tuple(size) + mu.shape[:-1]
    mu_ = np.array([mu[..., comp] for comp in component.ravel()])
    mu_ = np.reshape(mu_, out_size)
    x = np.random.poisson(mu_, size=out_size)

    return x
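A hedged usage sketch of the helper above, drawing 1000 observations from a three-component Poisson mixture (weights and rates are made up):

import numpy as np

w = np.array([0.2, 0.5, 0.3])    # mixture weights, must sum to 1
mu = np.array([1.0, 5.0, 20.0])  # Poisson rate of each component
data = generate_poisson_mixture_data(w, mu, size=1000)
print(data.shape)  # (1000,)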
Example #7
def samples_to_broadcast_to(request, samples_to_broadcast):
    to_shape = request.param
    size, samples, broadcast_shape = samples_to_broadcast
    if broadcast_shape is not None:
        try:
            broadcast_shape = broadcast_dist_samples_shape(
                [broadcast_shape, to_tuple(to_shape)], size=size)
        except ValueError:
            broadcast_shape = None
    return to_shape, size, samples, broadcast_shape
Example #8
def get_steps(
    steps: Optional[Union[int, np.ndarray, TensorVariable]],
    *,
    shape: Optional[Shape] = None,
    dims: Optional[Dims] = None,
    observed: Optional[Any] = None,
    step_shape_offset: int = 0,
):
    """Extract number of steps from shape / dims / observed information

    Parameters
    ----------
    steps:
        User specified steps for timeseries distribution
    shape:
        User specified shape for timeseries distribution
    dims:
        User specified dims for timeseries distribution
    observed:
        User specified observed data from timeseries distribution
    step_shape_offset:
        Difference between last shape dimension and number of steps in timeseries
        distribution, defaults to 0

    Returns
    -------
    steps
        Steps, if specified directly by user, or inferred from the last dimension of
        shape / dims / observed. When two sources of step information are provided,
        a symbolic Assert is added to ensure they are consistent.
    """
    inferred_steps = None
    if shape is not None:
        shape = to_tuple(shape)
        if shape[-1] is not ...:
            inferred_steps = shape[-1] - step_shape_offset

    if inferred_steps is None and dims is not None:
        dims = convert_dims(dims)
        if dims[-1] is not ...:
            model = modelcontext(None)
            inferred_steps = model.dim_lengths[dims[-1]] - step_shape_offset

    if inferred_steps is None and observed is not None:
        observed = convert_observed_data(observed)
        inferred_steps = observed.shape[-1] - step_shape_offset

    if inferred_steps is None:
        inferred_steps = steps
    # If there are two sources of information for the steps, assert they are consistent
    elif steps is not None:
        inferred_steps = Assert(msg="Steps do not match last shape dimension")(
            inferred_steps, at.eq(inferred_steps, steps)
        )
    return inferred_steps
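A minimal sketch of the shape branch above (plain Python, not the PyMC call): for a random-walk-like distribution whose last dimension also stores the initial point, step_shape_offset would be 1, so the number of steps is one less than the trailing shape dimension.

shape = (5, 11)          # e.g. 5 independent series, 11 points each (initial point + steps)
step_shape_offset = 1
inferred_steps = shape[-1] - step_shape_offset
print(inferred_steps)    # 10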
Example #9
def test_density_dist_custom_moment_multivariate(size):
    def moment(rv, size, mu):
        return (at.ones(size)[..., None] * mu).astype(rv.dtype)

    mu_val = np.random.normal(loc=2, scale=1, size=5).astype(aesara.config.floatX)
    with pm.Model():
        mu = pm.Normal("mu", size=5)
        a = pm.DensityDist("a", mu, get_moment=moment, ndims_params=[1], ndim_supp=1, size=size)
    evaled_moment = get_moment(a).eval({mu: mu_val})
    assert evaled_moment.shape == to_tuple(size) + (5,)
    assert np.all(evaled_moment == mu_val)
Example #10
def test_density_dist_custom_moment_univariate(size):
    def moment(rv, size, mu):
        return (at.ones(size) * mu).astype(rv.dtype)

    mu_val = np.array(np.random.normal(loc=2, scale=1)).astype(aesara.config.floatX)
    with pm.Model():
        mu = pm.Normal("mu")
        a = pm.DensityDist("a", mu, get_moment=moment, size=size)
    evaled_moment = get_moment(a).eval({mu: mu_val})
    assert evaled_moment.shape == to_tuple(size)
    assert np.all(evaled_moment == mu_val)
Example #11
def generate_normal_mixture_data(w, mu, sigma, size=1000):
    component = np.random.choice(w.size, size=size, p=w)
    mu, sigma = np.broadcast_arrays(mu, sigma)
    out_size = to_tuple(size) + mu.shape[:-1]
    mu_ = np.array([mu[..., comp] for comp in component.ravel()])
    sigma_ = np.array([sigma[..., comp] for comp in component.ravel()])
    mu_ = np.reshape(mu_, out_size)
    sigma_ = np.reshape(sigma_, out_size)
    x = np.random.normal(mu_, sigma_, size=out_size)

    return x
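A hedged usage sketch of the helper above, drawing 1000 observations from a two-component normal mixture (all parameter values are made up):

import numpy as np

w = np.array([0.3, 0.7])       # mixture weights
mu = np.array([-2.0, 3.0])     # component means
sigma = np.array([0.5, 1.5])   # component standard deviations
data = generate_normal_mixture_data(w, mu, sigma, size=1000)
print(data.shape)  # (1000,)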
Example #12
    def test_normal_mixture_nd(self, nd, ncomp):
        nd = to_tuple(nd)
        ncomp = int(ncomp)
        comp_shape = nd + (ncomp,)
        test_mus = np.random.randn(*comp_shape)
        test_taus = np.random.gamma(1, 1, size=comp_shape)
        observed = generate_normal_mixture_data(
            w=np.ones(ncomp) / ncomp, mu=test_mus, sigma=1 / np.sqrt(test_taus), size=10
        )

        with Model() as model0:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            mixture0 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape)
            obs0 = NormalMixture(
                "obs", w=ws, mu=mus, tau=taus, comp_shape=comp_shape, observed=observed
            )

        with Model() as model1:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            comp_dist = [
                Normal.dist(mu=mus[..., i], tau=taus[..., i], shape=nd) for i in range(ncomp)
            ]
            mixture1 = Mixture("m", w=ws, comp_dists=comp_dist, shape=nd)
            obs1 = Mixture("obs", w=ws, comp_dists=comp_dist, observed=observed)

        with Model() as model2:
            # Test that results are correct without comp_shape being passed to the Mixture.
            # This used to fail in V3
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
            obs2 = NormalMixture("obs", w=ws, mu=mus, tau=taus, observed=observed)

        testpoint = model0.compute_initial_point()
        testpoint["mus"] = test_mus
        testpoint["taus_log__"] = np.log(test_taus)
        for logp0, logp1, logp2 in zip(
            model0.compile_logp(vars=[mixture0, obs0], sum=False)(testpoint),
            model1.compile_logp(vars=[mixture1, obs1], sum=False)(testpoint),
            model2.compile_logp(vars=[mixture2, obs2], sum=False)(testpoint),
        ):
            assert_allclose(logp0, logp1)
            assert_allclose(logp0, logp2)
Example #13
    def change_size(cls, rv, new_size, expand=False):
        weights = rv.tag.weights
        components = rv.tag.components

        if expand:
            component = rv.tag.components[0]
            # Old size is equal to `shape[:-ndim_supp]`, with care needed for `ndim_supp == 0`
            size_dims = component.ndim - component.owner.op.ndim_supp
            if len(rv.tag.components) == 1:
                # If we have a single component, new size should ignore the mixture axis
                # dimension, as that is not touched by `_resize_components`
                size_dims -= 1
            old_size = components[0].shape[:size_dims]
            new_size = to_tuple(new_size) + tuple(old_size)

        components = cls._resize_components(new_size, *components)

        return cls.rv_op(weights, *components, size=None)
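A minimal sketch of the expand bookkeeping above (plain Python, shapes are made up): the requested new size is prepended to the old batch size, so existing dimensions are kept rather than replaced.

old_size = (2, 3)   # current batch shape of the components
new_size = (5,)     # extra leading dimension requested with expand=True
expanded = tuple(new_size) + tuple(old_size)
print(expanded)     # (5, 2, 3)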
Example #14
    def __init__(self, w, comp_dists, mixture_axis=-1, *args, **kwargs):
        self.w = at.as_tensor_variable(w)
        if not isinstance(comp_dists, Distribution):
            raise TypeError(
                "The MixtureSameFamily distribution only accepts Distribution "
                f"instances as its components. Got {type(comp_dists)} instead."
            )
        self.comp_dists = comp_dists
        if mixture_axis < 0:
            mixture_axis = len(comp_dists.shape) + mixture_axis
            if mixture_axis < 0:
                raise ValueError(
                    "`mixture_axis` must index a dimension of the components' "
                    "distribution shape. Got out-of-bounds axis "
                    f"{mixture_axis - len(comp_dists.shape)}."
                )
        comp_shape = to_tuple(comp_dists.shape)
        self.shape = comp_shape[:mixture_axis] + comp_shape[mixture_axis + 1:]
        self.mixture_axis = mixture_axis
        kwargs.setdefault("dtype", self.comp_dists.dtype)

        # Compute the mode so we don't always have to pass an initval
        defaults = kwargs.pop("defaults", [])
        event_shape = self.comp_dists.shape[mixture_axis + 1:]
        _w = at.shape_padleft(
            at.shape_padright(w, len(event_shape)),
            len(self.comp_dists.shape) - w.ndim - len(event_shape),
        )
        mode = take_along_axis(
            self.comp_dists.mode,
            at.argmax(_w, keepdims=True),
            axis=mixture_axis,
        )
        self.mode = mode[(..., 0) + (slice(None), ) * len(event_shape)]

        if not all_discrete(comp_dists):
            mean = at.as_tensor_variable(self.comp_dists.mean)
            self.mean = (_w * mean).sum(axis=mixture_axis)
            if "mean" not in defaults:
                defaults.append("mean")
        defaults.append("mode")

        super().__init__(defaults=defaults, *args, **kwargs)
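The mode computation above selects, along the mixture axis, the component mode that carries the largest weight. A hedged NumPy analogue of that idea, using scalar weights for simplicity (values are made up; the real code builds an Aesara graph with take_along_axis instead):

import numpy as np

w = np.array([0.2, 0.7, 0.1])             # weights over 3 components
comp_modes = np.array([[0.0, 1.0, 2.0],   # component modes, mixture axis last,
                       [3.0, 4.0, 5.0]])  # batch shape (2,)
idx = np.argmax(w)                        # heaviest component
mode = np.take(comp_modes, idx, axis=-1)  # drop the mixture axis
print(mode)  # [1. 4.]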
Example #15
    def test_get_broadcastable_dist_samples(self, samples_to_broadcast):
        size, samples, broadcast_shape = samples_to_broadcast
        if broadcast_shape is not None:
            size_ = to_tuple(size)
            outs, out_shape = get_broadcastable_dist_samples(
                samples, size=size, return_out_shape=True)
            assert out_shape == broadcast_shape
            for i, o in zip(samples, outs):
                ishape = i.shape
                if ishape[:min([len(size_), len(ishape)])] == size_:
                    expected_shape = (
                        size_ + (1,) * (len(broadcast_shape) - len(ishape)) +
                        ishape[len(size_):])
                else:
                    expected_shape = ishape
                assert o.shape == expected_shape
            assert shapes_broadcasting(
                *(o.shape for o in outs)) == broadcast_shape
        else:
            with pytest.raises(ValueError):
                get_broadcastable_dist_samples(samples, size=size)
Example #16
    def test_broadcast_dist_samples_shape(self, fixture_sizes, fixture_shapes):
        size = fixture_sizes
        shapes = fixture_shapes
        size_ = to_tuple(size)
        shapes_ = [
            s if s[:min([len(size_), len(s)])] != size_ else s[len(size_):]
            for s in shapes
        ]
        try:
            expected_out = np.broadcast(*(np.empty(s) for s in shapes_)).shape
        except ValueError:
            expected_out = None
        if expected_out is not None and any(
                s[:min([len(size_), len(s)])] == size_ for s in shapes):
            expected_out = size_ + expected_out
        if expected_out is None:
            with pytest.raises(ValueError):
                broadcast_dist_samples_shape(shapes, size=size)
        else:
            out = broadcast_dist_samples_shape(shapes, size=size)
            assert out == expected_out
Example #17
def _random(mu, rng=None, size=None):
    return rng.normal(mu, scale=1, size=to_tuple(size) + mu.shape)
Example #18
    def test_normal_mixture_nd(self, nd, ncomp):
        nd = to_tuple(nd)
        ncomp = int(ncomp)
        comp_shape = nd + (ncomp,)
        test_mus = np.random.randn(*comp_shape)
        test_taus = np.random.gamma(1, 1, size=comp_shape)
        observed = generate_normal_mixture_data(
            w=np.ones(ncomp) / ncomp, mu=test_mus, sd=1 / np.sqrt(test_taus), size=10
        )

        with Model() as model0:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            mixture0 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape)
            obs0 = NormalMixture(
                "obs", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape, observed=observed
            )

        with Model() as model1:
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            comp_dist = [
                Normal.dist(mu=mus[..., i], tau=taus[..., i], shape=nd) for i in range(ncomp)
            ]
            mixture1 = Mixture("m", w=ws, comp_dists=comp_dist, shape=nd)
            obs1 = Mixture("obs", w=ws, comp_dists=comp_dist, shape=nd, observed=observed)

        with Model() as model2:
            # Expected to fail if comp_shape is not provided, nd is
            # multidimensional, and it does not broadcast with ncomp. If it
            # happens to broadcast, an error is still raised when the mixture
            # is given observed data.
            # The Mixture also raises an error when the observed data is
            # multidimensional but does not broadcast with comp_dists.
            mus = Normal("mus", shape=comp_shape)
            taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
            ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
            if len(nd) > 1:
                if nd[-1] != ncomp:
                    with pytest.raises(ValueError):
                        NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
                    mixture2 = None
                else:
                    mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
            else:
                mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
            observed_fails = False
            if len(nd) >= 1 and nd != (1,):
                try:
                    np.broadcast(np.empty(comp_shape), observed)
                except Exception:
                    observed_fails = True
            if observed_fails:
                with pytest.raises(ValueError):
                    NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)
                obs2 = None
            else:
                obs2 = NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)

        testpoint = model0.recompute_initial_point()
        testpoint["mus"] = test_mus
        testpoint["taus"] = test_taus
        assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
        assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
        assert_allclose(obs0.logp(testpoint), obs1.logp(testpoint))
        if mixture2 is not None and obs2 is not None:
            assert_allclose(model0.logp(testpoint), model2.logp(testpoint))
        if mixture2 is not None:
            assert_allclose(mixture0.logp(testpoint), mixture2.logp(testpoint))
        if obs2 is not None:
            assert_allclose(obs0.logp(testpoint), obs2.logp(testpoint))
Example #19
    def random(self, point=None, size=None):
        """
        Draw random values from MvGaussianRandomWalk.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int or tuple of ints, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array


        Examples
        --------

        .. code-block:: python

            with pm.Model():
                mu = np.array([1.0, 0.0])
                cov = np.array([[1.0, 0.0],
                                [0.0, 2.0]])

                # draw one sample from a 2-dimensional Gaussian random walk with 10 timesteps
                sample = MvGaussianRandomWalk(mu, cov, shape=(10, 2)).random()

                # draw three samples from a 2-dimensional Gaussian random walk with 10 timesteps
                sample = MvGaussianRandomWalk(mu, cov, shape=(10, 2)).random(size=3)

                # draw four samples from a 2-dimensional Gaussian random walk with 10 timesteps,
                # indexed with a (2, 2) array
                sample = MvGaussianRandomWalk(mu, cov, shape=(10, 2)).random(size=(2, 2))

        """

        # for each draw specified by the size input, we need to draw time_steps many
        # samples from MvNormal.

        size = to_tuple(size)
        multivariate_samples = self.innov.random(point=point, size=size)
        # this has shape (size, self.shape)
        if len(self.shape) == 2:
            # The time dimension is in the first slot of self.shape, so the
            # time axis index equals the length of size.
            time_axis = len(size)
            multivariate_samples = multivariate_samples.cumsum(axis=time_axis)
            if time_axis != 0:
                # this for loop covers the case where size is a tuple
                for idx in np.ndindex(size):
                    multivariate_samples[idx] = (
                        multivariate_samples[idx] - multivariate_samples[idx][0]
                    )
            else:
                # size was passed as None
                multivariate_samples = multivariate_samples - multivariate_samples[0]

        # If the check above is not met, then only a spatial dimension was
        # passed in for self.shape. In that case we don't subtract the initial
        # value, since otherwise the output would be all zeros.
        return multivariate_samples
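A hedged NumPy sketch of the idea behind this method, for the single-sample case only (parameters are made up): the walk is the cumulative sum of multivariate normal innovations along the time axis, shifted so it starts at the origin.

import numpy as np

mu = np.array([1.0, 0.0])
cov = np.array([[1.0, 0.0],
                [0.0, 2.0]])
rng = np.random.default_rng(0)
innovations = rng.multivariate_normal(mu, cov, size=10)  # shape (10, 2): 10 timesteps
walk = innovations.cumsum(axis=0)
walk = walk - walk[0]                                    # start the walk at zero
print(walk.shape)  # (10, 2)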