Example #1
# imports assumed for this snippet (PyMC 4.x with the Aesara backend)
import aesara.tensor as at
import pytest

from pymc import Normal, logp

def test_logp_helper_exceptions():
    with pytest.raises(TypeError, match="When RV is not a pure distribution"):
        logp(at.exp(Normal.dist()), [1, 2])

    with pytest.raises(NotImplementedError,
                       match="PyMC could not infer logp of input variable"):
        logp(at.cos(Normal.dist()), 1)
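For contrast with the failure cases above, here is a minimal sketch of a call that succeeds because the input is a pure distribution (assuming PyMC >= 4.0, where logp and Normal are exposed at the top level of the pymc namespace):

import pymc as pm

# log-density of a standard normal at 0.0 is -0.5 * log(2 * pi) ~= -0.9189
print(pm.logp(pm.Normal.dist(0.0, 1.0), 0.0).eval())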
Example #2
    def backward(self, y):
        return aet.arctan2(aet.sin(y), aet.cos(y))
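The arctan2(sin(y), cos(y)) composition used above wraps an unconstrained value back into the interval (-pi, pi]. A quick standalone NumPy check of that identity (illustrative only, not part of the original transform class):

import numpy as np

y = np.array([0.5, 3.5, -4.0, 10.0])
wrapped = np.arctan2(np.sin(y), np.cos(y))
# each entry equals y shifted by a multiple of 2*pi into (-pi, pi]
print(wrapped)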
Example #3
    def full(self, X, Xs=None):
        X, Xs = self._slice(X, Xs)
        return at.cos(2.0 * np.pi * self.euclidean_dist(X, Xs))
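This implements the cosine covariance k(x, x') = cos(2 * pi * d(x, x')), where d is the Euclidean distance computed (and lengthscale-scaled) by the base class. A standalone NumPy sketch of the same formula for 1-D inputs, with ls as an illustrative lengthscale parameter not taken from the snippet:

import numpy as np

def cosine_cov(x, xs, ls=1.0):
    # k(x, x') = cos(2 * pi * |x - x'| / ls) for 1-D inputs
    d = np.abs(np.subtract.outer(x, xs)) / ls
    return np.cos(2.0 * np.pi * d)

print(cosine_cov(np.array([0.0, 0.25]), np.array([0.0, 0.5])))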
Example #4
    def normal(
        self,
        size,
        avg=0.0,
        std=1.0,
        ndim=None,
        dtype=None,
        nstreams=None,
        truncate=False,
        **kwargs,
    ):
        """
        Sample a tensor of values from a normal distribution.

        Parameters
        ----------
        size : int_vector_like
            Array dimensions for the output tensor.
        avg : float_like, optional
            The mean of the normal distribution to sample from (defaults to 0.0).
        std : float_like, optional
            The standard deviation of the normal distribution to sample from (defaults to 1.0).
        truncate : bool, optional
            Truncates the normal distribution at 2 standard deviations if True (defaults to False).
            When this flag is set, the standard deviation of the result will be less than the one specified.
        ndim : int, optional
            The number of dimensions for the output tensor (defaults to None).
            This argument is necessary if the size argument is ambiguous on the number of dimensions.
        dtype : str, optional
            The data-type for the output tensor. If not specified,
            the dtype is inferred from avg and std, but it is at least as precise as floatX.
        kwargs
            Other keyword arguments for random number generation (see uniform).

        Returns
        -------
        samples : TensorVariable
            An Aesara tensor of samples randomly drawn from a normal distribution.

        """
        size = _check_size(size)
        avg = undefined_grad(as_tensor_variable(avg))
        std = undefined_grad(as_tensor_variable(std))

        if dtype is None:
            dtype = scal.upcast(config.floatX, avg.dtype, std.dtype)

        avg = tensor.cast(avg, dtype=dtype)
        std = tensor.cast(std, dtype=dtype)

        # generate an even number of uniform samples
        # Do manual constant folding to reduce the optimizer's work.
        if isinstance(size, aesara.Constant):
            n_odd_samples = size.prod(dtype="int64")
        else:
            n_odd_samples = tensor.prod(size, dtype="int64")
        n_even_samples = n_odd_samples + n_odd_samples % 2
        uniform = self.uniform(
            (n_even_samples, ),
            low=0.0,
            high=1.0,
            ndim=1,
            dtype=dtype,
            nstreams=nstreams,
            **kwargs,
        )

        # box-muller transform
        u1 = uniform[:n_even_samples // 2]
        u2 = uniform[n_even_samples // 2:]
        r = tensor.sqrt(-2.0 * tensor.log(u1))
        theta = np.array(2.0 * np.pi, dtype=dtype) * u2
        cos_theta, sin_theta = tensor.cos(theta), tensor.sin(theta)
        z0 = r * cos_theta
        z1 = r * sin_theta

        if truncate:
            # use valid samples
            to_fix0 = (z0 < -2.0) | (z0 > 2.0)
            to_fix1 = (z1 < -2.0) | (z1 > 2.0)
            z0_valid = z0[tensor.nonzero(~to_fix0)]
            z1_valid = z1[tensor.nonzero(~to_fix1)]

            # re-sample invalid samples
            to_fix0 = tensor.nonzero(to_fix0)[0]
            to_fix1 = tensor.nonzero(to_fix1)[0]
            n_fix_samples = to_fix0.size + to_fix1.size
            lower = tensor.constant(1.0 / np.e**2, dtype=dtype)
            u_fix = self.uniform(
                (n_fix_samples, ),
                low=lower,
                high=1.0,
                ndim=1,
                dtype=dtype,
                nstreams=nstreams,
                **kwargs,
            )
            r_fix = tensor.sqrt(-2.0 * tensor.log(u_fix))
            z0_fixed = r_fix[:to_fix0.size] * cos_theta[to_fix0]
            z1_fixed = r_fix[to_fix0.size:] * sin_theta[to_fix1]

            # pack everything together to a useful result
            norm_samples = tensor.join(0, z0_valid, z0_fixed, z1_valid,
                                       z1_fixed)
        else:
            norm_samples = tensor.join(0, z0, z1)
        if isinstance(n_odd_samples, aesara.Variable):
            samples = norm_samples[:n_odd_samples]
        elif n_odd_samples % 2 == 1:
            samples = norm_samples[:-1]
        else:
            samples = norm_samples
        samples = tensor.reshape(samples, newshape=size, ndim=ndim)
        samples *= std
        samples += avg

        return samples
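The core of this sampler is the Box-Muller transform: two independent U(0, 1) draws u1 and u2 yield two independent standard normals z0 = sqrt(-2 ln u1) * cos(2 pi u2) and z1 = sqrt(-2 ln u1) * sin(2 pi u2), which are then scaled by std and shifted by avg. A minimal NumPy sketch of that mapping (illustrative only, not the Aesara implementation above):

import numpy as np

rng = np.random.default_rng(0)
u1 = rng.uniform(size=100_000)
u2 = rng.uniform(size=100_000)
r = np.sqrt(-2.0 * np.log(u1))
theta = 2.0 * np.pi * u2
z = np.concatenate([r * np.cos(theta), r * np.sin(theta)])
print(z.mean(), z.std())  # approximately 0 and 1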
Example #5
    def backward(self, rv_var, rv_value):
        return at.arctan2(at.sin(rv_value), at.cos(rv_value))