def logp(value, distribution, lower, upper):
    """
    Calculate log-probability of Bounded distribution at specified value.

    Parameters
    ----------
    value: numeric
        Value for which log-probability is calculated.
    distribution: TensorVariable
        Distribution which is being bounded
    lower: numeric
        Lower bound for the distribution being bounded.
    upper: numeric
        Upper bound for the distribution being bounded.

    Returns
    -------
    TensorVariable
    """
    # Outside [lower, upper] the bounded density is zero, i.e. -inf in log
    # space; inside the bounds, defer to the wrapped distribution's logp.
    # NOTE(review): the inner `logp(distribution, value)` call uses a different
    # argument order than this function's own signature, so it presumably
    # targets a module-level `logp` dispatcher rather than this function --
    # confirm that name is not shadowed by this definition at the call site.
    res = at.switch(
        at.or_(at.lt(value, lower), at.gt(value, upper)),
        -np.inf,
        logp(distribution, value),
    )
    # Runtime guard: the bounds themselves must be ordered.
    return check_parameters(
        res,
        lower <= upper,
        msg="lower <= upper",
    )
def marginal_mixture_logcdf(op, value, rng, weights, *components, **kwargs):
    """Log-CDF of a marginalized mixture.

    Computes logsumexp(log(weights) + logcdf(component, value)) over the
    mixture axis, with a runtime check that the weights form a simplex.
    """
    if len(components) > 1:
        # Evaluate every component at the same value and stack the results
        # along a new trailing mixture axis.
        per_component = [logcdf(comp, value) for comp in components]
        components_logcdf = at.stack(per_component, axis=-1)
    else:
        # Single "batched" component: insert the mixture axis into the value
        # instead, so it broadcasts against the component's batch dimensions.
        only = components[0]
        mix_axis = -only.owner.op.ndim_supp - 1
        components_logcdf = logcdf(only, at.expand_dims(value, mix_axis))

    weighted = at.log(weights) + components_logcdf
    mix_logcdf = at.logsumexp(weighted, axis=-1)

    return check_parameters(
        mix_logcdf,
        0 <= weights,
        weights <= 1,
        at.isclose(at.sum(weights, axis=-1), 1),
        msg="0 <= weights <= 1, sum(weights) == 1",
    )
def test_check_parameters(conditions, succeeds):
    """check_parameters evaluates cleanly when all conditions hold and raises
    ParameterValueError carrying the supplied message otherwise."""
    checked = check_parameters(1, *conditions, msg="parameter check msg")
    if not succeeds:
        with pytest.raises(ParameterValueError, match="^parameter check msg$"):
            checked.eval()
    else:
        assert checked.eval()
def marginal_mixture_logcdf(op, value, rng, weights, *components, **kwargs):
    """Log-CDF of a marginalized mixture: logsumexp of the weighted component
    log-CDFs, with a runtime check that ``weights`` lie on the simplex."""
    # single component
    if len(components) == 1:
        # Need to broadcast value across mixture axis
        mix_axis = -components[0].owner.op.ndim_supp - 1
        components_logcdf = logcdf(components[0], at.expand_dims(value, mix_axis))
    else:
        # Multiple components: stack their log-CDFs along a new trailing axis.
        components_logcdf = at.stack(
            [logcdf(component, value) for component in components],
            axis=-1,
        )

    mix_logcdf = at.logsumexp(at.log(weights) + components_logcdf, axis=-1)

    # Squeeze stack dimension
    # There is a Aesara bug in squeeze with negative axis
    # https://github.com/aesara-devs/aesara/issues/830
    # mix_logp = at.squeeze(mix_logp, axis=-1)
    # NOTE(review): logsumexp(axis=-1) above already appears to reduce the
    # mixture axis, so this squeeze targets the last *remaining* axis of the
    # result -- confirm it is intended (e.g. that logsumexp keeps dims here).
    mix_logcdf = at.squeeze(mix_logcdf, axis=mix_logcdf.ndim - 1)

    # Runtime guard that the mixture weights form a valid simplex.
    mix_logcdf = check_parameters(
        mix_logcdf,
        0 <= weights,
        weights <= 1,
        at.isclose(at.sum(weights, axis=-1), 1),
        msg="0 <= weights <= 1, sum(weights) == 1",
    )

    return mix_logcdf
def logp(value, n, p):
    """
    Calculate log-probability of a Multinomial distribution at the
    specified vector of counts.

    Parameters
    ----------
    value: numeric
        Vector of counts for which the log-probability is calculated.
    n: numeric
        Total number of trials.
    p: numeric
        Vector of event probabilities; must be in [0, 1] and sum to 1.

    Returns
    -------
    TensorVariable
    """
    # Multinomial log-pmf kernel: log n! - sum(log value_i!) + sum(value_i * log p_i)
    res = factln(n) - factln(value).sum() + (value * at.log(p)).sum()
    return check_parameters(
        res,
        at.all(value >= 0),
        at.all(0 <= p),
        at.all(p <= 1),
        at.isclose(p.sum(), 1),
        # Fix: every other check_parameters call site in this module names its
        # conditions via msg; without it, failures raise a generic,
        # undiagnosable ParameterValueError.
        msg="value >= 0, 0 <= p <= 1, sum(p) == 1",
    )
def logp(value, n, p):
    """
    Calculate log-probability of a Multinomial distribution at the
    specified vector of counts.

    Parameters
    ----------
    value: numeric
        Vector of counts for which the log-probability is calculated.
    n: numeric
        Total number of trials.
    p: numeric
        Vector of event probabilities; must be in [0, 1] and sum to 1.

    Returns
    -------
    TensorVariable
    """
    # Multinomial log-pmf kernel: log n! - sum(log value_i!) + sum(value_i * log p_i)
    res = factln(n) - factln(value).sum() + (value * at.log(p)).sum()
    return check_parameters(
        res,
        value >= 0,
        0 <= p,
        p <= 1,
        at.isclose(p.sum(), 1),
        # Fix: every other check_parameters call site in this module names its
        # conditions via msg; without it, failures raise a generic,
        # undiagnosable ParameterValueError.
        msg="value >= 0, 0 <= p <= 1, sum(p) == 1",
    )
def logp(self, value):
    """
    Calculate log-probability of defined ``MixtureSameFamily``
    distribution at specified value.

    Parameters
    ----------
    value : numeric
        Value(s) for which log-probability is calculated. If the log
        probabilities for multiple values are desired the values must be
        provided in a numpy array or Aesara tensor

    Returns
    -------
    TensorVariable
    """
    comp_dists = self.comp_dists
    w = self.w
    mixture_axis = self.mixture_axis

    # Dimensions to the right of the mixture axis are the event shape.
    event_shape = comp_dists.shape[mixture_axis + 1 :]

    # To be able to broadcast the comp_dists.logp with w and value
    # We first have to pad the shape of w to the right with ones
    # so that it can broadcast with the event_shape.

    w = at.shape_padright(w, len(event_shape))

    # Second, we have to add the mixture_axis to the value tensor
    # To insert the mixture axis at the correct location, we use the
    # negative number index. This way, we can also handle situations
    # in which, value is an observed value with more batch dimensions
    # than the ones present in the comp_dists.
    comp_dists_ndim = len(comp_dists.shape)

    value = at.shape_padaxis(value, axis=mixture_axis - comp_dists_ndim)

    comp_logp = comp_dists.logp(value)
    # Marginalize the mixture axis via logsumexp of the weighted component
    # log-probabilities, guarding that w is a valid simplex along that axis.
    # NOTE(review): broadcast_conditions=False presumably stops the bound
    # conditions from broadcasting against the logp -- confirm against the
    # check_parameters implementation in this codebase.
    return check_parameters(
        logsumexp(at.log(w) + comp_logp, axis=mixture_axis, keepdims=False),
        w >= 0,
        w <= 1,
        at.allclose(w.sum(axis=mixture_axis - comp_dists_ndim), 1),
        broadcast_conditions=False,
    )
def test_check_bounds_flag(self):
    """Test that CheckParameterValue Ops are replaced or removed when using compile_pymc"""
    values = at.ones(3)
    failing_cond = np.array([1, 0, 1])
    bound = check_parameters(values, failing_cond)

    with pm.Model() as m:
        pass

    # A plain aesara.function keeps the check Op, so evaluating raises.
    with pytest.raises(ParameterValueError):
        aesara.function([], bound)()

    # With check_bounds off, compile_pymc strips the check entirely...
    m.check_bounds = False
    with m:
        assert np.all(compile_pymc([], bound)() == 1)

    # ...and with it on, the raising Op is replaced by -inf propagation.
    m.check_bounds = True
    with m:
        assert np.all(compile_pymc([], bound)() == -np.inf)
def logp( value: at.Variable, mu: at.Variable, sigma: at.Variable, init: at.Variable, steps: at.Variable, ) -> at.TensorVariable: """Calculate log-probability of Gaussian Random Walk distribution at specified value.""" # Calculate initialization logp init_logp = logp(init, value[..., 0]) # Make time series stationary around the mean value stationary_series = value[..., 1:] - value[..., :-1] # Add one dimension to the right, so that mu and sigma broadcast safely along # the steps dimension series_logp = logp(Normal.dist(mu[..., None], sigma[..., None]), stationary_series) return check_parameters( init_logp + series_logp.sum(axis=-1), steps > 0, msg="steps > 0", )
def logp(self, value):
    """
    Calculate log-probability of defined Mixture distribution at specified value.

    Parameters
    ----------
    value: numeric
        Value(s) for which log-probability is calculated. If the log
        probabilities for multiple values are desired the values must be
        provided in a numpy array or Aesara tensor.

    Returns
    -------
    TensorVariable
    """
    weights = self.w
    # Weighted component log-probabilities, marginalized over the trailing
    # mixture axis.
    component_logp = self._comp_logp(value)
    mix_logp = logsumexp(at.log(weights) + component_logp, axis=-1, keepdims=False)
    # Guard that the weights form a valid simplex.
    return check_parameters(
        mix_logp,
        weights >= 0,
        weights <= 1,
        at.allclose(weights.sum(axis=-1), 1),
        broadcast_conditions=False,
    )
def test_check_parameters_shape():
    """Conditions of mixed shapes are reduced internally: the checked value
    keeps its own (scalar) shape."""
    mixed_shape_conditions = (True, at.ones(10), at.ones(5))
    checked = check_parameters(1, *mixed_shape_conditions)
    assert checked.eval().shape == ()