def marginal_mixture_logcdf(op, value, rng, weights, *components, **kwargs):
    """Compute the logcdf of a marginalized mixture distribution.

    The mixture logcdf is ``logsumexp(log(weights) + component_logcdfs)`` over
    the mixture axis, guarded by a runtime check that the weights form a valid
    simplex (each in [0, 1] and summing to 1 along the last axis).
    """
    if len(components) == 1:
        # Single "batched" component: insert a broadcastable axis into `value`
        # at the mixture position so it lines up with the component's batch dim.
        stack_axis = -components[0].owner.op.ndim_supp - 1
        per_component_logcdf = logcdf(components[0], at.expand_dims(value, stack_axis))
    else:
        # One explicit component per mixture class: evaluate each and stack
        # along a new trailing mixture axis.
        per_component_logcdf = at.stack(
            [logcdf(comp, value) for comp in components],
            axis=-1,
        )

    # Weighted log-sum-exp over the mixture axis (reduces the trailing dim).
    result = at.logsumexp(at.log(weights) + per_component_logcdf, axis=-1)

    return check_parameters(
        result,
        0 <= weights,
        weights <= 1,
        at.isclose(at.sum(weights, axis=-1), 1),
        msg="0 <= weights <= 1, sum(weights) == 1",
    )
def marginal_mixture_logcdf(op, value, rng, weights, *components, **kwargs):
    """Compute the logcdf of a marginalized mixture distribution.

    The mixture logcdf is ``logsumexp(log(weights) + component_logcdfs)`` over
    the mixture axis, guarded by a runtime check that the weights form a valid
    simplex (each in [0, 1] and summing to 1 along the last axis).
    """
    if len(components) == 1:
        # Single "batched" component: broadcast `value` across the mixture
        # axis so it lines up with the component's batch dimension.
        mix_axis = -components[0].owner.op.ndim_supp - 1
        components_logcdf = logcdf(components[0], at.expand_dims(value, mix_axis))
    else:
        # One explicit component per mixture class: evaluate each and stack
        # along a new trailing mixture axis.
        components_logcdf = at.stack(
            [logcdf(component, value) for component in components],
            axis=-1,
        )

    # logsumexp with the default keepdims=False already removes the stacked
    # mixture axis, so no extra squeeze is needed afterwards. (The previous
    # squeeze of `mix_logcdf.ndim - 1` acted on a *real* trailing dimension
    # of the already-reduced result, corrupting the output shape.)
    mix_logcdf = at.logsumexp(at.log(weights) + components_logcdf, axis=-1)

    mix_logcdf = check_parameters(
        mix_logcdf,
        0 <= weights,
        weights <= 1,
        at.isclose(at.sum(weights, axis=-1), 1),
        msg="0 <= weights <= 1, sum(weights) == 1",
    )

    return mix_logcdf
def test_logcdf_helper():
    """`logcdf` matches scipy for both symbolic and constant value inputs."""
    value = at.vector("value")
    x = Normal.dist(0, 1)
    expected = sp.norm(0, 1).logcdf([0, 1])

    # Symbolic value: evaluate with a substitution dict.
    symbolic_res = logcdf(x, value)
    np.testing.assert_almost_equal(symbolic_res.eval({value: [0, 1]}), expected)

    # Constant value: evaluable with no inputs.
    constant_res = logcdf(x, [0, 1])
    np.testing.assert_almost_equal(constant_res.eval(), expected)
def test_logcdf_helper():
    """`logcdf` with sum=False returns elementwise values matching scipy."""
    value = at.vector("value")
    x = Normal.dist(0, 1, size=2)
    expected = sp.norm(0, 1).logcdf([0, 1])

    # Symbolic value: evaluate with a substitution dict.
    symbolic_logcdf = logcdf(x, value, sum=False)
    np.testing.assert_almost_equal(symbolic_logcdf.eval({value: [0, 1]}), expected)

    # Constant value: evaluable with no inputs.
    constant_logcdf = logcdf(x, [0, 1], sum=False)
    np.testing.assert_almost_equal(constant_logcdf.eval(), expected)
def test_logcdf_transformed_argument():
    """Model logp with a -logcdf Potential equals a truncated-normal logp.

    Normalizing a Normal by P(x <= 1) via ``Potential("norm_term", -logcdf(x, 1.0))``
    should reproduce ``TruncatedNormal(upper=1.0)`` exactly.
    """
    with Model() as m:
        sigma = HalfFlat("sigma")
        x = Normal("x", 0, sigma)
        Potential("norm_term", -logcdf(x, 1.0))

    # sigma is log-transformed in the model, so supply its log-space value.
    log_sigma = -1.0
    sigma_val = np.exp(log_sigma)
    x_val = 0.5

    observed = m.logp_nojac({"sigma_log__": log_sigma, "x": x_val})
    expected = logp(
        TruncatedNormal.dist(0, sigma_val, lower=None, upper=1.0), x_val
    ).eval()
    assert np.isclose(observed, expected)