def test_beta_density(batch_shape, eager):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    @funsor.function(reals(), reals(), reals(), reals())
    def beta(concentration1, concentration0, value):
        return backend_dist.Beta(concentration1,
                                 concentration0).log_prob(value)

    check_funsor(beta, {
        'concentration1': reals(),
        'concentration0': reals(),
        'value': reals()
    }, reals())

    concentration1 = Tensor(ops.exp(randn(batch_shape)), inputs)
    concentration0 = Tensor(ops.exp(randn(batch_shape)), inputs)
    value = Tensor(rand(batch_shape), inputs)
    expected = beta(concentration1, concentration0, value)
    check_funsor(expected, inputs, reals())

    d = Variable('value', reals())
    actual = dist.Beta(concentration1, concentration0, value) if eager else \
        dist.Beta(concentration1, concentration0, d)(value=value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)
    # Fragment of a sampling gradient check: `be_inputs`, `sampled_vars`,
    # `sample_inputs`, `rng_key`, and `probe` are closed over from the
    # enclosing test helper.
    def diff_fn(p_data):
        p = Tensor(p_data, be_inputs)
        q = p.sample(sampled_vars, sample_inputs, rng_key=rng_key)
        mq = p.materialize(q).reduce(ops.logaddexp, 'n')
        mq = mq.align(tuple(p.inputs))

        _, (p_data, mq_data) = align_tensors(p, mq)
        assert p_data.shape == mq_data.shape
        return (ops.exp(mq_data) * probe).sum() - (ops.exp(p_data) *
                                                   probe).sum(), mq
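# The closure above returns a scalar objective (plus the materialized
# sample) so that its gradient with respect to `p_data` can be compared
# against a numerical estimate. Below is a minimal numerical-gradient
# sketch one could pair with it; `central_difference` is a hypothetical
# helper, not funsor API, and assumes `fn` maps a float array to a scalar.
import numpy as np

def central_difference(fn, x, eps=1e-4):
    # Estimate d fn(x) / d x element-wise by central differences,
    # perturbing and restoring one entry of x at a time.
    grad = np.zeros_like(x)
    flat, gflat = x.reshape(-1), grad.reshape(-1)  # views into x and grad
    for i in range(flat.size):
        orig = flat[i]
        flat[i] = orig + eps
        f_plus = fn(x)
        flat[i] = orig - eps
        f_minus = fn(x)
        flat[i] = orig
        gflat[i] = (f_plus - f_minus) / (2 * eps)
    return grad
# Hypothetical pairing: central_difference(lambda d: diff_fn(d)[0], p_data)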
def test_beta_sample(with_lazy, batch_shape, sample_inputs, reparametrized):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    concentration1 = ops.exp(randn(batch_shape))
    concentration0 = ops.exp(randn(batch_shape))
    funsor_dist_class = (dist.Beta if reparametrized else dist.NonreparameterizedBeta)
    params = (concentration1, concentration0)

    _check_sample(funsor_dist_class, params, sample_inputs, inputs, atol=1e-2 if reparametrized else 1e-1,
                  statistic="variance", num_samples=100000, with_lazy=with_lazy)
def test_gamma_gamma_conjugate(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    full_shape = batch_shape
    prior = Variable("prior", Real)
    concentration0 = Tensor(ops.exp(randn(full_shape)), inputs)
    rate0 = Tensor(ops.exp(randn(full_shape)), inputs)
    concentration = Tensor(ops.exp(randn(full_shape)), inputs)
    latent = dist.Gamma(concentration0, rate0, value=prior)
    conditional = dist.Gamma(concentration, rate=prior)

    obs = Tensor(ops.exp(randn(full_shape)), inputs)
    _assert_conjugate_density_ok(latent, conditional, obs, prec=0.02)
def test_delta_density(batch_shape, event_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    @funsor.function(reals(*event_shape), reals(), reals(*event_shape),
                     reals())
    def delta(v, log_density, value):
        eq = (v == value)
        for _ in range(len(event_shape)):
            eq = ops.all(eq, -1)
        return ops.log(ops.astype(eq, 'float32')) + log_density

    check_funsor(
        delta, {
            'v': reals(*event_shape),
            'log_density': reals(),
            'value': reals(*event_shape)
        }, reals())

    v = Tensor(randn(batch_shape + event_shape), inputs)
    log_density = Tensor(ops.exp(randn(batch_shape)), inputs)
    for value in [v, Tensor(randn(batch_shape + event_shape), inputs)]:
        expected = delta(v, log_density, value)
        check_funsor(expected, inputs, reals())

        actual = dist.Delta(v, log_density, value)
        check_funsor(actual, inputs, reals())
        assert_close(actual, expected)
def test_dirichlet_multinomial_density(batch_shape, event_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    max_count = 10

    @funsor.function
    def dirichlet_multinomial(concentration: Reals[event_shape], total_count: Real,
                              value: Reals[event_shape]) -> Real:
        return backend_dist.DirichletMultinomial(concentration, total_count).log_prob(value)

    check_funsor(dirichlet_multinomial, {'concentration': Reals[event_shape],
                                         'total_count': Real,
                                         'value': Reals[event_shape]},
                 Real)

    concentration = Tensor(ops.exp(randn(batch_shape + event_shape)), inputs)
    value_data = ops.astype(randint(0, max_count, size=batch_shape + event_shape), 'float32')
    total_count_data = value_data.sum(-1) + ops.astype(randint(0, max_count, size=batch_shape), 'float32')
    value = Tensor(value_data, inputs)
    total_count = Tensor(total_count_data, inputs)
    expected = dirichlet_multinomial(concentration, total_count, value)
    check_funsor(expected, inputs, Real)
    actual = dist.DirichletMultinomial(concentration, total_count, value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)
def test_gamma_poisson_conjugate(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    full_shape = batch_shape
    prior = Variable("prior", Real)
    concentration = Tensor(ops.exp(randn(full_shape)), inputs)
    rate = Tensor(ops.exp(randn(full_shape)), inputs)
    latent = dist.Gamma(concentration, rate, value=prior)
    conditional = dist.Poisson(rate=prior)
    reduced = (latent + conditional).reduce(ops.logaddexp, set(["prior"]))
    assert isinstance(reduced, dist.GammaPoisson)
    assert_close(reduced.concentration, concentration)
    assert_close(reduced.rate, rate)

    # truncate to integer-valued float data for the Poisson observation
    obs = Tensor(ops.astype(ops.astype(ops.exp(randn(batch_shape)), 'int32'), 'float32'), inputs)
    _assert_conjugate_density_ok(latent, conditional, obs)
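# Standalone numeric check (plain scipy, not funsor) of the Gamma-Poisson
# conjugacy the test above relies on: marginalizing a Poisson(lam)
# likelihood over a Gamma(concentration, rate) prior yields the
# GammaPoisson (negative binomial) pmf. All values below are arbitrary.
import numpy as np
from scipy import stats
from scipy.integrate import quad
from scipy.special import gammaln

concentration, rate, k = 2.3, 1.7, 4
numeric, _ = quad(
    lambda lam: stats.gamma.pdf(lam, a=concentration, scale=1.0 / rate)
    * stats.poisson.pmf(k, lam),
    0.0, np.inf)
closed = np.exp(gammaln(k + concentration) - gammaln(concentration)
                - gammaln(k + 1)
                + concentration * np.log(rate / (rate + 1.0))
                - k * np.log(rate + 1.0))
assert abs(numeric - closed) < 1e-6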
def test_dirichlet_multinomial_conjugate(batch_shape, size):
    max_count = 10
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    full_shape = batch_shape + (size,)
    prior = Variable("prior", Reals[size])
    concentration = Tensor(ops.exp(randn(full_shape)), inputs)
    value_data = ops.astype(randint(0, max_count, size=full_shape), 'float32')
    obs = Tensor(value_data, inputs)
    total_count_data = value_data.sum(-1)
    total_count = Tensor(total_count_data, inputs)
    latent = dist.Dirichlet(concentration, value=prior)
    conditional = dist.Multinomial(probs=prior, total_count=total_count)
    p = latent + conditional
    marginalized = p.reduce(ops.logaddexp, set(["value"]))
    assert isinstance(marginalized, dist.Dirichlet)
    reduced = p.reduce(ops.logaddexp, set(["prior"]))
    assert isinstance(reduced, dist.DirichletMultinomial)
    assert_close(reduced.concentration, concentration)
    assert_close(reduced.total_count, total_count)
    result = (p - reduced)(value=obs)
    assert isinstance(result, dist.Dirichlet)
    assert_close(result.concentration, concentration + obs)

    _assert_conjugate_density_ok(latent, conditional, obs)
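# Standalone check (scipy, not funsor) of the Bayes-rule identity behind
# `result.concentration == concentration + obs` above:
#   Dirichlet(alpha).pdf(p) * Multinomial(n, p).pmf(x)
#     == DirichletMultinomial(alpha, n).pmf(x) * Dirichlet(alpha + x).pdf(p).
# All values below are arbitrary.
import numpy as np
from scipy import stats
from scipy.special import gammaln

alpha = np.array([1.5, 2.0, 0.7])
x = np.array([3.0, 1.0, 2.0])
n = int(x.sum())
p = np.array([0.25, 0.5, 0.25])
lhs = stats.dirichlet.logpdf(p, alpha) + stats.multinomial.logpmf(x, n, p)
log_dm = (gammaln(n + 1) + gammaln(alpha.sum()) - gammaln(n + alpha.sum())
          + np.sum(gammaln(x + alpha) - gammaln(x + 1) - gammaln(alpha)))
rhs = log_dm + stats.dirichlet.logpdf(p, alpha + x)
assert abs(lhs - rhs) < 1e-10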
def eager_integrate(log_measure, integrand, reduced_vars):
    real_vars = frozenset(k for k in reduced_vars if log_measure.inputs[k].dtype == 'real')
    if real_vars:

        lhs_reals = frozenset(k for k, d in log_measure.inputs.items() if d.dtype == 'real')
        rhs_reals = frozenset(k for k, d in integrand.inputs.items() if d.dtype == 'real')
        if lhs_reals == real_vars and rhs_reals <= real_vars:
            inputs = OrderedDict((k, d) for t in (log_measure, integrand)
                                 for k, d in t.inputs.items())
            lhs_info_vec, lhs_precision = align_gaussian(inputs, log_measure)
            rhs_info_vec, rhs_precision = align_gaussian(inputs, integrand)
            lhs = Gaussian(lhs_info_vec, lhs_precision, inputs)

            # Compute the expectation of a non-normalized quadratic form.
            # See "The Matrix Cookbook" (November 15, 2012) ss. 8.2.2 eq. 380.
            # http://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf
            norm = ops.exp(lhs.log_normalizer.data)
            lhs_cov = ops.cholesky_inverse(lhs._precision_chol)
            lhs_loc = ops.cholesky_solve(ops.unsqueeze(lhs.info_vec, -1), lhs._precision_chol).squeeze(-1)
            vmv_term = _vv(lhs_loc, rhs_info_vec - 0.5 * _mv(rhs_precision, lhs_loc))
            data = norm * (vmv_term - 0.5 * _trace_mm(rhs_precision, lhs_cov))
            inputs = OrderedDict((k, d) for k, d in inputs.items() if k not in reduced_vars)
            result = Tensor(data, inputs)
            return result.reduce(ops.add, reduced_vars - real_vars)

        raise NotImplementedError('TODO implement partial integration')

    return None  # defer to default implementation
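# Standalone Monte Carlo check (plain numpy, not funsor) of the expectation
# identity used above (cf. "The Matrix Cookbook" ss. 8.2.2): for
# x ~ N(loc, cov),
#   E[b @ x - 0.5 * x @ P @ x] = b @ loc - 0.5 * (loc @ P @ loc + tr(P @ cov)).
# All names and sizes below are arbitrary demo values.
import numpy as np

rng = np.random.default_rng(0)
dim = 3
loc = rng.normal(size=dim)
a = rng.normal(size=(dim, dim))
cov = a @ a.T + dim * np.eye(dim)  # SPD covariance of the (lhs) measure
c = rng.normal(size=(dim, dim))
P = c @ c.T                        # SPD stand-in for rhs_precision
b = rng.normal(size=dim)           # stand-in for rhs_info_vec

xs = rng.multivariate_normal(loc, cov, size=200_000)
mc = np.mean(xs @ b - 0.5 * np.einsum('ni,ij,nj->n', xs, P, xs))
closed = b @ loc - 0.5 * (loc @ P @ loc + np.trace(P @ cov))
assert abs(mc - closed) < 0.05 * (1.0 + abs(closed))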
def test_gamma_probs_density(batch_shape, syntax):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    @funsor.function(reals(), reals(), reals(), reals())
    def gamma(concentration, rate, value):
        return backend_dist.Gamma(concentration, rate).log_prob(value)

    check_funsor(gamma, {
        'concentration': reals(),
        'rate': reals(),
        'value': reals()
    }, reals())

    concentration = Tensor(rand(batch_shape), inputs)
    rate = Tensor(rand(batch_shape), inputs)
    value = Tensor(ops.exp(randn(batch_shape)), inputs)
    expected = gamma(concentration, rate, value)
    check_funsor(expected, inputs, reals())

    d = Variable('value', reals())
    if syntax == 'eager':
        actual = dist.Gamma(concentration, rate, value)
    elif syntax == 'lazy':
        actual = dist.Gamma(concentration, rate, d)(value=value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)
def test_dirichlet_sample(batch_shape, sample_inputs, event_shape, reparametrized):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    concentration = ops.exp(randn(batch_shape + event_shape))
    funsor_dist_class = (dist.Dirichlet if reparametrized else dist.NonreparameterizedDirichlet)
    params = (concentration,)

    _check_sample(funsor_dist_class, params, sample_inputs, inputs, atol=1e-2 if reparametrized else 1e-1)
def test_lognormal_density(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    @funsor.function
    def log_normal(loc: Real, scale: Real, value: Real) -> Real:
        return backend_dist.LogNormal(loc, scale).log_prob(value)

    check_funsor(log_normal, {'loc': Real, 'scale': Real, 'value': Real}, Real)

    loc = Tensor(randn(batch_shape), inputs)
    scale = Tensor(ops.exp(randn(batch_shape)), inputs)
    value = Tensor(ops.exp(randn(batch_shape)), inputs)
    expected = log_normal(loc, scale, value)
    check_funsor(expected, inputs, Real)

    actual = dist.LogNormal(loc, scale, value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)
def eager_integrate(log_measure, integrand, reduced_vars):
    real_vars = frozenset(k for k in reduced_vars if log_measure.inputs[k].dtype == 'real')
    if real_vars == frozenset([integrand.name]):
        loc = ops.cholesky_solve(ops.unsqueeze(log_measure.info_vec, -1), log_measure._precision_chol).squeeze(-1)
        data = loc * ops.unsqueeze(ops.exp(log_measure.log_normalizer.data), -1)
        data = data.reshape(loc.shape[:-1] + integrand.output.shape)
        inputs = OrderedDict((k, d) for k, d in log_measure.inputs.items() if d.dtype != 'real')
        result = Tensor(data, inputs)
        return result.reduce(ops.add, reduced_vars - real_vars)
    return None  # defer to default implementation
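# Standalone 1D check (numpy/scipy, not funsor) of the first-moment formula
# used above: for the unnormalized log-density g(x) = -0.5 * p * x**2 + i * x,
#   integral of exp(g(x)) * x dx == exp(log_normalizer) * loc,
# with loc = i / p and log_normalizer = 0.5 * (i**2 / p + log(2*pi / p)).
# `p` and `i` below play the roles of precision and info_vec.
import numpy as np
from scipy.integrate import quad

p, i = 2.5, 0.7
loc = i / p
log_normalizer = 0.5 * (i ** 2 / p + np.log(2.0 * np.pi / p))
numeric, _ = quad(lambda x: np.exp(-0.5 * p * x ** 2 + i * x) * x,
                  -np.inf, np.inf)
assert abs(numeric - np.exp(log_normalizer) * loc) < 1e-6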
def test_normal_independent():
    loc = random_tensor(OrderedDict(), Reals[2])
    scale = ops.exp(random_tensor(OrderedDict(), Reals[2]))
    fn = dist.Normal(loc['i'], scale['i'], value='z_i')
    assert fn.inputs['z_i'] == Real
    d = Independent(fn, 'z', 'i', 'z_i')
    assert d.inputs['z'] == Reals[2]
    rng_key = None if get_backend() == "torch" else np.array([0, 0], dtype=np.uint32)
    sample = d.sample(frozenset(['z']), rng_key=rng_key)
    assert isinstance(sample, Contraction)
    assert sample.inputs['z'] == Reals[2]
def test_beta_bernoulli_conjugate(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    full_shape = batch_shape
    prior = Variable("prior", Real)
    concentration1 = Tensor(ops.exp(randn(full_shape)), inputs)
    concentration0 = Tensor(ops.exp(randn(full_shape)), inputs)
    latent = dist.Beta(concentration1, concentration0, value=prior)
    conditional = dist.Bernoulli(probs=prior)
    reduced = (latent + conditional).reduce(ops.logaddexp, set(["prior"]))
    assert isinstance(reduced, dist.DirichletMultinomial)
    concentration = stack((concentration0, concentration1), dim=-1)
    assert_close(reduced.concentration, concentration)
    assert_close(reduced.total_count, Tensor(numeric_array(1.)))

    # we need a lazy expression for Beta in order to draw samples from it
    with interpretation(funsor.terms.lazy):
        lazy_latent = dist.Beta(concentration1, concentration0, value=prior)
    obs = Tensor(rand(batch_shape).round(), inputs)
    _assert_conjugate_density_ok(latent, conditional, obs, lazy_latent=lazy_latent)
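# Standalone check (scipy, not funsor) of the conjugacy above: marginalizing
# Bernoulli(p) over a Beta(concentration1, concentration0) prior gives
# P(y = 1) = c1 / (c1 + c0). Arbitrary demo values.
from scipy import stats
from scipy.integrate import quad

c1, c0 = 2.0, 3.0
marginal_one, _ = quad(lambda p: stats.beta.pdf(p, c1, c0) * p, 0.0, 1.0)
assert abs(marginal_one - c1 / (c1 + c0)) < 1e-9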
def test_normal_gaussian_3(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    loc = Tensor(randn(batch_shape), inputs)
    scale = Tensor(ops.exp(randn(batch_shape)), inputs)
    value = Tensor(randn(batch_shape), inputs)

    expected = dist.Normal(loc, scale, value)
    assert isinstance(expected, Tensor)
    check_funsor(expected, inputs, Real)

    g = dist.Normal(Variable('loc', Real), scale, 'value')
    assert isinstance(g, Contraction)
    actual = g(loc=loc, value=value)
    check_funsor(actual, inputs, Real)

    assert_close(actual, expected, atol=1e-4)
def test_normal_density(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    @funsor.symbolic
    def normal(loc: Real, scale: Real, value: Real):
        return -((value - loc) ** 2) / (2 * scale ** 2) - scale.log() - math.log(math.sqrt(2 * math.pi))

    check_funsor(normal, {'loc': Real, 'scale': Real, 'value': Real}, Real)

    loc = Tensor(randn(batch_shape), inputs)
    scale = Tensor(ops.exp(randn(batch_shape)), inputs)
    value = Tensor(randn(batch_shape), inputs)
    expected = normal(loc, scale, value)
    check_funsor(expected, inputs, Real)

    actual = dist.Normal(loc, scale, value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)
def test_dirichlet_categorical_conjugate(batch_shape, size):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    full_shape = batch_shape + (size,)
    prior = Variable("prior", Reals[size])
    concentration = Tensor(ops.exp(randn(full_shape)), inputs)
    value = random_tensor(inputs, Bint[size])
    latent = dist.Dirichlet(concentration, value=prior)
    conditional = dist.Categorical(probs=prior)
    reduced = (latent + conditional).reduce(ops.logaddexp, set(["prior"]))
    assert isinstance(reduced, Tensor)
    actual = reduced(value=value)
    expected = dist.DirichletMultinomial(concentration=concentration, total_count=1)(
        value=Tensor(ops.new_eye(concentration.data, (size,)))[value])
    # TODO: investigate why jax backend gives inconsistent results on Travis
    assert_close(actual, expected, rtol=1e-5 if get_backend() == "jax" else 1e-6)

    obs = random_tensor(inputs, Bint[size])
    _assert_conjugate_density_ok(latent, conditional, obs)
def test_dirichlet_multinomial_conjugate_plate(batch_shape, size):
    max_count = 10
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    full_shape = batch_shape + (size,)
    prior = Variable("prior", Reals[size])
    concentration = Tensor(ops.exp(randn(full_shape)), inputs)
    value_data = ops.astype(randint(0, max_count, size=batch_shape + (7, size)), 'float32')
    obs_inputs = inputs.copy()
    obs_inputs['plate'] = Bint[7]
    obs = Tensor(value_data, obs_inputs)
    total_count_data = value_data.sum(-1)
    total_count = Tensor(total_count_data, obs_inputs)
    latent = dist.Dirichlet(concentration, value=prior)
    conditional = dist.Multinomial(probs=prior, total_count=total_count, value=obs)
    p = latent + conditional.reduce(ops.add, 'plate')
    reduced = p.reduce(ops.logaddexp, 'prior')
    assert isinstance(reduced, Tensor)

    _assert_conjugate_density_ok(latent, conditional, obs)
def test_categorical_density(size, batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    @funsor.symbolic
    def categorical(probs: Reals[size], value: Bint[size]):
        return probs[value].log()

    check_funsor(categorical, {'probs': Reals[size], 'value': Bint[size]}, Real)

    probs_data = ops.exp(randn(batch_shape + (size,)))
    probs_data /= probs_data.sum(-1)[..., None]
    probs = Tensor(probs_data, inputs)
    value = random_tensor(inputs, Bint[size])
    expected = categorical(probs, value)
    check_funsor(expected, inputs, Real)

    actual = dist.Categorical(probs, value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)
def test_dirichlet_density(batch_shape, event_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    @funsor.function
    def dirichlet(concentration: Reals[event_shape],
                  value: Reals[event_shape]) -> Real:
        return backend_dist.Dirichlet(concentration).log_prob(value)

    check_funsor(dirichlet, {'concentration': Reals[event_shape], 'value': Reals[event_shape]}, Real)

    concentration = Tensor(ops.exp(randn(batch_shape + event_shape)), inputs)
    value_data = rand(batch_shape + event_shape)
    value_data = value_data / value_data.sum(-1)[..., None]
    value = Tensor(value_data, inputs)
    expected = dirichlet(concentration, value)
    check_funsor(expected, inputs, Real)
    actual = dist.Dirichlet(concentration, value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)
def einsum(equation, *operands):
    """
    Log-sum-exp implementation of einsum.
    """
    if get_backend() != "jax":
        # NB: rename symbols to support NumPy, which allows only the symbols a-z.
        symbols = sorted(set(equation) - set(',->'))
        rename = dict(zip(symbols, 'abcdefghijklmnopqrstuvwxyz'))
        equation = ''.join(rename.get(s, s) for s in equation)

    inputs, output = equation.split('->')
    if inputs == output:
        return operands[0][...]  # create a new object
    inputs = inputs.split(',')

    shifts = []
    exp_operands = []
    for dims, operand in zip(inputs, operands):
        shift = operand
        for i, dim in enumerate(dims):
            if dim not in output:
                shift = ops.amax(shift, i, keepdims=True)
        # avoid nan due to -inf - -inf
        shift = ops.clamp(shift, ops.finfo(shift).min, None)
        exp_operands.append(ops.exp(operand - shift))

        # permute shift to match output
        shift = shift.reshape(
            [size for size, dim in zip(operand.shape, dims) if dim in output])
        if len(shift.shape) > 0:
            shift = shift.reshape((1, ) * (len(output) - shift.ndim) +
                                  shift.shape)
            dims = [dim for dim in dims if dim in output]
            dims = [dim for dim in output if dim not in dims] + dims
            shift = ops.permute(shift, [dims.index(dim) for dim in output])
        shifts.append(shift)

    result = ops.log(ops.einsum(equation, *exp_operands))
    return sum(shifts + [result])
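# Minimal smoke test for the log-space einsum above. This assumes the numpy
# backend, where `ops` dispatches on plain ndarrays; shapes and values are
# arbitrary.
import numpy as np

x = np.random.randn(3, 4)  # log-space factor over dims 'a', 'b'
y = np.random.randn(4, 5)  # log-space factor over dims 'b', 'c'
actual = einsum('ab,bc->ac', x, y)
expected = np.log(np.einsum('ab,bc->ac', np.exp(x), np.exp(y)))
assert np.allclose(actual, expected)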
def test_poisson_probs_density(batch_shape, syntax):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    @funsor.function
    def poisson(rate: Real, value: Real) -> Real:
        return backend_dist.Poisson(rate).log_prob(value)

    check_funsor(poisson, {'rate': Real, 'value': Real}, Real)

    rate = Tensor(rand(batch_shape), inputs)
    # truncate to integer-valued float data for the Poisson observation
    value = Tensor(ops.astype(ops.astype(ops.exp(randn(batch_shape)), 'int32'), 'float32'), inputs)
    expected = poisson(rate, value)
    check_funsor(expected, inputs, Real)

    d = Variable('value', Real)
    if syntax == 'eager':
        actual = dist.Poisson(rate, value)
    elif syntax == 'lazy':
        actual = dist.Poisson(rate, d)(value=value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)
def test_categorical_density(size, batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    @funsor.of_shape(reals(size), bint(size))
    def categorical(probs, value):
        return probs[value].log()

    check_funsor(categorical, {
        'probs': reals(size),
        'value': bint(size)
    }, reals())

    probs_data = ops.exp(randn(batch_shape + (size, )))
    probs_data /= probs_data.sum(-1)[..., None]
    probs = Tensor(probs_data, inputs)
    value = random_tensor(inputs, bint(size))
    expected = categorical(probs, value)
    check_funsor(expected, inputs, reals())

    actual = dist.Categorical(probs, value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)
def test_transform_exp(shape):
    point = Tensor(ops.abs(randn(shape)))
    x = Variable('x', reals(*shape))
    actual = Delta('y', point)(y=ops.exp(x))
    expected = Delta('x', point.log(), point.log().sum())
    assert_close(actual, expected)
def exptransform_to_funsor(tfm,
                           output=None,
                           dim_to_name=None,
                           real_inputs=None):
    # dict key views are not iterators, so wrap in iter() before next()
    name = next(iter(real_inputs)) if real_inputs else "value"
    return ops.exp(Variable(name, output))
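# Hypothetical invocation: `tfm` itself is unused in the body, and with no
# real_inputs the variable is named "value", so (with `Real`, `ops`, and
# `Variable` imported from funsor as above):
#
#     term = exptransform_to_funsor(tfm=None, output=Real)
#     # term == ops.exp(Variable("value", Real))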