def test_gamma_probs_density(batch_shape, syntax):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    @funsor.function(reals(), reals(), reals(), reals())
    def gamma(concentration, rate, value):
        return backend_dist.Gamma(concentration, rate).log_prob(value)

    check_funsor(gamma, {'concentration': reals(), 'rate': reals(), 'value': reals()}, reals())

    concentration = Tensor(rand(batch_shape), inputs)
    rate = Tensor(rand(batch_shape), inputs)
    value = Tensor(ops.exp(randn(batch_shape)), inputs)
    expected = gamma(concentration, rate, value)
    check_funsor(expected, inputs, reals())

    d = Variable('value', reals())
    if syntax == 'eager':
        actual = dist.Gamma(concentration, rate, value)
    elif syntax == 'lazy':
        actual = dist.Gamma(concentration, rate, d)(value=value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)

def test_von_mises_probs_density(batch_shape, syntax):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    @funsor.function(reals(), reals(), reals(), reals())
    def von_mises(loc, concentration, value):
        return backend_dist.VonMises(loc, concentration).log_prob(value)

    check_funsor(von_mises, {'concentration': reals(), 'loc': reals(), 'value': reals()}, reals())

    concentration = Tensor(rand(batch_shape), inputs)
    loc = Tensor(rand(batch_shape), inputs)
    value = Tensor(ops.abs(randn(batch_shape)), inputs)
    expected = von_mises(loc, concentration, value)
    check_funsor(expected, inputs, reals())

    d = Variable('value', reals())
    if syntax == 'eager':
        actual = dist.VonMises(loc, concentration, value)
    elif syntax == 'lazy':
        actual = dist.VonMises(loc, concentration, d)(value=value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)

def test_gamma_sample(batch_shape, sample_inputs, reparametrized):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    concentration = rand(batch_shape)
    rate = rand(batch_shape)
    funsor_dist_class = (dist.Gamma if reparametrized else dist.NonreparameterizedGamma)
    params = (concentration, rate)

    _check_sample(funsor_dist_class, params, sample_inputs, inputs,
                  num_samples=200000, atol=5e-2 if reparametrized else 1e-1)

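# NOTE: `_check_sample` is a shared helper defined elsewhere in this test module.
# As a rough sketch of its assumed contract (an assumption, not the actual
# implementation): it constructs `funsor_dist_class(*params)` with the given batch
# `inputs`, draws roughly `num_samples` Monte Carlo samples of the 'value'
# variable over `sample_inputs`, and checks that empirical sample statistics agree
# with the analytic ones from the backend distribution to within `atol`,
# optionally also checking parameter gradients unless `skip_grad=True`.
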
def test_all_equal(shape):
    inputs = OrderedDict()
    data1 = rand(shape) + 0.5
    data2 = rand(shape) + 0.5
    dtype = 'real'
    x1 = Tensor(data1, inputs, dtype=dtype)
    x2 = Tensor(data2, inputs, dtype=dtype)
    assert (x1 == x1).all()
    assert (x2 == x2).all()
    assert not (x1 == x2).all()
    assert not (x1 != x1).any()
    assert not (x2 != x2).any()
    assert (x1 != x2).any()

def test_binomial_density(batch_shape, eager):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))
    max_count = 10

    @funsor.function(reals(), reals(), reals(), reals())
    def binomial(total_count, probs, value):
        return backend_dist.Binomial(total_count, probs).log_prob(value)

    check_funsor(binomial, {'total_count': reals(), 'probs': reals(), 'value': reals()}, reals())

    value_data = ops.astype(random_tensor(inputs, bint(max_count)).data, 'float')
    total_count_data = value_data + ops.astype(random_tensor(inputs, bint(max_count)).data, 'float')
    value = Tensor(value_data, inputs)
    total_count = Tensor(total_count_data, inputs)
    probs = Tensor(rand(batch_shape), inputs)
    expected = binomial(total_count, probs, value)
    check_funsor(expected, inputs, reals())

    m = Variable('value', reals())
    actual = dist.Binomial(total_count, probs, value) if eager else \
        dist.Binomial(total_count, probs, m)(value=value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected, rtol=1e-5)

def test_reduce_subset(dims, reduced_vars, op):
    reduced_vars = frozenset(reduced_vars)
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = rand(shape) + 0.5
    dtype = 'real'
    if op in [ops.and_, ops.or_]:
        data = ops.astype(data, 'uint8')
        dtype = 2
    x = Tensor(data, inputs, dtype)
    actual = x.reduce(op, reduced_vars)
    expected_inputs = OrderedDict(
        (d, bint(sizes[d])) for d in dims if d not in reduced_vars)

    reduced_vars &= frozenset(dims)
    if not reduced_vars:
        assert actual is x
    else:
        if reduced_vars == frozenset(dims):
            data = REDUCE_OP_TO_NUMERIC[op](data, None)
        else:
            for pos in reversed(sorted(map(dims.index, reduced_vars))):
                data = REDUCE_OP_TO_NUMERIC[op](data, pos)
        check_funsor(actual, expected_inputs, Domain((), dtype))
        assert_close(actual, Tensor(data, expected_inputs, dtype),
                     atol=1e-5, rtol=1e-5)

def test_beta_density(batch_shape, eager):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    @funsor.function(reals(), reals(), reals(), reals())
    def beta(concentration1, concentration0, value):
        return backend_dist.Beta(concentration1, concentration0).log_prob(value)

    check_funsor(beta, {'concentration1': reals(), 'concentration0': reals(), 'value': reals()}, reals())

    concentration1 = Tensor(ops.exp(randn(batch_shape)), inputs)
    concentration0 = Tensor(ops.exp(randn(batch_shape)), inputs)
    value = Tensor(rand(batch_shape), inputs)
    expected = beta(concentration1, concentration0, value)
    check_funsor(expected, inputs, reals())

    d = Variable('value', reals())
    actual = dist.Beta(concentration1, concentration0, value) if eager else \
        dist.Beta(concentration1, concentration0, d)(value=value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)

def test_multinomial_density(batch_shape, event_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    max_count = 10

    @funsor.function
    def multinomial(total_count: Real, probs: Reals[event_shape],
                    value: Reals[event_shape]) -> Real:
        if get_backend() == "torch":
            # torch.distributions.Multinomial requires a Python int total_count
            total_count = total_count.max().item()
        return backend_dist.Multinomial(total_count, probs).log_prob(value)

    check_funsor(multinomial, {'total_count': Real, 'probs': Reals[event_shape],
                               'value': Reals[event_shape]}, Real)

    probs_data = rand(batch_shape + event_shape)
    probs_data = probs_data / probs_data.sum(-1)[..., None]
    probs = Tensor(probs_data, inputs)
    value_data = ops.astype(randint(0, max_count, size=batch_shape + event_shape), 'float')
    total_count_data = value_data.sum(-1)
    value = Tensor(value_data, inputs)
    total_count = Tensor(total_count_data, inputs)
    expected = multinomial(total_count, probs, value)
    check_funsor(expected, inputs, Real)

    actual = dist.Multinomial(total_count, probs, value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)

def test_bernoulliprobs_sample(batch_shape, sample_inputs):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    probs = rand(batch_shape)
    funsor_dist_class = dist.BernoulliProbs
    params = (probs,)

    _check_sample(funsor_dist_class, params, sample_inputs, inputs,
                  atol=5e-2, num_samples=100000)

def test_poisson_sample(batch_shape, sample_inputs):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    rate = rand(batch_shape)
    funsor_dist_class = dist.Poisson
    params = (rate,)

    _check_sample(funsor_dist_class, params, sample_inputs, inputs,
                  atol=2e-2, skip_grad=True)

def test_binary_scalar_funsor(symbol, dims, scalar):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data1 = rand(shape) + 0.5
    expected_data = binary_eval(symbol, scalar, data1)

    x1 = Tensor(data1, inputs)
    actual = binary_eval(symbol, scalar, x1)
    check_funsor(actual, inputs, reals(), expected_data)

def test_binary_funsor_scalar(symbol, dims, scalar):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, Bint[sizes[d]]) for d in dims)
    data1 = rand(shape) + 0.5
    expected_data = binary_eval(symbol, data1, scalar)

    x1 = Tensor(data1, inputs)
    actual = binary_eval(symbol, x1, scalar)
    check_funsor(actual, inputs, Real, expected_data)

def test_binary_funsor_funsor(symbol, dims1, dims2):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape1 = tuple(sizes[d] for d in dims1)
    shape2 = tuple(sizes[d] for d in dims2)
    inputs1 = OrderedDict((d, bint(sizes[d])) for d in dims1)
    inputs2 = OrderedDict((d, bint(sizes[d])) for d in dims2)
    data1 = rand(shape1) + 0.5
    data2 = rand(shape2) + 0.5
    dtype = 'real'
    if symbol in BOOLEAN_OPS:
        dtype = 2
        data1 = ops.astype(data1, 'uint8')
        data2 = ops.astype(data2, 'uint8')
    x1 = Tensor(data1, inputs1, dtype)
    x2 = Tensor(data2, inputs2, dtype)
    inputs, aligned = align_tensors(x1, x2)
    expected_data = binary_eval(symbol, aligned[0], aligned[1])

    actual = binary_eval(symbol, x1, x2)
    check_funsor(actual, inputs, Domain((), dtype), expected_data)

def test_normal_sample(with_lazy, batch_shape, sample_inputs, reparametrized):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    loc = randn(batch_shape)
    scale = rand(batch_shape)
    funsor_dist_class = (dist.Normal if reparametrized else dist.NonreparameterizedNormal)
    params = (loc, scale)

    _check_sample(funsor_dist_class, params, sample_inputs, inputs,
                  num_samples=200000, atol=1e-2 if reparametrized else 1e-1,
                  with_lazy=with_lazy)

def test_reduce_all(dims, op):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = rand(shape) + 0.5
    if op in [ops.and_, ops.or_]:
        data = ops.astype(data, 'uint8')
    expected_data = REDUCE_OP_TO_NUMERIC[op](data, None)

    x = Tensor(data, inputs)
    actual = x.reduce(op)
    check_funsor(actual, {}, reals(), expected_data)

def test_binomial_sample(with_lazy, batch_shape, sample_inputs):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    max_count = 10
    total_count_data = random_tensor(inputs, Bint[max_count]).data
    if get_backend() == "torch":
        total_count_data = ops.astype(total_count_data, 'float')
    total_count = total_count_data
    probs = rand(batch_shape)
    funsor_dist_class = dist.Binomial
    params = (total_count, probs)

    _check_sample(funsor_dist_class, params, sample_inputs, inputs,
                  atol=5e-2, skip_grad=True, with_lazy=with_lazy)

def test_bernoullilogits_sample(batch_shape, sample_inputs):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    logits = rand(batch_shape)
    funsor_dist_class = dist.BernoulliLogits
    params = (logits,)

    _check_sample(funsor_dist_class, params, sample_inputs, inputs,
                  atol=5e-2, num_samples=100000)

def test_bernoulli_probs_density(batch_shape, syntax):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, bint(v)) for k, v in zip(batch_dims, batch_shape))

    @funsor.function(reals(), reals(), reals())
    def bernoulli(probs, value):
        return backend_dist.Bernoulli(probs).log_prob(value)

    check_funsor(bernoulli, {'probs': reals(), 'value': reals()}, reals())

    probs = Tensor(rand(batch_shape), inputs)
    value = Tensor(rand(batch_shape).round(), inputs)
    expected = bernoulli(probs, value)
    check_funsor(expected, inputs, reals())

    d = Variable('value', reals())
    if syntax == 'eager':
        actual = dist.BernoulliProbs(probs, value)
    elif syntax == 'lazy':
        actual = dist.BernoulliProbs(probs, d)(value=value)
    elif syntax == 'generic':
        actual = dist.Bernoulli(probs=probs)(value=value)
    check_funsor(actual, inputs, reals())
    assert_close(actual, expected)

def test_bernoulli_logits_density(batch_shape, syntax):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    @funsor.function
    def bernoulli(logits: Real, value: Real) -> Real:
        return backend_dist.Bernoulli(logits=logits).log_prob(value)

    check_funsor(bernoulli, {'logits': Real, 'value': Real}, Real)

    logits = Tensor(rand(batch_shape), inputs)
    value = Tensor(ops.astype(rand(batch_shape) >= 0.5, 'float'), inputs)
    expected = bernoulli(logits, value)
    check_funsor(expected, inputs, Real)

    d = Variable('value', Real)
    if syntax == 'eager':
        actual = dist.BernoulliLogits(logits, value)
    elif syntax == 'lazy':
        actual = dist.BernoulliLogits(logits, d)(value=value)
    elif syntax == 'generic':
        actual = dist.Bernoulli(logits=logits)(value=value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)

def test_reduce_event(op, event_shape, dims):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    batch_shape = tuple(sizes[d] for d in dims)
    shape = batch_shape + event_shape
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    numeric_op = REDUCE_OP_TO_NUMERIC[op]
    data = rand(shape) + 0.5
    dtype = 'real'
    if op in [ops.and_, ops.or_]:
        data = ops.astype(data, 'uint8')
    expected_data = numeric_op(data.reshape(batch_shape + (-1,)), -1)

    x = Tensor(data, inputs, dtype=dtype)
    op_name = numeric_op.__name__[1:] if op in [ops.min, ops.max] else numeric_op.__name__
    actual = getattr(x, op_name)()
    check_funsor(actual, inputs, Domain((), dtype), expected_data)

def test_unary(symbol, dims):
    sizes = {'a': 3, 'b': 4}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    dtype = 'real'
    data = rand(shape) + 0.5
    if symbol == '~':
        data = ops.astype(data, 'uint8')
        dtype = 2
    if get_backend() != "torch" and symbol in [
            "abs", "sqrt", "exp", "log", "log1p", "sigmoid"]:
        expected_data = getattr(ops, symbol)(data)
    else:
        expected_data = unary_eval(symbol, data)

    x = Tensor(data, inputs, dtype)
    actual = unary_eval(symbol, x)
    check_funsor(actual, inputs, funsor.Domain((), dtype), expected_data)

def test_bernoullilogits_enumerate_support(expand, batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    logits = funsor.Tensor(rand(batch_shape), inputs, 'real')

    with interpretation(lazy):
        d = dist.BernoulliLogits(logits)
        x = d.enumerate_support(expand=expand)
        actual_log_prob = d(value='value2')(value2=x).reduce(ops.logaddexp, 'value')

    raw_dist = d.dist_class(logits=logits.data)
    raw_value = raw_dist.enumerate_support(expand=expand)
    expected_inputs = OrderedDict([('value', Bint[raw_value.shape[0]])])
    expected_inputs.update(inputs)
    expected_log_prob = funsor.Tensor(raw_dist.log_prob(raw_value),
                                      expected_inputs).reduce(ops.logaddexp, 'value')

    assert d.has_enumerate_support
    assert x.output == d.value.output
    assert set(x.inputs) == {'value'} | (set(batch_dims) if expand else set())
    assert_close(expected_log_prob, actual_log_prob)

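# For a Bernoulli the enumerated support is {0., 1.}, so the log-sum-exp of the
# log-probabilities over the enumerated 'value' dimension should equal log(1) = 0
# for both the funsor computation and the raw backend computation; the final
# assertion above only requires that the two agree, which also exercises the
# layout of the enumerated support with and without `expand`.
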
def test_dirichlet_density(batch_shape, event_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    @funsor.function
    def dirichlet(concentration: Reals[event_shape], value: Reals[event_shape]) -> Real:
        return backend_dist.Dirichlet(concentration).log_prob(value)

    check_funsor(dirichlet, {'concentration': Reals[event_shape],
                             'value': Reals[event_shape]}, Real)

    concentration = Tensor(ops.exp(randn(batch_shape + event_shape)), inputs)
    value_data = rand(batch_shape + event_shape)
    value_data = value_data / value_data.sum(-1)[..., None]
    value = Tensor(value_data, inputs)
    expected = dirichlet(concentration, value)
    check_funsor(expected, inputs, Real)

    actual = dist.Dirichlet(concentration, value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)

def test_beta_bernoulli_conjugate(batch_shape):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    full_shape = batch_shape
    prior = Variable("prior", Real)
    concentration1 = Tensor(ops.exp(randn(full_shape)), inputs)
    concentration0 = Tensor(ops.exp(randn(full_shape)), inputs)
    latent = dist.Beta(concentration1, concentration0, value=prior)
    conditional = dist.Bernoulli(probs=prior)
    reduced = (latent + conditional).reduce(ops.logaddexp, set(["prior"]))
    assert isinstance(reduced, dist.DirichletMultinomial)
    concentration = stack((concentration0, concentration1), dim=-1)
    assert_close(reduced.concentration, concentration)
    assert_close(reduced.total_count, Tensor(numeric_array(1.)))

    # we need a lazy expression for Beta in order to draw samples from it
    with interpretation(funsor.terms.lazy):
        lazy_latent = dist.Beta(concentration1, concentration0, value=prior)
    obs = Tensor(rand(batch_shape).round(), inputs)
    _assert_conjugate_density_ok(latent, conditional, obs, lazy_latent=lazy_latent)

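# Background for the Beta-Bernoulli check above: a Beta(c1, c0) prior is the
# two-category special case of a Dirichlet with concentration [c0, c1], and a
# Bernoulli likelihood is a single-trial Multinomial, so marginalizing out the
# latent "prior" variable yields a Dirichlet-multinomial with that stacked
# concentration and total_count = 1, which is what the assertions above verify.
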
def test_unary(symbol, dims):
    sizes = {'a': 3, 'b': 4}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, Bint[sizes[d]]) for d in dims)
    dtype = 'real'
    data = rand(shape) + 0.5
    if symbol == '~':
        data = ops.astype(data, 'uint8')
        dtype = 2
    if symbol == 'atanh':
        data = ops.clamp(data, -0.99, 0.99)
    if get_backend() != "torch" and symbol in [
            "abs", "atanh", "sqrt", "exp", "log", "log1p", "sigmoid", "tanh"]:
        expected_data = getattr(ops, symbol)(data)
    else:
        expected_data = unary_eval(symbol, data)

    x = Tensor(data, inputs, dtype)
    actual = unary_eval(symbol, x)
    check_funsor(actual, inputs, Array[dtype, ()], expected_data)

def test_poisson_probs_density(batch_shape, syntax):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    @funsor.function
    def poisson(rate: Real, value: Real) -> Real:
        return backend_dist.Poisson(rate).log_prob(value)

    check_funsor(poisson, {'rate': Real, 'value': Real}, Real)

    rate = Tensor(rand(batch_shape), inputs)
    value = Tensor(ops.astype(ops.astype(ops.exp(randn(batch_shape)), 'int32'), 'float32'), inputs)
    expected = poisson(rate, value)
    check_funsor(expected, inputs, Real)

    d = Variable('value', Real)
    if syntax == 'eager':
        actual = dist.Poisson(rate, value)
    elif syntax == 'lazy':
        actual = dist.Poisson(rate, d)(value=value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)