def maskeddist_to_funsor(backend_dist, output=None, dim_to_name=None):
    # Convert the mask to a float funsor and multiply it into the base
    # distribution's funsor: masked-out elements contribute zero log-density.
    mask = to_funsor(ops.astype(backend_dist._mask, 'float32'),
                     output=output, dim_to_name=dim_to_name)
    funsor_base_dist = to_funsor(backend_dist.base_dist,
                                 output=output, dim_to_name=dim_to_name)
    return mask * funsor_base_dist
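# The mask-as-multiplier idea above, demonstrated in isolation. A minimal
# sketch assuming the PyTorch backend; all tensors and names here are
# illustrative, not part of maskeddist_to_funsor's contract.
from collections import OrderedDict

import torch

import funsor
from funsor.domains import Bint
from funsor.tensor import Tensor

funsor.set_backend("torch")

log_prob = Tensor(torch.randn(3), OrderedDict(i=Bint[3]))
mask = Tensor(torch.tensor([1.0, 0.0, 1.0]), OrderedDict(i=Bint[3]))
masked_log_prob = mask * log_prob  # zero log-density wherever mask == 0
assert masked_log_prob(i=1).data == 0.0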
def test_binary_funsor_funsor(symbol, dims1, dims2):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape1 = tuple(sizes[d] for d in dims1)
    shape2 = tuple(sizes[d] for d in dims2)
    inputs1 = OrderedDict((d, bint(sizes[d])) for d in dims1)
    inputs2 = OrderedDict((d, bint(sizes[d])) for d in dims2)
    data1 = rand(shape1) + 0.5
    data2 = rand(shape2) + 0.5
    dtype = 'real'
    if symbol in BOOLEAN_OPS:
        dtype = 2
        data1 = ops.astype(data1, 'uint8')
        data2 = ops.astype(data2, 'uint8')
    x1 = Tensor(data1, inputs1, dtype)
    x2 = Tensor(data2, inputs2, dtype)
    inputs, aligned = align_tensors(x1, x2)
    expected_data = binary_eval(symbol, aligned[0], aligned[1])
    actual = binary_eval(symbol, x1, x2)
    check_funsor(actual, inputs, Domain((), dtype), expected_data)
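# What align_tensors contributes above, in isolation: it returns the union of
# the two funsors' named inputs plus raw backend tensors whose shapes
# broadcast against each other in that input order. A minimal sketch assuming
# the torch backend.
from collections import OrderedDict

import torch

import funsor
from funsor.domains import Bint
from funsor.tensor import Tensor, align_tensors
from funsor.testing import assert_close

funsor.set_backend("torch")

x = Tensor(torch.rand(3), OrderedDict(a=Bint[3]))
y = Tensor(torch.rand(4), OrderedDict(b=Bint[4]))
inputs, (xd, yd) = align_tensors(x, y)
data = xd + yd  # raw broadcasting reproduces the funsor-level sum
assert data.shape == (3, 4)
assert_close(Tensor(data, inputs), x + y)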
def test_reduce_all(dims, op):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    data = rand(shape) + 0.5
    if op in [ops.and_, ops.or_]:
        data = ops.astype(data, 'uint8')
    expected_data = REDUCE_OP_TO_NUMERIC[op](data, None)
    x = Tensor(data, inputs)
    actual = x.reduce(op)
    check_funsor(actual, {}, reals(), expected_data)
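# What x.reduce(op) with no reduced_vars argument does above: it reduces over
# *all* named inputs, leaving a scalar funsor. A minimal sketch assuming the
# torch backend.
from collections import OrderedDict

import torch

import funsor
import funsor.ops as ops
from funsor.domains import Bint
from funsor.tensor import Tensor

funsor.set_backend("torch")

data = torch.rand(3, 4)
x = Tensor(data, OrderedDict(a=Bint[3], b=Bint[4]))
total = x.reduce(ops.add)  # sums out both 'a' and 'b'
assert total.inputs == OrderedDict()
assert torch.allclose(total.data, data.sum())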
def test_poisson_probs_density(batch_shape, syntax):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    @funsor.function
    def poisson(rate: Real, value: Real) -> Real:
        return backend_dist.Poisson(rate).log_prob(value)

    check_funsor(poisson, {'rate': Real, 'value': Real}, Real)

    rate = Tensor(rand(batch_shape), inputs)
    # Nonnegative integer-valued counts, stored as floats for log_prob.
    value = Tensor(ops.astype(ops.astype(ops.exp(randn(batch_shape)), 'int32'), 'float32'), inputs)
    expected = poisson(rate, value)
    check_funsor(expected, inputs, Real)

    d = Variable('value', Real)
    if syntax == 'eager':
        actual = dist.Poisson(rate, value)
    elif syntax == 'lazy':
        actual = dist.Poisson(rate, d)(value=value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)
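# The eager/lazy pattern these density tests rely on, in miniature and with
# plain Tensors rather than distributions: substituting a concrete value into
# a lazy funsor built around Variable('value', Real) must agree with building
# the expression eagerly. A minimal sketch assuming the torch backend.
from collections import OrderedDict

import funsor
from funsor.domains import Bint, Real
from funsor.tensor import Tensor
from funsor.terms import Variable
from funsor.testing import assert_close, randn

funsor.set_backend("torch")

x = Tensor(randn((3,)), OrderedDict(i=Bint[3]))
value = Tensor(randn((3,)), OrderedDict(i=Bint[3]))
d = Variable('value', Real)
lazy = (d - x) ** 2       # a lazy funsor with a free 'value' input
eager = (value - x) ** 2  # the same expression built eagerly
assert_close(lazy(value=value), eager)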
def test_binomial_sample(with_lazy, batch_shape, sample_inputs):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    max_count = 10
    total_count_data = random_tensor(inputs, Bint[max_count]).data
    if get_backend() == "torch":
        total_count_data = ops.astype(total_count_data, 'float')
    total_count = total_count_data
    probs = rand(batch_shape)
    funsor_dist_class = dist.Binomial
    params = (total_count, probs)

    _check_sample(funsor_dist_class, params, sample_inputs, inputs,
                  atol=5e-2, skip_grad=True, with_lazy=with_lazy)
def test_binomial_density(batch_shape, eager):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    max_count = 10

    @funsor.function
    def binomial(total_count: Real, probs: Real, value: Real) -> Real:
        return backend_dist.Binomial(total_count, probs).log_prob(value)

    check_funsor(binomial, {'total_count': Real, 'probs': Real, 'value': Real}, Real)

    value_data = ops.astype(random_tensor(inputs, Bint[max_count]).data, 'float')
    # Add a second random count so that value <= total_count always holds.
    total_count_data = value_data + ops.astype(random_tensor(inputs, Bint[max_count]).data, 'float')
    value = Tensor(value_data, inputs)
    total_count = Tensor(total_count_data, inputs)
    probs = Tensor(rand(batch_shape), inputs)
    expected = binomial(total_count, probs, value)
    check_funsor(expected, inputs, Real)

    m = Variable('value', Real)
    actual = dist.Binomial(total_count, probs, value) if eager else \
        dist.Binomial(total_count, probs, m)(value=value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected, rtol=1e-5)
def test_reduce_event(op, event_shape, dims):
    sizes = {'a': 3, 'b': 4, 'c': 5}
    batch_shape = tuple(sizes[d] for d in dims)
    shape = batch_shape + event_shape
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    numeric_op = REDUCE_OP_TO_NUMERIC[op]
    data = rand(shape) + 0.5
    dtype = 'real'
    if op in [ops.and_, ops.or_]:
        data = ops.astype(data, 'uint8')
    expected_data = numeric_op(data.reshape(batch_shape + (-1,)), -1)
    x = Tensor(data, inputs, dtype=dtype)
    # For ops.min/ops.max, strip the leading underscore from the numeric
    # helper's name to recover the Tensor method name.
    op_name = numeric_op.__name__[1:] if op in [ops.min, ops.max] else numeric_op.__name__
    actual = getattr(x, op_name)()
    check_funsor(actual, inputs, Domain((), dtype), expected_data)
def test_unary(symbol, dims):
    sizes = {'a': 3, 'b': 4}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, bint(sizes[d])) for d in dims)
    dtype = 'real'
    data = rand(shape) + 0.5
    if symbol == '~':
        data = ops.astype(data, 'uint8')
        dtype = 2
    if get_backend() != "torch" and symbol in [
        "abs", "sqrt", "exp", "log", "log1p", "sigmoid"
    ]:
        expected_data = getattr(ops, symbol)(data)
    else:
        expected_data = unary_eval(symbol, data)
    x = Tensor(data, inputs, dtype)
    actual = unary_eval(symbol, x)
    check_funsor(actual, inputs, funsor.Domain((), dtype), expected_data)
def test_dirichlet_multinomial_conjugate_plate(batch_shape, size):
    max_count = 10
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))
    full_shape = batch_shape + (size,)
    prior = Variable("prior", Reals[size])
    concentration = Tensor(ops.exp(randn(full_shape)), inputs)
    value_data = ops.astype(randint(0, max_count, size=batch_shape + (7, size)), 'float32')
    obs_inputs = inputs.copy()
    obs_inputs['plate'] = Bint[7]
    obs = Tensor(value_data, obs_inputs)
    total_count_data = value_data.sum(-1)
    total_count = Tensor(total_count_data, obs_inputs)
    latent = dist.Dirichlet(concentration, value=prior)
    conditional = dist.Multinomial(probs=prior, total_count=total_count, value=obs)
    # Sum log-densities over the plate of conditionally independent
    # observations, then marginalize the Dirichlet prior; conjugacy is what
    # lets the reduction evaluate eagerly to a Tensor.
    p = latent + conditional.reduce(ops.add, 'plate')
    reduced = p.reduce(ops.logaddexp, 'prior')
    assert isinstance(reduced, Tensor)
    _assert_conjugate_density_ok(latent, conditional, obs)
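# The marginalization step above, in miniature for a *discrete* latent,
# assuming the torch backend: reduce(ops.logaddexp, name) is exact
# log-sum-exp marginalization. The conjugacy machinery in the test is what
# lets the same reduction succeed for the continuous 'prior' variable.
from collections import OrderedDict

import torch

import funsor
import funsor.ops as ops
from funsor.domains import Bint
from funsor.tensor import Tensor

funsor.set_backend("torch")

joint = Tensor(torch.randn(5, 3), OrderedDict(z=Bint[5], i=Bint[3]))
marginal = joint.reduce(ops.logaddexp, 'z')
assert torch.allclose(marginal.data, joint.data.logsumexp(0))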
def test_unary(symbol, dims):
    sizes = {'a': 3, 'b': 4}
    shape = tuple(sizes[d] for d in dims)
    inputs = OrderedDict((d, Bint[sizes[d]]) for d in dims)
    dtype = 'real'
    data = rand(shape) + 0.5
    if symbol == '~':
        data = ops.astype(data, 'uint8')
        dtype = 2
    if symbol == 'atanh':
        # Keep data strictly inside atanh's open domain (-1, 1).
        data = ops.clamp(data, -0.99, 0.99)
    if get_backend() != "torch" and symbol in [
        "abs", "atanh", "sqrt", "exp", "log", "log1p", "sigmoid", "tanh"
    ]:
        expected_data = getattr(ops, symbol)(data)
    else:
        expected_data = unary_eval(symbol, data)
    x = Tensor(data, inputs, dtype)
    actual = unary_eval(symbol, x)
    check_funsor(actual, inputs, Array[dtype, ()], expected_data)
def test_bernoulli_logits_density(batch_shape, syntax):
    batch_dims = ('i', 'j', 'k')[:len(batch_shape)]
    inputs = OrderedDict((k, Bint[v]) for k, v in zip(batch_dims, batch_shape))

    @funsor.function
    def bernoulli(logits: Real, value: Real) -> Real:
        return backend_dist.Bernoulli(logits=logits).log_prob(value)

    check_funsor(bernoulli, {'logits': Real, 'value': Real}, Real)

    logits = Tensor(rand(batch_shape), inputs)
    value = Tensor(ops.astype(rand(batch_shape) >= 0.5, 'float'), inputs)
    expected = bernoulli(logits, value)
    check_funsor(expected, inputs, Real)

    d = Variable('value', Real)
    if syntax == 'eager':
        actual = dist.BernoulliLogits(logits, value)
    elif syntax == 'lazy':
        actual = dist.BernoulliLogits(logits, d)(value=value)
    elif syntax == 'generic':
        actual = dist.Bernoulli(logits=logits)(value=value)
    check_funsor(actual, inputs, Real)
    assert_close(actual, expected)
def delta(v: Reals[event_shape], log_density: Real, value: Reals[event_shape]) -> Real:
    # `event_shape` is captured from the enclosing scope; applying ops.all
    # once per event dim reduces `eq` to one boolean per batch element.
    eq = (v == value)
    for _ in range(len(event_shape)):
        eq = ops.all(eq, -1)
    return ops.log(ops.astype(eq, 'float32')) + log_density
def delta(v, log_density, value):
    # Untyped variant of the annotated `delta` above: identical logic, with
    # `event_shape` again captured from the enclosing scope.
    eq = (v == value)
    for _ in range(len(event_shape)):
        eq = ops.all(eq, -1)
    return ops.log(ops.astype(eq, 'float32')) + log_density
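# A quick sanity check of `delta` above, a minimal sketch assuming the torch
# backend and an enclosing `event_shape = (2,)`: equal points contribute
# log(1) = 0 on top of log_density, unequal points log(0) = -inf.
import torch

import funsor

funsor.set_backend("torch")

event_shape = (2,)  # the closure variable `delta` reads
point = torch.tensor([1.0, 2.0])
zero = torch.tensor(0.0)
assert delta(point, zero, point) == 0.0
assert delta(point, zero, torch.tensor([1.0, 3.0])) == float('-inf')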