Example #1
import torch

import funsor.ops as ops
from funsor.domains import bint, reals
from funsor.gaussian import Gaussian
from funsor.integrate import Integrate

def test_mc_plate_gaussian():
    # Standard normal measure over 'loc'; -0.9189 ~= -0.5 * log(2 * pi).
    log_measure = Gaussian(torch.tensor([0.]), torch.tensor([[1.]]),
                           (('loc', reals()),)) + torch.tensor(-0.9189)
    integrand = Gaussian(torch.randn((100, 1)) + 3., torch.ones((100, 1, 1)),
                         (('data', bint(100)), ('loc', reals())))

    # Monte Carlo integral over 'loc', then a product over the 'data' plate.
    res = Integrate(log_measure.sample(frozenset({'loc'})), integrand, frozenset({'loc'}))
    res = res.reduce(ops.mul, frozenset({'data'}))
    assert not torch.isinf(res.data).any()
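The test only asserts that the plated Monte Carlo product stays finite. Here is a minimal pure-PyTorch sketch of the same computation, independent of funsor (the sample count, seed, and shapes are illustrative assumptions):

import torch

torch.manual_seed(0)
locs = torch.randn(100) + 3.0                      # one Gaussian per 'data' element
x = torch.randn(1000)                              # samples from the N(0, 1) measure
log_p = -0.5 * (x[:, None] - locs) ** 2 - 0.9189   # log N(x; loc_i, 1)
estimate = log_p.exp().mean(dim=0)                 # Monte Carlo expectation per plate element
assert torch.isfinite(estimate).all()              # every plate element stays finite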
Example #2
from collections import OrderedDict
import numpy as np

import funsor.ops as ops
from funsor.domains import Bint
from funsor.integrate import Integrate
from funsor.testing import assert_close
from funsor.util import get_backend

def _assert_conjugate_density_ok(latent, conditional, obs, lazy_latent=None,
                                 num_samples=10000, prec=1e-2):
    sample_inputs = OrderedDict(n=Bint[num_samples])
    lazy_latent = lazy_latent if lazy_latent is not None else latent
    rng_key = None if get_backend() == "torch" else np.array([0, 0], dtype=np.uint32)
    latent_samples = lazy_latent.sample(frozenset(["prior"]), sample_inputs, rng_key=rng_key)
    # Monte Carlo p(obs) from prior samples vs. the exact conjugate marginal.
    expected = Integrate(latent_samples, conditional(value=obs).exp(), frozenset(['prior']))
    expected = expected.reduce(ops.add, frozenset(sample_inputs))
    actual = (latent + conditional).reduce(ops.logaddexp, frozenset(["prior"]))(value=obs).exp()
    assert_close(actual, expected, atol=prec, rtol=None)
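What the helper checks, reduced to the simplest case: for a conjugate pair, a Monte Carlo estimate of the marginal density p(obs) from prior samples should match the closed-form marginal. A sketch in plain PyTorch, assuming a Normal-Normal model z ~ N(0, 1), obs | z ~ N(z, 1), so p(obs) = N(obs; 0, 2):

import math

import torch

torch.manual_seed(0)
obs = torch.tensor(0.5)
z = torch.randn(10000)                               # samples from the prior z ~ N(0, 1)
lik = torch.exp(-0.5 * (obs - z) ** 2) / math.sqrt(2 * math.pi)  # p(obs | z)
mc_marginal = lik.mean()                             # Monte Carlo estimate of p(obs)
exact = torch.exp(-0.25 * obs ** 2) / math.sqrt(4 * math.pi)     # N(obs; 0, 2)
assert torch.allclose(mc_marginal, exact, atol=1e-2)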
Example #3
import numpy as np

import funsor.ops as ops
from funsor.domains import Bint, Real
from funsor.gaussian import Gaussian
from funsor.integrate import Integrate
from funsor.testing import numeric_array, ones, randn  # backend-agnostic test helpers
from funsor.util import get_backend

def test_mc_plate_gaussian():
    log_measure = Gaussian(numeric_array([0.]), numeric_array([[1.]]),
                           (('loc', Real),)) + numeric_array(-0.9189)
    integrand = Gaussian(randn((100, 1)) + 3., ones((100, 1, 1)),
                         (('data', Bint[100]), ('loc', Real)))

    rng_key = None if get_backend() != 'jax' else np.array([0, 0], dtype=np.uint32)
    res = Integrate(log_measure.sample('loc', rng_key=rng_key), integrand, 'loc')
    res = res.reduce(ops.mul, 'data')
    assert not ((res.data == float('inf')) | (res.data == float('-inf'))).any()
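The only backend-specific line is rng_key: funsor's JAX backend needs explicit PRNG state, following JAX's functional random-number design. A minimal sketch of that convention in plain JAX:

from jax import random

key = random.PRNGKey(0)                  # seed 0 is stored as the uint32 pair [0, 0]
key, subkey = random.split(key)          # split before each sampling call
samples = random.normal(subkey, (100,))  # draws are a pure function of the key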
Example #4
from collections import OrderedDict

import numpy as np

import funsor.ops as ops
from funsor.domains import reals
from funsor.integrate import Integrate
from funsor.interpreter import interpretation
from funsor.tensor import Tensor, align_tensors
from funsor.terms import Variable, eager, lazy
from funsor.testing import check_funsor
from funsor.util import get_backend


def _get_stat_diff(funsor_dist_class, sample_inputs, inputs, num_samples,
                   statistic, with_lazy, params):
    params = [Tensor(p, inputs) for p in params]
    if isinstance(with_lazy, bool):
        # Construct the distribution lazily or eagerly as requested;
        # otherwise fall back to the ambient interpretation.
        with interpretation(lazy if with_lazy else eager):
            funsor_dist = funsor_dist_class(*params)
    else:
        funsor_dist = funsor_dist_class(*params)

    # The JAX backend threads an explicit PRNG key; torch does not.
    rng_key = None if get_backend() == "torch" else np.array([0, 0], dtype=np.uint32)
    sample_value = funsor_dist.sample(frozenset(['value']), sample_inputs,
                                      rng_key=rng_key)
    expected_inputs = OrderedDict(
        tuple(sample_inputs.items()) + tuple(inputs.items()) +
        (('value', funsor_dist.inputs['value']),))
    check_funsor(sample_value, expected_inputs, reals())

    if sample_inputs:
        # Monte Carlo estimate of E[value] over the drawn samples.
        actual_mean = Integrate(
            sample_value, Variable('value', funsor_dist.inputs['value']),
            frozenset(['value'])).reduce(ops.add, frozenset(sample_inputs))

        # Ground truth from the backend distribution object (the last param
        # is the 'value' placeholder and is skipped).
        inputs, tensors = align_tensors(*list(funsor_dist.params.values())[:-1])
        raw_dist = funsor_dist.dist_class(
            **dict(zip(funsor_dist._ast_fields[:-1], tensors)))
        expected_mean = Tensor(raw_dist.mean, inputs)

        if statistic == "mean":
            actual_stat, expected_stat = actual_mean, expected_mean
        elif statistic == "variance":
            actual_stat = Integrate(
                sample_value,
                (Variable('value', funsor_dist.inputs['value']) - actual_mean) ** 2,
                frozenset(['value'])).reduce(ops.add, frozenset(sample_inputs))
            expected_stat = Tensor(raw_dist.variance, inputs)
        elif statistic == "entropy":
            actual_stat = -Integrate(
                sample_value, funsor_dist,
                frozenset(['value'])).reduce(ops.add, frozenset(sample_inputs))
            expected_stat = Tensor(raw_dist.entropy(), inputs)
        else:
            raise ValueError("invalid test statistic")

        diff = actual_stat.reduce(ops.add).data - expected_stat.reduce(ops.add).data
        return diff.sum(), diff
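Stripped of the funsor machinery, each branch compares a Monte Carlo statistic against the closed form exposed by the backend distribution. A self-contained sketch with torch.distributions (the Normal parameters and sample count are illustrative assumptions):

import torch
from torch.distributions import Normal

torch.manual_seed(0)
dist = Normal(torch.tensor(1.5), torch.tensor(0.7))
x = dist.sample((100000,))
mc_mean = x.mean()
mc_variance = ((x - mc_mean) ** 2).mean()
mc_entropy = -dist.log_prob(x).mean()     # E[-log p(x)] over samples from p
assert torch.allclose(mc_mean, dist.mean, atol=1e-2)
assert torch.allclose(mc_variance, dist.variance, atol=1e-2)
assert torch.allclose(mc_entropy, dist.entropy(), atol=1e-2)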
Example #5
from collections import OrderedDict

import torch

import funsor.ops as ops
from funsor.domains import bint, reals
from funsor.integrate import Integrate
from funsor.tensor import Tensor, align_tensors
from funsor.terms import Variable
from funsor.testing import assert_close, check_funsor


def _check_sample(funsor_dist, sample_inputs, inputs, atol=1e-2, rtol=None,
                  num_samples=100000, statistic="mean", skip_grad=False):
    """Utility that compares Monte Carlo estimates of a distribution's
    statistics (and their gradients) with the exact values."""
    # Spread the total sample budget evenly across the sample dimensions.
    samples_per_dim = int(num_samples ** (1. / max(1, len(sample_inputs))))
    sample_inputs = OrderedDict((k, bint(samples_per_dim)) for k in sample_inputs)

    # Enable gradients through all distribution parameters (the last param
    # is the 'value' placeholder and is skipped).
    for tensor in list(funsor_dist.params.values())[:-1]:
        tensor.data.requires_grad_()

    sample_value = funsor_dist.sample(frozenset(['value']), sample_inputs)
    expected_inputs = OrderedDict(
        tuple(sample_inputs.items()) + tuple(inputs.items()) +
        (('value', funsor_dist.inputs['value']),))
    check_funsor(sample_value, expected_inputs, reals())

    if sample_inputs:
        # Monte Carlo estimate of E[value] over the drawn samples.
        actual_mean = Integrate(
            sample_value, Variable('value', funsor_dist.inputs['value']),
            frozenset(['value'])).reduce(ops.add, frozenset(sample_inputs))

        # Ground-truth mean from the backend distribution object.
        inputs, tensors = align_tensors(*list(funsor_dist.params.values())[:-1])
        raw_dist = funsor_dist.dist_class(
            **dict(zip(funsor_dist._ast_fields[:-1], tensors)))
        expected_mean = Tensor(raw_dist.mean, inputs)

        check_funsor(actual_mean, expected_mean.inputs, expected_mean.output)
        assert_close(actual_mean, expected_mean, atol=atol, rtol=rtol)

    if sample_inputs and not skip_grad:
        if statistic == "mean":
            actual_stat, expected_stat = actual_mean, expected_mean
        elif statistic == "variance":
            actual_stat = Integrate(
                sample_value,
                (Variable('value', funsor_dist.inputs['value']) - actual_mean) ** 2,
                frozenset(['value'])).reduce(ops.add, frozenset(sample_inputs))
            expected_stat = Tensor(raw_dist.variance, inputs)
        elif statistic == "entropy":
            actual_stat = -Integrate(
                sample_value, funsor_dist,
                frozenset(['value'])).reduce(ops.add, frozenset(sample_inputs))
            expected_stat = Tensor(raw_dist.entropy(), inputs)
        else:
            raise ValueError("invalid test statistic")

        # Compare gradients of the Monte Carlo statistic with gradients of
        # the exact statistic, parameter by parameter.
        grad_targets = [v.data for v in list(funsor_dist.params.values())[:-1]]
        actual_grads = torch.autograd.grad(
            actual_stat.reduce(ops.add).sum().data, grad_targets, allow_unused=True)
        expected_grads = torch.autograd.grad(
            expected_stat.reduce(ops.add).sum().data, grad_targets, allow_unused=True)

        assert_close(actual_stat, expected_stat, atol=atol, rtol=rtol)

        for actual_grad, expected_grad in zip(actual_grads, expected_grads):
            if expected_grad is not None:
                assert_close(actual_grad, expected_grad, atol=atol, rtol=rtol)
            else:
                # allow_unused=True yields None for unused params; the Monte
                # Carlo gradient must then be (numerically) zero.
                assert_close(actual_grad, torch.zeros_like(actual_grad),
                             atol=atol, rtol=rtol)
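The gradient comparison works because the samples are reparameterized, so the Monte Carlo statistic is differentiable with respect to the distribution parameters. A minimal torch sketch of that property (the parameter values are illustrative assumptions):

import torch

torch.manual_seed(0)
loc = torch.tensor(1.5, requires_grad=True)
scale = torch.tensor(0.7, requires_grad=True)
x = loc + scale * torch.randn(100000)   # reparameterized samples from N(loc, scale^2)
mc_mean = x.mean()                      # Monte Carlo estimate of the mean
grad_loc, grad_scale = torch.autograd.grad(mc_mean, [loc, scale])
assert torch.allclose(grad_loc, torch.tensor(1.0), atol=1e-3)    # exact: d mean / d loc = 1
assert torch.allclose(grad_scale, torch.tensor(0.0), atol=1e-2)  # true gradient is 0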