Example #1
    # Mean response: linear in all but the last two design columns
    mean = torch.matmul(design[..., :-2], w1.unsqueeze(-1)).squeeze(-1)
    offset_mean = mean + random_effect_offset

    # Normal base, rescaled by random_effect_k and squashed through a sigmoid,
    # so the sampled response lies in (0, 1)
    base_dist = dist.Normal(offset_mean, torch.tensor(1.)).to_event(1)
    transforms = [
        AffineTransform(loc=torch.tensor(0.), scale=random_effect_k),
        SigmoidTransform()
    ]
    response_dist = dist.TransformedDistribution(base_dist, transforms)
    y = pyro.sample("y", response_dist)
    return y


@pytest.mark.parametrize(
    "model1,model2,design",
    [(zero_mean_unit_obs_sd_lm(torch.tensor([10., 10.]))[0],
      lm_2p_10_10_1, torch.tensor([[1., -1.]])),
     (lm_2p_10_10_1, zero_mean_unit_obs_sd_lm(torch.tensor([10., 10.]))[0],
      torch.tensor([[100., -100.]])),
     (group_linear_model(torch.tensor(0.), torch.tensor([10.]), torch.tensor(0.),
                         torch.tensor([10.]), torch.tensor(1.)),
      lm_2p_10_10_1_w12, torch.tensor([[-1.5, 0.5], [1.5, 0.]])),
     (known_covariance_linear_model(torch.tensor([1., -1.]),
                                    torch.tensor([10., 10.]), torch.tensor(1.)),
      nz_lm_2p_10_10_1, torch.tensor([[-1., 0.5], [2.5, -2.]])),
     (normal_inverse_gamma_linear_model(torch.tensor([1., -1.]), torch.tensor(.1),
                                        torch.tensor(2.), torch.tensor(2.)),
      normal_inv_gamma_2_2_10_10, torch.tensor([[1., -0.5], [1.5, 2.]])),
     (logistic_regression_model(torch.tensor([1., -1.]), torch.tensor(10.)),
      lr_10_10, torch.tensor([[6., -1.5], [.5, 0.]])),
     (sigmoid_model(torch.tensor([1., -1.]), torch.tensor([10., 10.]),
                    torch.tensor(0.), torch.tensor([1., 1.]), torch.tensor(1.),
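
In Example #1, the observed response `y` is a Normal draw pushed through an
affine rescaling and a sigmoid, so every sample lies in (0, 1) and `log_prob`
automatically includes the Jacobian terms of both transforms. Below is a
minimal, self-contained sketch of that pattern; the concrete values for
`offset_mean` and `random_effect_k` are invented for illustration.

import torch
import pyro.distributions as dist
from torch.distributions.transforms import AffineTransform, SigmoidTransform

# Hypothetical stand-ins for the mean and slope computed by the model above.
offset_mean = torch.tensor([0.5, -0.5])
random_effect_k = torch.tensor(2.0)

base_dist = dist.Normal(offset_mean, torch.tensor(1.0)).to_event(1)
transforms = [
    AffineTransform(loc=torch.tensor(0.0), scale=random_effect_k),  # x -> k * x
    SigmoidTransform(),  # x -> 1 / (1 + exp(-x))
]
response_dist = dist.TransformedDistribution(base_dist, transforms)

y = response_dist.sample()         # every entry lies in (0, 1)
log_p = response_dist.log_prob(y)  # includes the Jacobians of both transforms
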
Example #2
and expensive-to-compute functions in pyro.

[1] ["Bayesian Regression"](http://pyro.ai/examples/bayesian_regression.html)
[2] Long Ouyang, Michael Henry Tessler, Daniel Ly, Noah Goodman (2016),
    ["Practical optimal experiment design with probabilistic programs"](https://arxiv.org/abs/1608.05046)
[3] ["Bayesian Optimization"](http://pyro.ai/examples/bo.html)
"""

# Set up regression model dimensions
N = 100  # number of participants
p = 2  # number of features
prior_sds = torch.tensor([10., 2.5])

# Model and guide using known obs_sd
model, guide = zero_mean_unit_obs_sd_lm(prior_sds)


def estimated_ape(ns, num_vi_steps):
    # One candidate design per split: n1 participants in the first group,
    # N - n1 in the second
    designs = [
        group_assignment_matrix(torch.tensor([n1, N - n1])) for n1 in ns
    ]
    X = torch.stack(designs)
    est_ape = vi_eig(
        model,
        X,
        observation_labels="y",
        target_labels="w",
        vi_parameters={
            "guide": guide,
            "optim": optim.Adam({"lr": 0.05}),