Example #1
     zero_mean_unit_obs_sd_lm(torch.tensor([10.0, 10.0]))[0],
     torch.tensor([[100.0, -100.0]]),
 ),
 (
     group_linear_model(
         torch.tensor(0.0),
         torch.tensor([10.0]),
         torch.tensor(0.0),
         torch.tensor([10.0]),
         torch.tensor(1.0),
     ),
     lm_2p_10_10_1_w12,
     torch.tensor([[-1.5, 0.5], [1.5, 0.0]]),
 ),
 (
     known_covariance_linear_model(torch.tensor(
         [1.0, -1.0]), torch.tensor([10.0, 10.0]), torch.tensor(1.0)),
     nz_lm_2p_10_10_1,
     torch.tensor([[-1.0, 0.5], [2.5, -2.0]]),
 ),
 (
     normal_inverse_gamma_linear_model(
         torch.tensor([1.0, -1.0]),
         torch.tensor(0.1),
         torch.tensor(2.0),
         torch.tensor(2.0),
     ),
     normal_inv_gamma_2_2_10_10,
     torch.tensor([[1.0, -0.5], [1.5, 2.0]]),
 ),
 (
     logistic_regression_model(torch.tensor([1.0, -1.0]),
Example #2
    ]
    response_dist = dist.TransformedDistribution(base_dist, transforms)
    y = pyro.sample("y", response_dist)
    return y


# Each case pairs two different constructions of the same model with a design
# tensor; the test below checks that both assign matching log probabilities.
@pytest.mark.parametrize(
    "model1,model2,design",
    [(zero_mean_unit_obs_sd_lm(torch.tensor(
        [10., 10.]))[0], lm_2p_10_10_1, torch.tensor([[1., -1.]])),
     (lm_2p_10_10_1, zero_mean_unit_obs_sd_lm(torch.tensor(
         [10., 10.]))[0], torch.tensor([[100., -100.]])),
     (group_linear_model(torch.tensor(0.), torch.tensor(
         [10.]), torch.tensor(0.), torch.tensor([10.]), torch.tensor(1.)),
      lm_2p_10_10_1_w12, torch.tensor([[-1.5, 0.5], [1.5, 0.]])),
     (known_covariance_linear_model(torch.tensor(
         [1., -1.]), torch.tensor([10., 10.]), torch.tensor(1.)),
      nz_lm_2p_10_10_1, torch.tensor([[-1., 0.5], [2.5, -2.]])),
     (normal_inverse_gamma_linear_model(torch.tensor(
         [1., -1.]), torch.tensor(.1), torch.tensor(2.), torch.tensor(2.)),
      normal_inv_gamma_2_2_10_10, torch.tensor([[1., -0.5], [1.5, 2.]])),
     (logistic_regression_model(torch.tensor([1., -1.]), torch.tensor(10.)),
      lr_10_10, torch.tensor([[6., -1.5], [.5, 0.]])),
     (sigmoid_model(torch.tensor([1., -1.]), torch.tensor([10., 10.]),
                    torch.tensor(0.), torch.tensor([1., 1.]), torch.tensor(1.),
                    torch.tensor(2.), torch.tensor(2.),
                    torch.eye(2)), sigmoid_example,
      torch.cat([torch.tensor([[1., 1.], [.5, -2.5]]),
                 torch.eye(2)], dim=-1))])
def test_log_prob_matches(model1, model2, design):
    # Run model1 under the given design, recording every sample site.
    trace = poutine.trace(model1).get_trace(design)
    # Populate the per-site log probabilities on the recorded trace.
    trace.compute_log_prob()
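
The snippet breaks off here; below is a minimal sketch of how such a comparison could conclude, assuming the standard Pyro replay pattern and the "y" observation site seen above. These lines are an illustration, not the original test body.

    # Hypothetical continuation: replay model2 against the sites recorded from
    # model1, so both models are scored on the same latent values, then compare
    # the log probability of the shared "y" site.
    replayed = poutine.trace(poutine.replay(model2, trace=trace)).get_trace(design)
    replayed.compute_log_prob()
    assert torch.allclose(trace.nodes["y"]["log_prob"],
                          replayed.nodes["y"]["log_prob"])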
Example #3
def linear_model():
    # Linear model with two regression coefficients (prior sds 1. and 1.5),
    # zero prior mean and a fixed observation noise sd of 1.
    return known_covariance_linear_model(coef_means=torch.tensor(0.),
                                         coef_sds=torch.tensor([1., 1.5]),
                                         observation_sd=torch.tensor(1.))
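
As a usage sketch, assuming this constructor is the one from pyro.contrib.oed.glmm and that the observation site is named "y" as in Example #2, the returned model is an ordinary Pyro model that can be run and traced on a design tensor directly:

import torch
import pyro.poutine as poutine

model = linear_model()
design = torch.tensor([[1., 0.], [0., 1.]])   # one row per design point
trace = poutine.trace(model).get_trace(design)
trace.compute_log_prob()
# Sampled response and its log probability under the model.
print(trace.nodes["y"]["value"], trace.nodes["y"]["log_prob"])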