Code example #1
def test_importance_sampling(
    model_approx,
    y_,
    z_,
):
    sampler = mp.ImportanceSampler(n_samples=500)

    print_factor = lambda *args: print(args[0])
    print_status = lambda *args: print(args[2])
    callback = mp.expectation_propagation.EPHistory(callbacks=(print_factor,
                                                               print_status))
    opt = mp.EPOptimiser(model_approx.factor_graph,
                         default_optimiser=sampler,
                         callback=callback)
    new_approx = opt.run(model_approx, max_steps=5)

    y = new_approx.mean_field[y_].mean
    z_pred = new_approx(new_approx.mean_field.mean)[z_]
    y_pred = z_pred > 0
    (tpr, fpr), (fnr, tnr) = np.dot(
        np.array([y, 1 - y]).reshape(2, -1),
        np.array([y_pred, 1 - y_pred]).reshape(2, -1).T)

    accuracy = (tpr + tnr) / (tpr + fpr + fnr + tnr)
    assert 0.95 > accuracy > 0.7
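
The 2x2 dot product above is a compact way to build the confusion-matrix counts from the posterior mean of y and the thresholded prediction. An equivalent explicit form (illustrative only; note that with this unpacking the off-diagonal names are swapped, so the fpr slot actually holds the false-negative count, which does not affect the accuracy total):

tp = np.sum(y * y_pred)              # true label 1, predicted 1
fn = np.sum(y * (1 - y_pred))        # true label 1, predicted 0
fp = np.sum((1 - y) * y_pred)        # true label 0, predicted 1
tn = np.sum((1 - y) * (1 - y_pred))  # true label 0, predicted 0
accuracy = (tp + tn) / (tp + fn + fp + tn)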
Code example #2
def test_simple(model_approx, centres):
    laplace = graph.LaplaceOptimiser()
    ep_opt = graph.EPOptimiser(model_approx, default_optimiser=laplace)
    new_approx = ep_opt.run(model_approx, max_steps=20)

    mu_ = new_approx.factor_graph.name_variable_dict["mu"]
    logt_ = new_approx.factor_graph.name_variable_dict["logt"]

    assert new_approx.mean_field[mu_].mean == pytest.approx(
        np.mean(centres), rel=1.0)
    assert new_approx.mean_field[logt_].mean == pytest.approx(
        np.log(np.std(centres)**-2), rel=1.0)
Code example #3
def test_laplace(
    model_approx,
    a_,
    b_,
    y_,
    z_,
):
    laplace = mp.LaplaceFactorOptimiser()
    opt = mp.EPOptimiser(model_approx.factor_graph, default_optimiser=laplace)
    model_approx = opt.run(model_approx)

    y = model_approx.mean_field[y_].mean
    y_pred = model_approx.mean_field[z_].mean

    assert mp.utils.r2_score(y, y_pred) > 0.95
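
The r2_score used in the assertion is presumably the standard coefficient of determination; a minimal NumPy sketch for reference (not the library's actual implementation):

import numpy as np

def r2_score(y_true, y_pred):
    # Coefficient of determination: 1 - SS_res / SS_tot.
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    ss_res = np.sum((y_true - y_pred) ** 2)
    ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
    return 1.0 - ss_res / ss_tot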
Code example #4
File: test_output.py  Project: rhayes777/PyAutoFit
def test_path_prefix(output_directory, factor_graph_model):
    paths = DirectoryPaths(
        path_prefix="path_prefix",
        name="name",
        is_identifier_in_paths=False,
    )
    optimiser = g.EPOptimiser(
        factor_graph=factor_graph_model.graph,
        paths=paths,
        default_optimiser=MockSearch(),
    )

    assert optimiser.output_path == output_directory / "path_prefix/name"

    _run_optimisation(factor_graph_model, paths=paths)
    assert (output_directory / "path_prefix/name/AnalysisFactor0").exists()
    assert (output_directory / "path_prefix/name/AnalysisFactor1").exists()
Code example #5
def test_importance_sampling(
    model,
    model_approx,
    linear_factor,
    y_,
    z_,
):
    laplace = mp.LaplaceFactorOptimiser()
    sampler = mp.ImportanceSampler(n_samples=500, force_sample=True, delta=0.8)
    ep_opt = mp.EPOptimiser(model,
                            default_optimiser=laplace,
                            factor_optimisers={linear_factor: sampler})
    model_approx = ep_opt.run(model_approx, max_steps=3)

    y = model_approx.mean_field[y_].mean
    y_pred = model_approx.mean_field[z_].mean

    assert mp.utils.r2_score(y, y_pred) > 0.90
Code example #6
def test_laplace(
    model_approx,
    y_,
    z_,
):
    laplace = mp.LaplaceFactorOptimiser(default_opt_kws={'jac': True})
    opt = mp.EPOptimiser(model_approx.factor_graph, default_optimiser=laplace)
    new_approx = opt.run(model_approx)

    y = new_approx.mean_field[y_].mean
    z_pred = new_approx(new_approx.mean_field.mean)[z_]
    y_pred = z_pred > 0
    (tpr, fpr), (fnr, tnr) = np.dot(
        np.array([y, 1 - y]).reshape(2, -1),
        np.array([y_pred, 1 - y_pred]).reshape(2, -1).T)

    accuracy = (tpr + tnr) / (tpr + fpr + fnr + tnr)
    assert 0.95 > accuracy > 0.7
Code example #7
def test_hierarchical(centres, widths):
    centres_ = [Variable(f"x_{i}") for i in range(n)]
    mu_ = Variable("mu")
    logt_ = Variable("logt")

    centre_likelihoods = [
        messages.NormalMessage(c, w).as_factor(x)
        for c, w, x in zip(centres, widths, centres_)
    ]

    hierarchical_factor = graph.Factor(
        hierarchical_loglike_t,
        mu_,
        logt_,
        *centres_,
        factor_jacobian=hierarchical_loglike_t_jac,
    )

    model = graph.utils.prod(centre_likelihoods) * hierarchical_factor

    model_approx = graph.EPMeanField.from_approx_dists(
        model,
        {
            mu_: messages.NormalMessage(0.0, 10.0),
            logt_: messages.NormalMessage(0.0, 10.0),
            **{x_: messages.NormalMessage(0.0, 10.0)
               for x_ in centres_},
        },
    )

    laplace = graph.LaplaceOptimiser()
    ep_opt = graph.EPOptimiser(model_approx, default_optimiser=laplace)
    new_approx = ep_opt.run(model_approx, max_steps=10)
    print(new_approx)

    mu_ = new_approx.factor_graph.name_variable_dict["mu"]
    logt_ = new_approx.factor_graph.name_variable_dict["logt"]

    assert new_approx.mean_field[mu_].mean == pytest.approx(
        np.mean(centres), rel=0.2)
    assert new_approx.mean_field[logt_].mean == pytest.approx(
        np.log(np.std(centres)**-2), rel=0.2)
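
The helpers hierarchical_loglike_t and hierarchical_loglike_t_jac (and the constant n) are defined elsewhere in the test module. A plausible sketch of the pair, assuming the factor is a Gaussian log-likelihood of the centres parameterised by a mean mu and a log precision logt, and that the Jacobian returns one gradient per factor argument:

import numpy as np

def hierarchical_loglike_t(mu, logt, *centres):
    # Gaussian log-likelihood of the centres under N(mu, 1 / exp(logt)).
    x = np.asarray(centres)
    t = np.exp(logt)
    return np.sum(-0.5 * t * (x - mu) ** 2 + 0.5 * (logt - np.log(2 * np.pi)))

def hierarchical_loglike_t_jac(mu, logt, *centres):
    # Analytic gradients with respect to (mu, logt, *centres).
    x = np.asarray(centres)
    t = np.exp(logt)
    d_mu = np.sum(t * (x - mu))
    d_logt = np.sum(0.5 - 0.5 * t * (x - mu) ** 2)
    d_centres = -t * (x - mu)
    return (d_mu, d_logt, *d_centres)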
Code example #8
def test_laplace(
        model,
        start_approx,
        y_,
        z_,
):
    model_approx = graph.EPMeanField.from_approx_dists(model, start_approx)
    laplace = graph.LaplaceOptimiser()
    opt = graph.EPOptimiser(model_approx.factor_graph, default_optimiser=laplace)
    new_approx = opt.run(model_approx, max_steps=10)

    y = new_approx.mean_field[y_].mean
    z_pred = new_approx(new_approx.mean_field.mean)[z_]
    y_pred = z_pred > 0
    (tpr, fpr), (fnr, tnr) = np.dot(
        np.array([y, 1 - y]).reshape(2, -1),
        np.array([y_pred, 1 - y_pred]).reshape(2, -1).T,
    )

    accuracy = (tpr + tnr) / (tpr + fpr + fnr + tnr)
    assert 0.95 > accuracy > 0.75
Code example #9
def test_full_hierachical(data):
    samples = {
        Variable(f"samples_{i}"): sample
        for i, sample in enumerate(data)
    }
    x_i_ = [Variable(f"x_{i}") for i in range(n)]
    logt_i_ = [Variable(f"logt_{i}") for i in range(n)]

    mu_x_ = Variable("mu_x")
    logt_x_ = Variable("logt_x")
    mu_logt_ = Variable("mu_logt")
    logt_logt_ = Variable("logt_logt")
    hierarchical_params = (mu_x_, logt_x_, mu_logt_, logt_logt_)

    # Setting up model
    data_loglikes = [
        graph.Factor(
            normal_loglike_t,
            s_,
            x_,
            logt_,
            factor_jacobian=normal_loglike_t_jacobian,
            name=f"normal_{i}",
        ) for i, (s_, x_, logt_) in enumerate(zip(samples, x_i_, logt_i_))
    ]
    centre_loglike = graph.Factor(
        hierarchical_loglike_t,
        mu_x_,
        logt_x_,
        *x_i_,
        name="mean_loglike",
        factor_jacobian=hierarchical_loglike_t_jac,
    )
    logt_loglike = graph.Factor(
        hierarchical_loglike_t,
        mu_logt_,
        logt_logt_,
        *logt_i_,
        name="logt_loglike",
        factor_jacobian=hierarchical_loglike_t_jac,
    )
    priors = [
        messages.NormalMessage(0.0, 10.0).as_factor(v, name=f"prior_{v.name}")
        for v in hierarchical_params
    ]

    model = graph.utils.prod(data_loglikes + priors +
                             [centre_loglike, logt_loglike])

    # Mean field approximation
    model_approx = graph.EPMeanField.from_approx_dists(
        model,
        {
            **{v: messages.NormalMessage(0.0, 10.0)
               for v in model.variables},
            **{
                v: messages.FixedMessage(sample)
                for v, sample in samples.items()
            },
        },
    )

    laplace = graph.LaplaceOptimiser()
    ep_opt = graph.EPOptimiser(model, default_optimiser=laplace)
    new_approx = ep_opt.run(model_approx, max_steps=20)
    new_approx.mean_field.subset(hierarchical_params)

    m = np.mean([np.mean(sample) for sample in data])
    logt = np.mean([np.log(np.std(sample)**-2) for sample in data])

    assert new_approx.mean_field[mu_x_].mean == pytest.approx(m, rel=1.0)
    assert new_approx.mean_field[mu_logt_].mean == pytest.approx(logt, rel=1.0)
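
normal_loglike_t and normal_loglike_t_jacobian are likewise defined outside the snippet. A plausible sketch under the same assumptions, where each sample is an array of draws modelled as Gaussian with mean x and log precision logt:

import numpy as np

def normal_loglike_t(sample, x, logt):
    # Gaussian log-likelihood of the data points in sample under N(x, 1 / exp(logt)).
    sample = np.asarray(sample)
    t = np.exp(logt)
    return np.sum(-0.5 * t * (sample - x) ** 2 + 0.5 * (logt - np.log(2 * np.pi)))

def normal_loglike_t_jacobian(sample, x, logt):
    # Gradients with respect to (sample, x, logt).
    sample = np.asarray(sample)
    t = np.exp(logt)
    d_sample = -t * (sample - x)
    d_x = np.sum(t * (sample - x))
    d_logt = np.sum(0.5 - 0.5 * t * (sample - x) ** 2)
    return d_sample, d_x, d_logt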