Example #1
def test_empty_population_adaptive(db_path, sampler):
    def make_model(theta):
        def model(args):
            return {"result": 1 if random.random() > theta else 0}

        return model

    theta1 = .2
    theta2 = .6
    model1 = make_model(theta1)
    model2 = make_model(theta2)
    models = [model1, model2]
    models = list(map(SimpleModel, models))
    population_size = AdaptivePopulationSize(1500)
    parameter_given_model_prior_distribution = [Distribution(), Distribution()]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(0),
                 sampler=sampler)

    abc.new(db_path, {"result": 0})

    minimum_epsilon = -1
    history = abc.run(minimum_epsilon, max_nr_populations=3)
    mp = history.get_model_probabilities(history.max_t)
    # P(result == 0 | model_i) = theta_i, so with a uniform model prior the
    # posterior model probabilities are proportional to theta_i.
    expected_p1, expected_p2 = theta1 / (theta1 + theta2), theta2 / (theta1 + theta2)
    assert abs(mp.p[0] - expected_p1) + abs(mp.p[1] - expected_p2) < .1
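These snippets are pytest excerpts and omit their imports. A minimal preamble matching the names used above might look as follows; the exact submodules vary between pyabc versions, so treat this as an assumption rather than the original test header (mean_and_std is a local test helper, sketched after Example #4).

import random

import numpy as np
import scipy as sp
import scipy.interpolate
import scipy.stats as st

from pyabc import (ABCSMC, RV, AdaptivePopulationSize, Distribution,
                   MedianEpsilon)
from pyabc.distance import MinMaxDistance, MinMaxDistanceFunction
from pyabc.model import FunctionModel, SimpleModel
from pyabc.transition import MultivariateNormalTransition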
Example #2
def test_beta_binomial_two_identical_models_adaptive(db_path, sampler):
    binomial_n = 5

    def model_fun(args):
        return {"result": st.binom(binomial_n, args.theta).rvs()}

    models = [model_fun for _ in range(2)]
    models = list(map(FunctionModel, models))
    population_size = AdaptivePopulationSize(800)
    parameter_given_model_prior_distribution = [
        Distribution(theta=st.beta(1, 1)) for _ in range(2)
    ]
    abc = ABCSMC(
        models,
        parameter_given_model_prior_distribution,
        MinMaxDistance(measures_to_use=["result"]),
        population_size,
        eps=MedianEpsilon(0.1),
        sampler=sampler,
    )
    abc.new(db_path, {"result": 2})

    minimum_epsilon = 0.2
    history = abc.run(minimum_epsilon, max_nr_populations=3)
    mp = history.get_model_probabilities(history.max_t)
    # The two models are identical, so both posterior probabilities should be ~0.5.
    assert abs(mp.p[0] - 0.5) + abs(mp.p[1] - 0.5) < 0.08
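db_path and sampler are pytest fixtures in the original test suite. To run one of these examples standalone, something along the following lines should work (a sketch assuming an SQLite file database and pyabc's SingleCoreSampler):

import os
import tempfile

from pyabc.sampler import SingleCoreSampler

# Stand-ins for the pytest fixtures used by the tests above.
db_path = "sqlite:///" + os.path.join(tempfile.gettempdir(), "abc_example.db")
sampler = SingleCoreSampler()

test_beta_binomial_two_identical_models_adaptive(db_path, sampler)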
Example #3
def test_two_competing_gaussians_multiple_population_adaptive_population_size(
        db_path, sampler):
    # Define a Gaussian model
    sigma = .5

    def model(args):
        return {"y": st.norm(args['x'], sigma).rvs()}

    # We define two models, but they are identical so far
    models = [model, model]
    models = list(map(SimpleModel, models))

    # The prior over the model classes is uniform
    model_prior = RV("randint", 0, 2)

    # However, our models' priors are not the same. Their mean differs.
    mu_x_1, mu_x_2 = 0, 1
    parameter_given_model_prior_distribution = [Distribution(x=st.norm(mu_x_1, sigma)),
                                                Distribution(x=st.norm(mu_x_2, sigma))]

    # Particles are perturbed in a Gaussian fashion
    parameter_perturbation_kernels = [MultivariateNormalTransition() for _ in range(2)]

    # We plug all the ABC setup together
    nr_populations = 3
    population_size = AdaptivePopulationSize(400, mean_cv=0.05,
                                             max_population_size=1000)
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["y"]),
                 population_size,
                 model_prior=model_prior,
                 transitions=parameter_perturbation_kernels,
                 eps=MedianEpsilon(.2),
                 sampler=sampler)

    # Finally we add meta data such as model names and define where to store the results
    # y_observed is the important piece here: our actual observation.
    y_observed = 1
    abc.new(db_path, {"y": y_observed})

    # We run the ABC for at most nr_populations populations
    minimum_epsilon = .05
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)

    # Evaluate the model probabilities
    mp = history.get_model_probabilities(history.max_t)

    def p_y_given_model(mu_x_model):
        # Prior N(mu, sigma^2) convolved with observation noise N(0, sigma^2)
        # gives a marginal N(mu, 2 * sigma^2) for y.
        return st.norm(mu_x_model, np.sqrt(sigma ** 2 + sigma ** 2)).pdf(y_observed)

    p1_expected_unnormalized = p_y_given_model(mu_x_1)
    p2_expected_unnormalized = p_y_given_model(mu_x_2)
    p1_expected = p1_expected_unnormalized / (p1_expected_unnormalized + p2_expected_unnormalized)
    p2_expected = p2_expected_unnormalized / (p1_expected_unnormalized + p2_expected_unnormalized)
    assert history.max_t == nr_populations-1
    assert abs(mp.p[0] - p1_expected) + abs(mp.p[1] - p2_expected) < .07
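The analytic reference values follow from the fact that y is the model mean plus Gaussian noise, so its marginal distribution under each model is a Gaussian whose variance is the sum of the prior and noise variances. A quick standalone check of the expected probabilities (a sketch reusing the constants from the test above):

import numpy as np
import scipy.stats as st

sigma, y_observed = 0.5, 1
# Prior N(mu, sigma^2) convolved with noise N(0, sigma^2) gives N(mu, 2 * sigma^2).
p1 = st.norm(0, np.sqrt(2 * sigma ** 2)).pdf(y_observed)
p2 = st.norm(1, np.sqrt(2 * sigma ** 2)).pdf(y_observed)
print(p1 / (p1 + p2), p2 / (p1 + p2))  # roughly 0.27 and 0.73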
Example #4
def test_gaussian_multiple_populations_adaptive_population_size(
        db_path, sampler):
    sigma_x = 1
    sigma_y = 0.5
    y_observed = 2

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model]
    models = list(map(FunctionModel, models))
    nr_populations = 4
    population_size = AdaptivePopulationSize(600)
    parameter_given_model_prior_distribution = [
        Distribution(x=st.norm(0, sigma_x))
    ]
    abc = ABCSMC(
        models,
        parameter_given_model_prior_distribution,
        MinMaxDistance(measures_to_use=["y"]),
        population_size,
        eps=MedianEpsilon(0.2),
        sampler=sampler,
    )
    abc.new(db_path, {"y": y_observed})

    minimum_epsilon = -1

    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
    posterior_x = posterior_x["x"].values
    sort_indices = np.argsort(posterior_x)
    # Piecewise-linear empirical CDF of the weighted posterior sample
    f_empirical = sp.interpolate.interp1d(
        np.hstack((-200, posterior_x[sort_indices], 200)),
        np.hstack((0, np.cumsum(posterior_weight[sort_indices]), 1)),
    )

    # Analytic posterior: a Gaussian prior N(0, sigma_x^2) with a Gaussian
    # likelihood N(x, sigma_y^2) yields a Gaussian posterior for x given y.
    sigma_x_given_y = 1 / np.sqrt(1 / sigma_x**2 + 1 / sigma_y**2)
    mu_x_given_y = sigma_x_given_y**2 * y_observed / sigma_y**2
    expected_posterior_x = st.norm(mu_x_given_y, sigma_x_given_y)
    x = np.linspace(-8, 8)
    max_distribution_difference = np.absolute(
        f_empirical(x) - expected_posterior_x.cdf(x)).max()
    assert max_distribution_difference < 0.15
    assert history.max_t == nr_populations - 1
    mean_emp, std_emp = mean_and_std(posterior_x, posterior_weight)
    assert abs(mean_emp - mu_x_given_y) < 0.07
    assert abs(std_emp - sigma_x_given_y) < 0.12
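mean_and_std is not part of the pyabc API; in the original test module it is a small helper that computes the weighted mean and weighted standard deviation of the posterior sample. A minimal sketch consistent with how it is called above:

import numpy as np

def mean_and_std(values, weights):
    """Weighted mean and weighted standard deviation of a sample."""
    mean = np.average(values, weights=weights)
    std = np.sqrt(np.average((values - mean) ** 2, weights=weights))
    return mean, std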