# Example 1
def test_dichotomous_bmrs():
    # Functionality check only; the numeric medians below are regression
    # anchors rather than independently verified values.
    session = Session(
        mcmc_iterations=2000,
        mcmc_num_chains=4,
        mcmc_warmup_fraction=0.25,
    )
    session.add_dichotomous_data(
        dose=[0, 1.96, 5.69, 29.75],
        n=[75, 49, 50, 49],
        incidence=[5, 1, 3, 14],
    )
    session.add_models(dmodels.Logistic())
    session.add_bmrs(
        dbmr.Extra(bmr=0.1, priors=[1, ]),
        dbmr.Added(bmr=0.1, priors=[1, ]),
    )

    session.execute()
    session.calculate_bmrs()

    # a single model must carry all of the model-average weight
    assert session.models[0].model_weight_scaler == 1.

    # BMD medians are calculated for each BMR
    expected_medians = [17.887098904857172, 18.403145098571546]
    actual_medians = [bmr.model_average['stats']['p50']
                      for bmr in session.bmrs]
    assert np.isclose(actual_medians, expected_medians).all()
# Example 2
def test_threaded():
    # Both the threaded and the serial execution paths should run to
    # completion on the same session without raising.
    session = Session(
        mcmc_iterations=2000,
        mcmc_num_chains=2,
        mcmc_warmup_fraction=0.25,
    )
    session.add_dichotomous_data(
        dose=[0, 1.96, 5.69, 29.75],
        n=[75, 49, 50, 49],
        incidence=[5, 1, 3, 14],
    )
    session.add_models(
        dmodels.Logistic(),
        dmodels.LogLogistic(),
        dmodels.Logistic(),
        dmodels.LogLogistic(),
    )
    # check that both run
    for use_pythreads in (True, False):
        session.execute(pythreads=use_pythreads)
# Example 3
def test_bmr_priors():
    # Modifying the per-model BMR priors should shift the posterior model
    # weights in the expected direction.
    session = Session(
        mcmc_iterations=2000,
        mcmc_num_chains=2,
        mcmc_warmup_fraction=0.25,
    )
    session.add_dichotomous_data(
        dose=[0, 1.96, 5.69, 29.75],
        n=[75, 49, 50, 49],
        incidence=[5, 1, 3, 14],
    )
    session.add_models(
        dmodels.Logistic(),
        dmodels.Probit(),
    )
    session.add_bmrs(
        dbmr.Extra(bmr=0.1, priors=[0, 1]),
        dbmr.Extra(bmr=0.1, priors=[0.5, 0.5]),
        dbmr.Extra(bmr=0.1, priors=[3, 1]),
    )

    session.execute()
    session.calculate_bmrs()

    # model weights obtained with a non-informative prior
    expected_weights = [0.51190026721838455, 0.48809973278161539]
    actual_weights = [model.model_weight_scaler for model in session.models]
    assert np.isclose(actual_weights, expected_weights).all()

    # a zero prior weight forces a zero posterior weight
    zero_prior_bmr = session.bmrs[0]
    assert np.isclose(zero_prior_bmr.priors, [0., 1.]).all()
    posteriors = zero_prior_bmr.get_model_posterior_weights(session.models)
    assert np.isclose(posteriors, [0., 1.]).all()

    # equal prior weights leave the posterior equal to the model weights
    equal_prior_bmr = session.bmrs[1]
    posteriors = equal_prior_bmr.get_model_posterior_weights(session.models)
    assert np.isclose(posteriors, actual_weights).all()

    # unequal prior weights move the posterior in the expected direction;
    # also confirm the raw priors [3, 1] were normalized to [0.75, 0.25]
    unequal_prior_bmr = session.bmrs[2]
    assert np.isclose(unequal_prior_bmr.priors, [0.75, 0.25]).all()
    posteriors = unequal_prior_bmr.get_model_posterior_weights(session.models)
    assert np.isclose(posteriors, [0.70457014, 0.29542986]).all()
# Example 4
def test_basic_dichotomous():
    # Smoke test: a two-model session executes and produces model weights
    # that match the regression values and sum to unity.
    session = Session(
        mcmc_iterations=2000,
        mcmc_num_chains=4,
        mcmc_warmup_fraction=0.25,
    )
    session.add_dichotomous_data(
        dose=[0, 1.96, 5.69, 29.75],
        n=[75, 49, 50, 49],
        incidence=[5, 1, 3, 14],
    )
    session.add_models(
        dmodels.Logistic(),
        dmodels.LogLogistic(),
    )
    session.execute()

    expected_weights = [0.62553842799869863, 0.37446157200130137]
    actual_weights = [model.model_weight_scaler for model in session.models]
    assert np.isclose(actual_weights, expected_weights).all()
    assert np.isclose(sum(actual_weights), 1.)
# Example 5
def test_extract_summary_value():
    # Parsing a raw pystan summary table should yield one dict of summary
    # statistics per parameter row (a, b, lp__).
    model = dmodels.Logistic()
    summary_text = 'Inference for Stan model: anon_model_6ec9f268cf3fd9261d734b7a1232b68a.\n2 chains, each with iter=20000; warmup=10000; thin=1; \npost-warmup draws per chain=10000, total post-warmup draws=20000.\n\n       mean se_mean     sd   2.5%    25%    50%    75%  97.5%  n_eff   Rhat\na     -1.72  2.7e-3   0.17  -2.06  -1.83  -1.71  -1.61  -1.41 3818.0    1.0\nb      1.13  4.2e-3   0.26   0.63   0.96   1.13    1.3   1.65 3856.0    1.0\nlp__ -65.81    0.02   1.01  -68.5  -66.2  -65.5 -65.08 -64.84 4253.0    1.0\n\nSamples were drawn using NUTS at Sun Aug  7 19:14:37 2016.\nFor each parameter, n_eff is a crude measure of effective sample size,\nand Rhat is the potential scale reduction factor on split chains (at \nconvergence, Rhat=1).'  # noqa
    result = model.extract_summary_values(summary_text)

    # Expected values, stated as (column header -> value) per parameter,
    # built from parallel column/row lists to keep the table readable.
    columns = ['mean', 'se_mean', 'sd', '2.5%', '25%', '50%', '75%',
               '97.5%', 'n_eff', 'Rhat']
    rows = {
        'a': [-1.72, 0.0027, 0.17, -2.06, -1.83, -1.71, -1.61, -1.41,
              3818.0, 1.0],
        'b': [1.13, 0.0042, 0.26, 0.63, 0.96, 1.13, 1.3, 1.65,
              3856.0, 1.0],
        'lp__': [-65.81, 0.02, 1.01, -68.5, -66.2, -65.5, -65.08, -64.84,
                 4253.0, 1.0],
    }
    expected = {
        name: dict(zip(columns, values))
        for name, values in rows.items()
    }
    assert result == expected