Example 1
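These snippets look like tests written against PyMC4's TensorFlow-based API. A minimal import preamble they assume might look like the sketch below; the exact module paths for forward_sampling and EvaluationError are assumptions, and Example 7 additionally relies on project-specific covid19_npis helpers (data, modelParams) that are not shown here.

import numpy as np
import pytest
import tensorflow as tf
import arviz as az

import pymc4 as pm
from pymc4 import forward_sampling                # assumed module path
from pymc4.flow.executor import EvaluationError   # assumed module path
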
def test_sample_ppc_var_names(model_fixture):
    model, observed = model_fixture
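    # build a minimal InferenceData-style trace with the model's sd and the observed y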
    trace = pm.inference.utils.trace_to_arviz({
        "model/sd": tf.ones((10, 1), dtype="float32"),
        "model/y": tf.convert_to_tensor(observed[:, None]),
    })

    # an empty var_names list is rejected
    with pytest.raises(ValueError):
        forward_sampling.sample_posterior_predictive(model(), trace, var_names=[])

    # requesting a variable the model does not define fails
    with pytest.raises(KeyError):
        forward_sampling.sample_posterior_predictive(
            model(), trace, var_names=["name not in model!"])

    # a posterior entry that does not correspond to any model variable fails
    with pytest.raises(TypeError):
        trace.posterior["name not in model!"] = tf.constant(1.0)
        pm.sample_posterior_predictive(model(), trace)
    del trace.posterior["name not in model!"]

    var_names = ["model/sd", "model/x", "model/dy"]
    ppc = pm.sample_posterior_predictive(
        model(), trace, var_names=var_names).posterior_predictive
    assert set(var_names) == set(ppc)
    assert ppc["model/sd"].shape == trace.posterior["model/sd"].shape
Example 2
def test_sample_ppc_corrupt_trace():
    @pm.model
    def model():
        x = yield pm.Normal("x", tf.ones(5), 1)
        y = yield pm.Normal("y", x, 1)

    trace1 = {"model/x": np.ones(7, dtype="float32")}

    # corrupt: model/y is a scalar, but y should share x's shape (5,)
    trace2 = {
        "model/x": np.ones(5, dtype="float32"),
        "model/y": np.array(0, dtype="float32"),
    }
    with pytest.raises(EvaluationError):
        pm.sample_posterior_predictive(model(), trace1)
    with pytest.raises(EvaluationError):
        pm.sample_posterior_predictive(model(), trace2)
Example 3
def test_posterior_predictive(mixture):
    model, n, _ = mixture
    trace = pm.sample(model, num_samples=100, num_chains=2)
    ppc = pm.sample_posterior_predictive(model, trace).posterior_predictive
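    # draws are shaped (num_chains, num_draws) + the mixture's event shape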
    if n == 1:
        assert ppc["mixture/mixture"].shape == (2, 100, 100)
    else:
        assert ppc["mixture/mixture"].shape == (2, 100, 100, n)
Example 4
def test_sample_ppc_var_names(model_fixture):
    model, observed = model_fixture
    trace = {
        "model/sd": tf.convert_to_tensor(np.array(1.0, dtype="float32")),
        "model/y": tf.convert_to_tensor(observed),
    }

    with pytest.raises(ValueError):
        pm.sample_posterior_predictive(model(), trace, var_names=[])

    with pytest.raises(KeyError):
        pm.sample_posterior_predictive(model(),
                                       trace,
                                       var_names=["name not in model!"])

    with pytest.raises(TypeError):
        bad_trace = trace.copy()
        bad_trace["name not in model!"] = tf.constant(1.0)
        pm.sample_posterior_predictive(model(), bad_trace)

    var_names = ["model/sd", "model/x", "model/dy"]
    ppc = pm.sample_posterior_predictive(model(), trace, var_names=var_names)
    assert set(var_names) == set(ppc)
    assert ppc["model/sd"].shape == trace["model/sd"].shape
    assert np.all(
        [v.shape == observed.shape for k, v in ppc.items() if k != "model/sd"])
Example 5
def test_sample_ppc_corrupt_trace():
    @pm.model
    def model():
        x = yield pm.Normal("x", tf.ones(5), 1)
        y = yield pm.Normal("y", x, 1)

    trace1 = pm.inference.utils.trace_to_arviz(
        {"model/x": tf.ones((7, 1), dtype="float32")})

    trace2 = pm.inference.utils.trace_to_arviz({
        "model/x": tf.ones((1, 5), dtype="float32"),
        "model/y": tf.zeros((1, 1), dtype="float32"),
    })
    with pytest.raises(EvaluationError):
        pm.sample_posterior_predictive(model(), trace1)
    with pytest.raises(EvaluationError):
        pm.sample_posterior_predictive(model(), trace2)
Example 6
def test_sample_posterior_predictive(posterior_predictive_fixture):
    (
        model,
        observed,
        core_ppc_shapes,
        observed_in_RV,
        trace,
        num_samples,
        num_chains,
    ) = posterior_predictive_fixture

    # if the fixture's model already binds the observed data inside the RVs,
    # don't pass it again; otherwise hand it to sample_posterior_predictive
    if observed_in_RV:
        observed_kwarg = None
    else:
        observed_kwarg = observed
    ppc = pm.sample_posterior_predictive(
        model(), trace, observed=observed_kwarg).posterior_predictive
    assert set(ppc) == set(observed)
    assert np.all([
        v.shape == (num_chains, num_samples) + observed[k].shape
        for k, v in ppc.items()
    ])
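Taken together, Examples 3, 4, and 6 exercise the same round trip: draw a posterior with pm.sample, then call pm.sample_posterior_predictive and get arrays shaped (num_chains, num_samples) plus each variable's observed/event shape. A minimal sketch of that pattern, assuming the pymc4 API as used above (including the observed= keyword on distributions that the observed_in_RV fixture suggests), might be:

import numpy as np
import pymc4 as pm

@pm.model
def toy_model():
    # latent mean with a standard normal prior
    mu = yield pm.Normal("mu", 0.0, 1.0)
    # likelihood conditioned on five observed values
    yield pm.Normal("y", mu, 1.0, observed=np.zeros(5, dtype="float32"))

trace = pm.sample(toy_model(), num_samples=50, num_chains=2)
ppc = pm.sample_posterior_predictive(toy_model(), trace).posterior_predictive
print(ppc["toy_model/y"].shape)  # expected: (num_chains, num_samples, 5) == (2, 50, 5)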
Example 7
def test_data_from_model(model, modelParams, params_dict, to_return=None):
    """
    Generates a test dataset from our model! Needs some pregenerated
    data file which is quite strange but only uses it to cast the right dimensions!

    Parameters
    ----------
    model: pymc4 model
        The model to sample from
    modelParams: :py:class:`covid19_npis.ModelParams`
        Instance of modelParams, mainly used for number of age groups and
        number of countries.
    params_dict: dictionary
        Parameters for the test run.
    to_return: list str
        Return these variables

    Returns
    -------
    : new_cases_inferred, R_t, interventions
    """
    # ------------------------------------------------------------------------------ #
    # Set params for the test dataset
    # ------------------------------------------------------------------------------ #
    len_gen_interv_kernel = 12
    num_interventions = 2

    if to_return is None:
        to_return = []

    model_name = model(modelParams).name

    dict_with_model_name = {
        f"{model_name}/{key}": tf.cast(value, "float32")[tf.newaxis, tf.newaxis]
        for key, value in params_dict.items()
    }
    trace = az.from_dict(posterior=dict_with_model_name)
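    # az.from_dict expects posterior arrays shaped (chain, draw, ...), which is
    # why two leading axes are added to every parameter value above.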

    # possible variables of interest include "R_0", "new_cases_inferred",
    # "R_t", "g", "d_i_c_p", "new_I_t", "h_0_t"
    variables = to_return
    var_names = [f"{model_name}/{var}" for var in variables]

    # Sample
    trace = pm.sample_posterior_predictive(
        model(modelParams),
        trace,
        var_names=var_names,
        use_auto_batching=False,
    )

    # Convert to pandas
    _, sample_state = pm.evaluate_model(model(modelParams))

    def convert_to_pandas(key):
        df = data.convert_trace_to_dataframe(
            trace,
            sample_state=sample_state,
            key=key,
            data_type="posterior_predictive",
        )
        df.index = df.index.droplevel(["chain", "draw"])
        if "time" in df.index.names:
            df = df.stack().unstack(level="time").T
            df.columns = df.columns.droplevel(-1)  # stack adds an additional level
            df.index = df.index.rename("date")
        return df

    """
    new_cases_inferred = convert_to_pandas("new_cases_inferred")
    R_t = convert_to_pandas("R_t")
    d = data.convert_trace_to_dataframe(
        trace,
        sample_state=sample_state,
        key="d_i_c_p",
        data_type="posterior_predictive",
    )

    # Take intervention array from model params
    for c, country in enumerate(modelParams.countries):
        if c == 0:
            interv = country.data_interventions
        else:
            interv = interv.join(country.data_interventions)

    extra = []
    for var in to_return:
        extra.append(convert_to_pandas(var))
    """
    dfs = [convert_to_pandas(var) for var in variables]

    return dfs
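As a final note on the trace layout used throughout, arviz's from_dict treats the two leading axes of each posterior array as (chain, draw), which is what the tf.newaxis insertions and the (chains, draws, ...) shape assertions above rely on. A tiny self-contained check (the variable name here is hypothetical):

import numpy as np
import arviz as az

posterior = {"model/R_0": np.ones((1, 1, 2, 3))}  # 1 chain, 1 draw, hypothetical (2, 3) event shape
idata = az.from_dict(posterior=posterior)
print(idata.posterior["model/R_0"].dims)   # ('chain', 'draw', 'model/R_0_dim_0', 'model/R_0_dim_1')
print(idata.posterior["model/R_0"].shape)  # (1, 1, 2, 3)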