Example #1
def test_proceed_deterministic_failure_on_unnamed_deterministic():
    @pm.model
    def model():
        x = yield pm.Normal("x", 0, 1)
        yield pm.Deterministic(None, x)

    with pytest.raises(EvaluationError):
        pm.evaluate_model(model())
Example #2
def test_raise_if_return_distribution():
    def invalid_model():
        yield pm.distributions.Normal("n1", 0, 1)
        return pm.distributions.Normal("n2", 0, 1)

    with pytest.raises(pm.flow.executor.EvaluationError) as e:
        pm.evaluate_model(invalid_model())
    assert e.match("should not contain")
Example #3
def test_deterministic_with_distribution_name_fails():
    @pm.model
    def model():
        x = yield pm.Normal("x", 0, 1)
        det = yield pm.Deterministic("x", x)
        return det

    with pytest.raises(pm.flow.executor.EvaluationError):
        pm.evaluate_model(model())
Example #4
def test_evaluate_model_failure_on_state_and_values():
    values = {"model/x": 1}
    st = pm.flow.executor.SamplingState(observed_values=values)

    @pm.model
    def model():
        yield pm.Normal("x", 0, 1)

    with pytest.raises(ValueError):
        pm.evaluate_model(model(), state=st, values=values)
Example #5
def test_unable_to_create_duplicate_variable():
    def invalid_model():
        yield pm.distributions.HalfNormal("n", 1, transform=pm.distributions.transforms.Log())
        yield pm.distributions.Normal("n", 0, 1)

    with pytest.raises(pm.flow.executor.EvaluationError) as e:
        pm.evaluate_model(invalid_model())
    assert e.match("duplicate")
    with pytest.raises(pm.flow.executor.EvaluationError) as e:
        pm.evaluate_model_transformed(invalid_model())
    assert e.match("duplicate")
Example #6
def test_incompatible_observed_shape():
    @pm.model
    def model(observed):
        a = yield pm.Normal("a", 0, [1, 2], observed=observed)

    observed_value = np.arange(3, dtype="float32")

    with pytest.raises(EvaluationError):
        pm.evaluate_model(model(None), observed={"model/a": observed_value})

    with pytest.raises(EvaluationError):
        pm.evaluate_model(model(observed_value))
Example #7
def test_unnamed_return_2():
    @pm.model(name=None)
    def a_model():
        return (yield pm.HalfNormal("n", 1, transform=pm.distributions.transforms.Log()))

    _, state = pm.evaluate_model(a_model(name="b_model"))
    assert "b_model" in state.deterministics_values

    with pytest.raises(pm.flow.executor.EvaluationError) as e:
        pm.evaluate_model(a_model())
    assert e.match("unnamed")

    with pytest.raises(pm.flow.executor.EvaluationError) as e:
        pm.evaluate_model_transformed(a_model())
    assert e.match("unnamed")
Example #8
def test_uncaught_exception_works():
    @pm.model
    def a_model():
        try:
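            # yielding a plain value (not a distribution) makes the executor raise
            # into the generator; the bare except below swallows that error, which
            # is why evaluation ends with StopExecution (see the asserts below)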
            yield 1
        except:
            pass
        yield pm.distributions.HalfNormal("n", 1, transform=pm.distributions.transforms.Log())

    with pytest.raises(pm.flow.executor.StopExecution) as e:
        pm.evaluate_model(a_model())
    assert e.match("something_bad")

    with pytest.raises(pm.flow.executor.StopExecution) as e:
        pm.evaluate_model_transformed(a_model())
    assert e.match("something_bad")
Example #9
def test_unreduced_log_prob(fixture_batch_shapes):
    observed_value = np.ones(10, dtype="float32")

    @pm.model
    def model():
        a = yield pm.Normal("a", 0, 1)
        b = yield pm.HalfNormal("b", 1)
        c = yield pm.Normal("c",
                            loc=a,
                            scale=b,
                            event_stack=len(observed_value))

    values = {
        "model/a": np.zeros(fixture_batch_shapes, dtype="float32"),
        "model/b": np.ones(fixture_batch_shapes, dtype="float32"),
    }
    observed = {
        "model/c":
        np.broadcast_to(observed_value,
                        fixture_batch_shapes + observed_value.shape)
    }
    state = pm.evaluate_model(model(), values=values, observed=observed)[1]
    unreduced_log_prob = state.collect_unreduced_log_prob()
    assert unreduced_log_prob.numpy().shape == fixture_batch_shapes
    np.testing.assert_allclose(tf.reduce_sum(unreduced_log_prob),
                               state.collect_log_prob())
Example #10
def test_as_sampling_state_works_observed_is_constrained(
        complex_model_with_observed):
    _, state = pm.evaluate_model(complex_model_with_observed())
    sampling_state = state.as_sampling_state()
    assert not sampling_state.transformed_values
    assert set(sampling_state.observed_values) == {"complex_model/a/n"}
    assert set(sampling_state.untransformed_values) == {"complex_model/n"}
Example #11
def test_complex_model_no_keep_return(complex_model):
    _, state = pm.evaluate_model(complex_model())

    assert set(state.untransformed_values) == {"complex_model/n", "complex_model/a/n"}
    assert set(state.deterministics_values) == {"complex_model/a"}
    assert not state.transformed_values  # we call untransformed executor
    assert not state.observed_values
Example #12
def test_differently_shaped_logp():
    def model():
        yield dist.Normal("n1", np.zeros(10), np.ones(10))
        yield dist.Normal("n2", np.zeros(3), np.ones(3))

    _, state = pm.evaluate_model(model())
    state.collect_log_prob()  # this should work
Example #13
def test_observed_cant_mix_with_untransformed_and_raises_an_error_case_untransformed_executor(
    transformed_model_with_observed,
):
    with pytest.raises(pm.flow.executor.EvaluationError) as e:
        _, state = pm.evaluate_model(transformed_model_with_observed(),
                                     values=dict(n=0.0))
    assert e.match("{'n': None}")
    assert e.match("'n' from untransformed values")
Example #14
def test_complex_model_keep_return():
    @pm.model
    def nested_model(cond):
        norm = yield dist.HalfNormal("n",
                                     cond**2,
                                     transform=dist.transforms.Log())
        return norm

    @pm.model()
    def complex_model():
        norm = yield dist.Normal("n", 0, 1)
        result = yield nested_model(norm, name="a")
        return result

    _, state = pm.evaluate_model(complex_model())

    assert set(state.untransformed_values) == {
        "complex_model/n", "complex_model/a/n"
    }
    assert set(state.deterministics) == {
        "complex_model",
        "complex_model/a",
    }
    assert not state.transformed_values  # we call untransformed executor
    assert not state.observed_values
Example #15
def test_observed_are_passed_correctly(complex_model_with_observed):
    _, state = pm.evaluate_model(complex_model_with_observed())

    assert set(state.untransformed_values) == {"complex_model/n", "complex_model/a"}
    assert not state.transformed_values  # we call untransformed executor
    assert set(state.observed_values) == {"complex_model/a/n"}
    assert np.allclose(state.all_values["complex_model/a/n"], np.ones(10))
Example #16
def test_executor_logp_tensorflow(transformed_model):
    norm = tfd.HalfNormal(1)

    _, state = pm.evaluate_model(transformed_model(), values=dict(n=math.pi))

    np.testing.assert_allclose(state.collect_log_prob(),
                               norm.log_prob(math.pi),
                               equal_nan=False)
Example #17
def test_executor_logp_tensorflow(transformed_model):
    tfp = pytest.importorskip("tensorflow_probability")
    tfd = tfp.distributions

    norm = tfd.HalfNormal(1)

    _, state = pm.evaluate_model(transformed_model(), values=dict(n=math.pi))

    np.testing.assert_allclose(state.collect_log_prob(), norm.log_prob(math.pi), equal_nan=False)
Example #18
def test_executor_on_conditionally_independent(fixture_batch_shapes):
    @pm.model
    def model():
        a = yield pm.Normal("a", 0, 1, conditionally_independent=True)
        b = yield pm.Normal("b", a, 1)

    _, state = pm.evaluate_model(model(), sample_shape=fixture_batch_shapes)
    assert state.untransformed_values["model/a"].shape == fixture_batch_shapes
    assert state.untransformed_values["model/b"].shape == fixture_batch_shapes
Example #19
def test_sampling_state_clone(deterministics_in_nested_models):
    model = deterministics_in_nested_models[0]
    observed = {"model/nested_model/x": 0.0}
    _, state = pm.evaluate_model(model(), observed=observed)
    clone = state.clone()
    assert set(state.all_values) == set(clone.all_values)
    assert all((state.all_values[k] == v for k, v in clone.all_values.items()))
    assert set(state.deterministics) == set(clone.deterministics)
    assert all((state.deterministics[k] == v
                for k, v in clone.deterministics.items()))
    assert state.posterior_predictives == clone.posterior_predictives
Example #20
def test_observed_are_set_to_none_for_posterior_predictive_correctly(
        complex_model_with_observed):
    _, state = pm.evaluate_model(complex_model_with_observed(),
                                 observed={"complex_model/a/n": None})

    assert set(state.untransformed_values) == {
        "complex_model/n", "complex_model/a/n"
    }
    assert set(state.deterministics) == {"complex_model/a"}
    assert not state.transformed_values  # we call untransformed executor
    assert not state.observed_values
    assert not np.allclose(state.all_values["complex_model/a/n"], np.ones(10))
Example #21
def test_deterministics(model_with_deterministics):
    model, expected_deterministics, expected_ops, expected_ops_inputs = model_with_deterministics
    _, state = pm.evaluate_model(model())

    assert len(state.deterministics) == len(expected_deterministics)
    assert set(expected_deterministics) <= set(state.deterministics)
    for expected_deterministic, op, op_inputs in zip(expected_deterministics,
                                                     expected_ops,
                                                     expected_ops_inputs):
        inputs = [v for k, v in state.all_values.items() if k in op_inputs]
        out = op(*inputs)
        np.testing.assert_allclose(
            state.deterministics[expected_deterministic], out)
Example #22
def test_log_prob_elemwise(fixture_model_with_plates):
    model, expected_rv_shapes = fixture_model_with_plates
    _, state = pm.evaluate_model(model())
    log_prob_elemwise = dict(
        zip(state.distributions, state.collect_log_prob_elemwise())
    )  # This will discard potentials in log_prob_elemwise
    log_prob = state.collect_log_prob()
    assert len(log_prob_elemwise) == len(expected_rv_shapes)
    assert all(rv in log_prob_elemwise for rv in expected_rv_shapes)
    assert all(log_prob_elemwise[rv].shape == shape
               for rv, shape in expected_rv_shapes.items())
    assert log_prob.numpy() == sum(
        map(tf.reduce_sum, log_prob_elemwise.values())).numpy()
Example #23
def test_simple_model(simple_model):
    _, state = pm.evaluate_model(simple_model())
    assert "n" in state.untransformed_values
    assert not state.observed_values
    assert not state.transformed_values
Example #24
def test_single_distribution():
    _, state = pm.evaluate_model(pm.distributions.Normal("n", 0, 1))
    assert "n" in state.all_values
Example #25
def test_class_model(class_model):
    """Test that model can be defined as method in an object definition"""
    _, state = pm.evaluate_model(class_model.class_model_method())
    assert "class_model_method/n" in state.untransformed_values
    assert not state.observed_values
    assert not state.transformed_values
Example #26
def test_unnamed_distribution_to_prior():
    f = lambda: (yield pm.distributions.Normal.dist(0, 1).prior("n"))
    _, state = pm.evaluate_model(f())
    assert "n" in state.untransformed_values
Example #27
def test_unnamed_distribution():
    f = lambda: (yield pm.distributions.Normal.dist(0, 1))
    with pytest.raises(pm.flow.executor.EvaluationError) as e:
        pm.evaluate_model(f())
    assert e.match("anonymous Distribution")
Example #28
def test_data_from_model(model, modelParams, params_dict, to_return=None):
    """
    Generates a test dataset from our model! Needs some pregenerated
    data file which is quite strange but only uses it to cast the right dimensions!

    Parameters
    ----------
    model: pymc4 model
        The model to sample from
    modelParams: :py:class:`covid19_npis.ModelParams`
        Instance of modelParams, mainly used for number of age groups and
        number of countries.
    params_dict: dictionary
        Parameters for the test run.
    to_return: list str
        Return these variables

    Returns
    -------
    : new_cases_inferred, R_t, interventions
    """
    # ------------------------------------------------------------------------------ #
    # Set params for the test dataset
    # ------------------------------------------------------------------------------ #
    len_gen_interv_kernel = 12
    num_interventions = 2

    if to_return is None:
        to_return = []

    model_name = model(modelParams).name

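    # prepend singleton chain and draw axes so az.from_dict sees a
    # (chain, draw, ...) shaped posterior entry for every parameter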
    dict_with_model_name = {
        f"{model_name}/{key}": tf.cast(value, "float32")[tf.newaxis,
                                                         tf.newaxis]
        for key, value in params_dict.items()
    }
    trace = az.from_dict(posterior=dict_with_model_name)

    var_names = []
    # e.g. ["R_0", "new_cases_inferred", "R_t", "g", "d_i_c_p", "new_I_t", "h_0_t"]
    variables = to_return

    for var in variables:
        var_names.append(f"{model_name}/{var}")

    # Sample
    trace = pm.sample_posterior_predictive(
        model(modelParams),
        trace,
        var_names=var_names,
        use_auto_batching=False,
    )

    # Convert to pandas
    _, sample_state = pm.evaluate_model(model(modelParams))

    def convert_to_pandas(key):
        df = data.convert_trace_to_dataframe(
            trace,
            sample_state=sample_state,
            key=key,
            data_type="posterior_predictive",
        )
        df.index = df.index.droplevel(["chain", "draw"])
        if "time" in df.index.names:
            df = df.stack().unstack(level="time").T
            df.columns = df.columns.droplevel(
                -1)  # stack adds an additional dimension
            df.index = df.index.rename("date")
        return df

    """
    new_cases_inferred = convert_to_pandas("new_cases_inferred")
    R_t = convert_to_pandas("R_t")
    d = data.convert_trace_to_dataframe(
        trace,
        sample_state=sample_state,
        key="d_i_c_p",
        data_type="posterior_predictive",
    )

    # Take intervention array from model params
    for c, country in enumerate(modelParams.countries):
        if c == 0:
            interv = country.data_interventions
        else:
            interv = interv.join(country.data_interventions)

    extra = []
    for var in to_return:
        extra.append(convert_to_pandas(var))
    """
    dfs = [convert_to_pandas(var) for var in variables]

    return dfs
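
A minimal usage sketch for the helper above (the names my_model and modelParams, and the parameter values, are illustrative placeholders, not taken from the source):

def _example_usage(my_model, modelParams):
    # request a single posterior-predictive variable and unpack the DataFrame list
    dfs = test_data_from_model(
        my_model, modelParams, params_dict={"R_0": 3.3}, to_return=["R_t"]
    )
    return dfs[0]  # DataFrame for "R_t"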
Example #29
def test_transformed_model_untransformed_executor(transformed_model):
    _, state = pm.evaluate_model(transformed_model())

    assert set(state.untransformed_values) == {"n"}
    assert not state.transformed_values  # we call untransformed executor
    assert not state.observed_values
Example #30
def test_as_sampling_state_does_not_work_if_untransformed_exec(complex_model):
    _, state = pm.evaluate_model(complex_model())
    with pytest.raises(TypeError) as e:
        state.as_sampling_state()
    e.match("'complex_model/a/__log_n' is not found")