Example #1
    def test_deterministic_of_observed_modified_interface(self):
        rng = np.random.RandomState(4982)

        meas_in_1 = pm.aesaraf.floatX(2 + 4 * rng.randn(100))
        meas_in_2 = pm.aesaraf.floatX(5 + 4 * rng.randn(100))
        with pm.Model(rng_seeder=rng) as model:
            mu_in_1 = pm.Normal("mu_in_1", 0, 1, initval=0)
            sigma_in_1 = pm.HalfNormal("sd_in_1", 1, initval=1)
            mu_in_2 = pm.Normal("mu_in_2", 0, 1, initval=0)
            sigma_in_2 = pm.HalfNormal("sd_in_2", 1, initval=1)

            in_1 = pm.Normal("in_1", mu_in_1, sigma_in_1, observed=meas_in_1)
            in_2 = pm.Normal("in_2", mu_in_2, sigma_in_2, observed=meas_in_2)
            out_sum = in_1 + in_2
            pm.Deterministic("out", out_sum)

            trace = pm.sample(
                100,
                return_inferencedata=False,
                compute_convergence_checks=False,
            )
            varnames = [v for v in trace.varnames if v != "out"]
            ppc_trace = [
                dict(zip(varnames, row)) for row in zip(*(trace.get_values(v) for v in varnames))
            ]
            ppc = pm.sample_posterior_predictive(
                return_inferencedata=False,
                model=model,
                trace=ppc_trace,
                samples=len(ppc_trace),
                var_names=[x.name for x in (model.deterministics + model.basic_RVs)],
            )

            rtol = 1e-5 if aesara.config.floatX == "float64" else 1e-3
            npt.assert_allclose(ppc["in_1"] + ppc["in_2"], ppc["out"], rtol=rtol)
Example #2
    def setup_class(self):
        super().setup_class()
        self.data = np.random.normal(loc=0, scale=1, size=1000)

        with pm.Model() as self.SMABC_test:
            a = pm.Normal("a", mu=0, sigma=1)
            b = pm.HalfNormal("b", sigma=1)
            s = pm.Simulator("s", self.normal_sim, a, b, sum_stat="sort", observed=self.data)
            self.s = s

        with pm.Model() as self.SMABC_potential:
            a = pm.Normal("a", mu=0, sigma=1, initval=0.5)
            b = pm.HalfNormal("b", sigma=1)
            c = pm.Potential("c", pm.math.switch(a > 0, 0, -np.inf))
            s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data)
Example #3
def test_sample_deterministic():
    with pm.Model() as model:
        x = pm.HalfNormal("x", 1)
        y = pm.Deterministic("y", x + 100)
        idata = pm.sample(chains=1, draws=50, compute_convergence_checks=False)

    np.testing.assert_allclose(idata.posterior["y"], idata.posterior["x"] + 100)
Example #4
def build_constant_model(data, sigma_squared=10.0):
    tau = 1.0 / sigma_squared  # PyMC2 parameterizes Normals by precision (tau)
    B_0 = pm.Normal('B_0', mu=0.0, tau=tau, doc='mean', rseed=0)
    SI = pm.HalfNormal('SI', tau=tau, doc='sigma', rseed=0)
    OUT = pm.Normal('accuracies', mu=B_0, tau=1.0 / SI**2, value=data,
                    observed=True, rseed=0)
    return [B_0, SI, OUT]
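A minimal usage sketch for the PyMC2-style builder above; the data array and sampler settings are illustrative assumptions, not part of the original example:

import numpy as np
import pymc as pm  # PyMC 2.x

data = np.random.normal(0.8, 0.1, size=50)  # hypothetical observations
M = pm.MCMC(build_constant_model(data))     # wrap the node list in a sampler
M.sample(iter=10000, burn=1000)             # draw samples, discarding burn-in
print(M.stats()['B_0']['mean'])             # posterior mean of the constant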
Example #5
def test_transform_samples():
    aesara.config.on_opt_error = "raise"
    np.random.seed(13244)

    obs = np.random.normal(10, 2, size=100)
    obs_at = aesara.shared(obs, borrow=True, name="obs")
    with pm.Model() as model:
        a = pm.Uniform("a", -20, 20)
        sigma = pm.HalfNormal("sigma")
        b = pm.Normal("b", a, sigma=sigma, observed=obs_at)

        trace = sample_numpyro_nuts(chains=1, random_seed=1322, keep_untransformed=True)

    log_vals = trace.posterior["sigma_log__"].values

    trans_vals = trace.posterior["sigma"].values
    assert np.allclose(np.exp(log_vals), trans_vals)

    assert 8 < trace.posterior["a"].mean() < 11
    assert 1.5 < trace.posterior["sigma"].mean() < 2.5

    obs_at.set_value(-obs)
    with model:
        trace = sample_numpyro_nuts(chains=2, random_seed=1322, keep_untransformed=False)

    assert -11 < trace.posterior["a"].mean() < -8
    assert 1.5 < trace.posterior["sigma"].mean() < 2.5
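For context: this test assumes sample_numpyro_nuts is already in scope; in the PyMC version this snippet targets it was importable as below (an assumption about the exact version in use):

from pymc.sampling_jax import sample_numpyro_nuts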
Example #6
    def model_pymc(self, guess_value_for_pymc=None):

        xdata1, ydata1, ydata_not_normalized, norm_const = self.fittingdata()
        x = xdata1
        f = ydata1

        # If no guess values are provided, estimate them with curve_fit;
        # otherwise use the stored values passed in by the caller.

        if guess_value_for_pymc is None:
            Fit_params_Piecewise, pcov = self.model_fitting_parameters(
                function_to_fit="Piecewise", bounds=self.bounds)
            # print("Test Successful")
        else:
            Fit_params_Piecewise = guess_value_for_pymc
            # print("using stored values")
        p0_center = Fit_params_Piecewise[0]  # mean
        p0_width = Fit_params_Piecewise[1]  # sigma
        p0_slope = Fit_params_Piecewise[2]  # alpha
        p0_TP = Fit_params_Piecewise[3]  # Transition point

        # If the curve-fit routine is not sensitive to the changes,
        # impose the initial conditions by hand.

        if (Fit_params_Piecewise[3] >= np.max(xdata1)
                or Fit_params_Piecewise[2] > 0):
            p0_TP = x[np.where(f == max(f))][0] + np.std(x)
            p0_slope = -3.

        p0 = [p0_center, p0_width, p0_slope, p0_TP]

        #     A1 = pymc.Uniform("A1", 2.,6., value = p0[0])
        x0 = pymc.Uniform("x0", p0[0] - .4, p0[0] + .4, value=p0[0])
        sigma = pymc.Uniform("sigma", -0.01, 1., value=p0[1])
        alpha = pymc.Uniform("alpha", -7., 0., value=p0[2])
        TP_min = pymc.Uniform("TP_min", p0[3] - .4, p0[3] + .8, value=p0[3])

        @pymc.deterministic(plot=False)
        def function(x=x, x0=x0, sigma=sigma, alpha=alpha, TP_min=TP_min):
            xG = np.array([xx for xx in x if xx <= TP_min])

            def F1(xG):
                p1 = np.log10(np.log(10) * 1 / (np.sqrt(2 * np.pi) * sigma))
                p2 = np.log(10**xG)
                p3 = p1 - (p2 - x0)**2 / (2 * np.log(10) * sigma**2)
                return p3

            xL = np.array([xx for xx in x if xx > TP_min])
            A2 = F1(TP_min) - alpha * TP_min

            def F2(xL):
                return alpha * xL + A2

            return np.concatenate((F1(xG), F2(xL)))

        # self.lognormal_powerlaw_function(xdata=x, x0=x0, sigma=sigma, alpha=alpha, TP_min=TP_min)
        s = pymc.HalfNormal('s', tau=1)
        y = pymc.Normal("y", mu=function, tau=1 / s**2, value=f, observed=True)
        return locals()
Example #7
def model(x, f, p0):
    #     A1 = pymc.Uniform("A1", 2.,6., value = p0[0])
    x0 = pymc.Uniform("x0", p0[0] - .4, p0[0] + .4, value=p0[0])
    sigma = pymc.Uniform("sigma", -0.01, 1., value=p0[1])
    alpha = pymc.Uniform("alpha", -7., 0., value=p0[2])
    TP_min = pymc.Uniform("TP_min", p0[3] - .4, p0[3] + .8, value=p0[3])

    @pymc.deterministic(plot=False)
    def function(x=x, x0=x0, sigma=sigma, alpha=alpha, TP_min=TP_min):
        xG = np.array([xx for xx in x if xx <= TP_min])

        def F1(xG):
            p1 = np.log10(np.log(10) * 1 / (np.sqrt(2 * np.pi) * sigma))
            p2 = np.log(10**xG)
            p3 = p1 - (p2 - x0)**2 / (2 * np.log(10) * sigma**2)
            return p3

        xL = np.array([xx for xx in x if xx > TP_min])
        A2 = F1(TP_min) - alpha * TP_min

        def F2(xL):
            return alpha * xL + A2

        return np.concatenate((F1(xG), F2(xL)))

    s = pymc.HalfNormal('s', tau=1)
    #         return F1(xG)
    #     y = pymc.Normal("y", mu=function,tau = 1./f_err**2, value = f, observed = True)
    y = pymc.Normal("y", mu=function, tau=1 / s**2, value=f, observed=True)
    return locals()
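A sketch of how the locals() dict returned by model() is typically consumed in PyMC2; xdata1, ydata1, and p0 stand in for the arrays prepared in Example #6 and are assumptions here:

mdl = pymc.MCMC(model(x=xdata1, f=ydata1, p0=p0))  # MCMC accepts a dict of nodes
mdl.sample(iter=20000, burn=5000, thin=10)
alpha_draws = mdl.trace('alpha')[:]                # posterior draws of the slope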
Example #8
def model_with_dims():
    with pm.Model(coords={"city": ["Aachen", "Maastricht", "London", "Bergheim"]}) as pmodel:
        economics = pm.Uniform("economics", lower=-1, upper=1, shape=(1,))

        population = pm.HalfNormal("population", sigma=5, dims="city")

        time = pm.ConstantData("time", [2014, 2015, 2016], dims="year")

        n = pm.Deterministic("tax revenue",
                             economics * population[None, :] * time[:, None],
                             dims=("year", "city"))

        yobs = pm.MutableData("observed", np.ones((3, 4)))
        L = pm.Normal("L", n, observed=yobs)

    compute_graph = {
        "economics": set(),
        "population": set(),
        "time": set(),
        "tax revenue": {"economics", "population", "time"},
        "L": {"tax revenue"},
        "observed": {"L"},
    }
    plates = {
        "1": {"economics"},
        "city (4)": {"population"},
        "year (3)": {"time"},
        "year (3) x city (4)": {"tax revenue"},
        "3 x 4": {"L", "observed"},
    }

    return pmodel, compute_graph, plates
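To eyeball the expected compute_graph and plates, the returned model can be rendered with PyMC's graphviz helper; a sketch assuming the optional graphviz dependency is installed:

pmodel, compute_graph, plates = model_with_dims()
graph = pm.model_to_graphviz(pmodel)  # nodes and edges should mirror compute_graph
graph.render("model_with_dims")       # writes the rendered graph to disk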
Example #9
    def test_zeroinflatedpoisson(self):
        with pm.Model():
            # psi is the zero-inflation probability in [0, 1]; theta is the Poisson rate
            psi = pm.Beta("psi", alpha=1, beta=1)
            theta = pm.HalfNormal("theta", sigma=1)
            pm.ZeroInflatedPoisson("suppliers", psi=psi, theta=theta, size=20)
            gen_data = pm.sample_prior_predictive(samples=5000)
            assert gen_data.prior["theta"].shape == (1, 5000)
            assert gen_data.prior["psi"].shape == (1, 5000)
            assert gen_data.prior["suppliers"].shape == (1, 5000, 20)
Example #10
def aevb_model():
    with pm.Model() as model:
        pm.HalfNormal("x", size=(2,), total_size=5)
        pm.Normal("y", size=(2,))
    x = model.x
    y = model.y
    xr = model.compute_initial_point(0)[model.rvs_to_values[x].name]
    mu = aesara.shared(xr)
    rho = aesara.shared(np.zeros_like(xr))
    return {"model": model, "y": y, "x": x, "replace": dict(mu=mu, rho=rho)}
Example #11
def test_missing_data():
    X = np.random.normal(0, 1, size=(2, 50)).T
    Y = np.random.normal(0, 1, size=50)
    X[10:20, 0] = np.nan

    with pm.Model() as model:
        mu = pm.BART("mu", X, Y, m=10)
        sigma = pm.HalfNormal("sigma", 1)
        y = pm.Normal("y", mu, sigma, observed=Y)
        idata = pm.sample(random_seed=3415)
Example #12
    def test_transformed_vars(self):
        # Test that prior predictive returns the transformed values of RVs
        # when these are passed explicitly in `var_names`

        def ub_interval_forward(x, ub):
            # Interval transform assuming lower bound is zero
            return np.log(x - 0) - np.log(ub - x)

        with pm.Model(rng_seeder=123) as model:
            ub = pm.HalfNormal("ub", 10)
            x = pm.Uniform("x", 0, ub)

            prior = pm.sample_prior_predictive(
                var_names=["ub", "ub_log__", "x", "x_interval__"],
                samples=10,
            )

        # Check values are correct
        assert np.allclose(prior.prior["ub_log__"].data, np.log(prior.prior["ub"].data))
        assert np.allclose(
            prior.prior["x_interval__"].data,
            ub_interval_forward(prior.prior["x"].data, prior.prior["ub"].data),
        )

        # Check that it works when the original RVs are not mentioned in var_names
        with pm.Model(rng_seeder=123) as model_transformed_only:
            ub = pm.HalfNormal("ub", 10)
            x = pm.Uniform("x", 0, ub)

            prior_transformed_only = pm.sample_prior_predictive(
                var_names=["ub_log__", "x_interval__"],
                samples=10,
            )
        assert (
            "ub" not in prior_transformed_only.prior.data_vars
            and "x" not in prior_transformed_only.prior.data_vars
        )
        assert np.allclose(
            prior.prior["ub_log__"].data, prior_transformed_only.prior["ub_log__"].data
        )
        assert np.allclose(
            prior.prior["x_interval__"], prior_transformed_only.prior["x_interval__"].data
        )
Example #13
    def test_start(self):
        with pm.Model() as model:
            a = pm.Poisson("a", 5)
            b = pm.HalfNormal("b", 10)
            y = pm.Normal("y", a, b, observed=[1, 2, 3, 4])
            start = {
                "a": np.random.poisson(5, size=500),
                "b_log__": np.abs(np.random.normal(0, 10, size=500)),
            }
            trace = pm.sample_smc(500, chains=1, start=start)
Example #14
    def test_normal_model(self):
        data = st.norm(10, 0.5).rvs(1000, random_state=self.get_random_state())
        with pm.Model() as m:
            mu = pm.Normal("mu", 0, 3)
            sigma = pm.HalfNormal("sigma", 1)
            y = pm.Normal("y", mu, sigma, observed=data)
            idata = pm.sample_smc(draws=2000, kernel=pm.smc.MH)

        post = idata.posterior.stack(sample=("chain", "draw"))
        assert np.abs(post["mu"].mean() - 10) < 0.1
        assert np.abs(post["sigma"].mean() - 0.5) < 0.05
Example #15
    def test_variable_type(self):
        with pm.Model() as model:
            mu = pm.HalfNormal("mu", 1)
            a = pm.Normal("a", mu=mu, sigma=2, observed=np.array([1, 2]))
            b = pm.Poisson("b", mu, observed=np.array([1, 2]))
            trace = pm.sample(compute_convergence_checks=False, return_inferencedata=False)

        with model:
            ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False, samples=1)
            assert ppc["a"].dtype.kind == "f"
            assert ppc["b"].dtype.kind == "i"
Example #16
def test_nested_model_coords():
    with pm.Model(name="m1", coords=dict(dim1=range(2))) as m1:
        a = pm.Normal("a", dims="dim1")
        with pm.Model(name="m2", coords=dict(dim2=range(4))) as m2:
            b = pm.Normal("b", dims="dim1")
            m1.add_coord("dim3", range(4))
            c = pm.HalfNormal("c", dims="dim3")
            d = pm.Normal("d", b, c, dims="dim2")
        e = pm.Normal("e", a[None] + d[:, None], dims=("dim2", "dim1"))
    assert m1.coords is m2.coords
    assert m1.dim_lengths is m2.dim_lengths
    assert set(m2.RV_dims) < set(m1.RV_dims)
Example #17
    def test_named_model(self):
        # Named models used to fail with Simulator because the arguments to the
        # random fn used to be passed by name. This is no longer true.
        # https://github.com/pymc-devs/pymc/pull/4365#issuecomment-761221146
        name = "NamedModel"
        with pm.Model(name=name):
            a = pm.Normal("a", mu=0, sigma=1)
            b = pm.HalfNormal("b", sigma=1)
            s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data)

            trace = pm.sample_smc(draws=10, chains=2, return_inferencedata=False)
            assert f"{name}/a" in trace.varnames
            assert f"{name}/b" in trace.varnames
            assert f"{name}/b_log__" in trace.varnames
Example #18
def fixture_model():
    with pm.Model() as model:
        n = 5
        dim = 4
        with pm.Model():
            cov = pm.InverseGamma("cov", alpha=1, beta=1)
            x = pm.Normal("x",
                          mu=np.ones((dim,)),
                          sigma=pm.math.sqrt(cov),
                          shape=(n, dim))
            eps = pm.HalfNormal("eps", np.ones((n, 1)), shape=(n, dim))
            mu = pm.Deterministic("mu", at.sum(x + eps, axis=-1))
            y = pm.Normal("y", mu=mu, sigma=1, shape=(n, ))
    return model, [cov, x, eps, y]
Example #19
def test_iterator():
    with pm.Model() as model:
        a = pm.Normal("a", shape=1)
        b = pm.HalfNormal("b")
        step1 = pm.NUTS([model.rvs_to_values[a]])
        step2 = pm.Metropolis([model.rvs_to_values[b]])

    step = pm.CompoundStep([step1, step2])

    start = {"a": floatX(np.array([1.0])), "b_log__": floatX(np.array(2.0))}
    sampler = ps.ParallelSampler(10, 10, 3, 2, [2, 3, 4], [start] * 3, step, 0, False)
    with sampler:
        for draw in sampler:
            pass
Example #20
class TestUtils:
    X = np.random.normal(0, 1, size=(2, 50)).T
    Y = np.random.normal(0, 1, size=50)

    with pm.Model() as model:
        mu = pm.BART("mu", X, Y, m=10)
        sigma = pm.HalfNormal("sigma", 1)
        y = pm.Normal("y", mu, sigma, observed=Y)
        idata = pm.sample(random_seed=3415)

    def test_predict(self):
        rng = RandomState(12345)
        pred_all = pm.bart.utils.predict(self.idata, rng, size=2)
        rng = RandomState(12345)
        pred_first = pm.bart.utils.predict(self.idata, rng, X_new=self.X[:10])

        assert_almost_equal(pred_first, pred_all[0, :10], decimal=4)
        assert pred_all.shape == (2, 50)
        assert pred_first.shape == (10,)

    @pytest.mark.parametrize(
        "kwargs",
        [
            {},
            {
                "kind": "pdp",
                "samples": 2,
                "xs_interval": "quantiles",
                "xs_values": [0.25, 0.5, 0.75],
            },
            {
                "kind": "ice",
                "instances": 2
            },
            {
                "var_idx": [0],
                "rug": False,
                "smooth": False,
                "color": "k"
            },
            {
                "grid": (1, 2),
                "sharey": "none",
                "alpha": 1
            },
        ],
    )
    def test_pdp(self, kwargs):
        pm.bart.utils.plot_dependence(self.idata, X=self.X, Y=self.Y, **kwargs)
Example #21
    def test_deterministic_of_observed(self):
        rng = np.random.RandomState(8442)

        meas_in_1 = pm.aesaraf.floatX(2 + 4 * rng.randn(10))
        meas_in_2 = pm.aesaraf.floatX(5 + 4 * rng.randn(10))
        nchains = 2
        with pm.Model(rng_seeder=rng) as model:
            mu_in_1 = pm.Normal("mu_in_1", 0, 2)
            sigma_in_1 = pm.HalfNormal("sd_in_1", 1)
            mu_in_2 = pm.Normal("mu_in_2", 0, 2)
            sigma_in_2 = pm.HalfNormal("sd_in_2", 1)

            in_1 = pm.Normal("in_1", mu_in_1, sigma_in_1, observed=meas_in_1)
            in_2 = pm.Normal("in_2", mu_in_2, sigma_in_2, observed=meas_in_2)
            out_sum = in_1 + in_2
            pm.Deterministic("out", out_sum)

            trace = pm.sample(
                100,
                chains=nchains,
                return_inferencedata=False,
                compute_convergence_checks=False,
            )

            rtol = 1e-5 if aesara.config.floatX == "float64" else 1e-4

            ppc = pm.sample_posterior_predictive(
                return_inferencedata=False,
                model=model,
                trace=trace,
                samples=len(trace) * nchains,
                random_seed=0,
                var_names=[var.name for var in (model.deterministics + model.basic_RVs)],
            )

            npt.assert_allclose(ppc["in_1"] + ppc["in_2"], ppc["out"], rtol=rtol)
Example #22
def test_bart_vi():
    X = np.random.normal(0, 1, size=(3, 250)).T
    Y = np.random.normal(0, 1, size=250)
    X[:, 0] = np.random.normal(Y, 0.1)

    with pm.Model() as model:
        mu = pm.BART("mu", X, Y, m=10)
        sigma = pm.HalfNormal("sigma", 1)
        y = pm.Normal("y", mu, sigma, observed=Y)
        idata = pm.sample(random_seed=3415)
        var_imp = (idata.sample_stats["variable_inclusion"].stack(
            samples=("chain", "draw")).mean("samples"))
        var_imp /= var_imp.sum()
        assert var_imp[0] > var_imp[1:].sum()
        assert_almost_equal(var_imp.sum(), 1)
Example #23
    def test_density_dist(self):
        obs = np.random.normal(-1, 0.1, size=10)
        with pm.Model():
            mu = pm.Normal("mu", 0, 1)
            sd = pm.HalfNormal("sd", 1e-6)
            a = pm.DensityDist(
                "a",
                mu,
                sd,
                random=lambda mu, sd, rng=None, size=None: rng.normal(loc=mu, scale=sd, size=size),
                observed=obs,
            )
            prior = pm.sample_prior_predictive(return_inferencedata=False)

        npt.assert_almost_equal((prior["a"] - prior["mu"][..., None]).mean(), 0, decimal=3)
Example #24
def test_get_log_likelihood():
    obs = np.random.normal(10, 2, size=100)
    obs_at = aesara.shared(obs, borrow=True, name="obs")
    with pm.Model() as model:
        a = pm.Normal("a", 0, 2)
        sigma = pm.HalfNormal("sigma")
        b = pm.Normal("b", a, sigma=sigma, observed=obs_at)

        trace = pm.sample(tune=10, draws=10, chains=2, random_seed=1322)

    b_true = trace.log_likelihood.b.values
    a = np.array(trace.posterior.a)
    sigma_log_ = np.log(np.array(trace.posterior.sigma))
    b_jax = _get_log_likelihood(model, [a, sigma_log_])["b"]

    assert np.allclose(b_jax.reshape(-1), b_true.reshape(-1))
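As with the earlier JAX example, _get_log_likelihood is assumed to be imported from PyMC's JAX sampling module (an assumption about the version in use):

from pymc.sampling_jax import _get_log_likelihood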
Example #25
def test_init_jitter(initval, jitter_max_retries, expectation):
    with pm.Model() as m:
        pm.HalfNormal("x", transform=None, initval=initval)

    with expectation:
        # The mocked uniform yields a jitter of -1 three times (start is negative,
        # which is invalid for the HalfNormal), then +1 (positive, valid)
        with mock.patch("numpy.random.Generator.uniform", side_effect=[-1, -1, -1, 1, -1]):
            start = pm.sampling._init_jitter(
                model=m,
                initvals=None,
                seeds=[1],
                jitter=True,
                jitter_max_retries=jitter_max_retries,
            )
            m.check_start_vals(start)
Example #26
def check_exec_nuts_init(method):
    with pm.Model() as model:
        pm.Normal("a", mu=0, sigma=1, size=2)
        pm.HalfNormal("b", sigma=1)
    with model:
        start, _ = pm.init_nuts(init=method, n_init=10, seeds=[1])
        assert isinstance(start, list)
        assert len(start) == 1
        assert isinstance(start[0], dict)
        assert model.a.tag.value_var.name in start[0]
        assert model.b.tag.value_var.name in start[0]
        start, _ = pm.init_nuts(init=method, n_init=10, chains=2, seeds=[1, 2])
        assert isinstance(start, list)
        assert len(start) == 2
        assert isinstance(start[0], dict)
        assert model.a.tag.value_var.name in start[0]
        assert model.b.tag.value_var.name in start[0]
Example #27
    def test_custom_dist_sum_stat(self):
        with pm.Model() as m:
            a = pm.Normal("a", mu=0, sigma=1)
            b = pm.HalfNormal("b", sigma=1)
            s = pm.Simulator(
                "s",
                self.normal_sim,
                a,
                b,
                distance=self.abs_diff,
                sum_stat=self.quantiles,
                observed=self.data,
            )

        assert self.count_rvs(m.logpt) == 1

        with m:
            pm.sample_smc(draws=100)
Example #28
def test_bart_random():
    X = np.random.normal(0, 1, size=(2, 50)).T
    Y = np.random.normal(0, 1, size=50)

    with pm.Model() as model:
        mu = pm.BART("mu", X, Y, m=10)
        sigma = pm.HalfNormal("sigma", 1)
        y = pm.Normal("y", mu, sigma, observed=Y)
        idata = pm.sample(random_seed=3415, chains=1)

    rng = RandomState(12345)
    pred_all = mu.owner.op.rng_fn(rng, size=2)
    rng = RandomState(12345)
    pred_first = mu.owner.op.rng_fn(rng, X_new=X[:10])

    assert_almost_equal(pred_first, pred_all[0, :10], decimal=4)
    assert pred_all.shape == (2, 50)
    assert pred_first.shape == (10,)
Example #29
def build_linear_model(x_vals, data, sigma_squared=10.0):
    tau = 1.0 / sigma_squared
    B_0 = pm.Normal('B_0', mu=0.0, tau=tau, doc='line slope', rseed=0)
    B_1 = pm.Normal('B_1', mu=0.0, tau=tau, doc='line intercept', rseed=0)
    SI = pm.HalfNormal('SI', tau=tau, doc='line sigma', rseed=0)
    MU = pm.Deterministic(
        name='mus',
        eval=lambda B_0, B_1: B_0 * x_vals + B_1,
        parents={
            'B_0': B_0,
            'B_1': B_1
        },
        doc='mu for line',
        plot=False,
        # rseed=0  NOTE: PyMC version 2.3.6 throws an error here.
    )
    OUT = pm.Normal('accuracies', mu=MU, tau=1.0 / SI**2, value=data,
                    observed=True, rseed=0)
    return [B_0, B_1, SI, MU, OUT]
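And a matching usage sketch for the linear builder; x_vals and data below are synthetic inputs for illustration only:

x_vals = np.linspace(0.0, 1.0, 40)                         # hypothetical predictor
data = 0.5 * x_vals + 0.2 + np.random.normal(0, 0.05, 40)  # synthetic responses
M = pm.MCMC(build_linear_model(x_vals, data))
M.sample(iter=10000, burn=1000)
print(M.stats()['B_0']['mean'], M.stats()['B_1']['mean'])  # slope and intercept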
Example #30
    def test_multiobservedrv_to_observed_data(self, multiobs):
        # fake regression data, with weights (W)
        np.random.seed(2019)
        N = 100
        X = np.random.uniform(size=N)
        W = 1 + np.random.poisson(size=N)
        a, b = 5, 17
        Y = a + np.random.normal(b * X)

        with pm.Model():
            a = pm.Normal("a", 0, 10)
            b = pm.Normal("b", 0, 10)
            mu = a + b * X
            sigma = pm.HalfNormal("sigma", 1)
            w = W

            def weighted_normal(value, mu, sigma, w):
                return w * pm.Normal.logp(value, mu, sigma)

            y_logp = pm.DensityDist(  # pylint: disable=unused-variable
                "y_logp",
                mu,
                sigma,
                w,
                logp=weighted_normal,
                observed=Y,
                size=N)
            idata = pm.sample(20,
                              tune=20,
                              return_inferencedata=True,
                              idata_kwargs={"density_dist_obs": multiobs})
        multiobs_str = "" if multiobs else "~"
        test_dict = {
            "posterior": ["a", "b", "sigma"],
            "sample_stats": ["lp"],
            "log_likelihood": ["y_logp"],
            f"{multiobs_str}observed_data": ["y", "w"],
        }
        fails = check_multiple_attrs(test_dict, idata)
        assert not fails
        if multiobs:
            assert idata.observed_data.y.dtype.kind == "f"