Example #1
    def test_sample_after_set_data(self):
        with pm.Model() as model:
            x = pm.Data("x", [1.0, 2.0, 3.0])
            y = pm.Data("y", [1.0, 2.0, 3.0])
            beta = pm.Normal("beta", 0, 10.0)
            pm.Normal("obs", beta * x, np.sqrt(1e-2), observed=y)
            pm.sample(1000, init=None, tune=1000, chains=1)
        # Predict on new data.
        new_x = [5.0, 6.0, 9.0]
        new_y = [5.0, 6.0, 9.0]
        with model:
            pm.set_data(new_data={"x": new_x, "y": new_y})
            new_trace = pm.sample(1000, init=None, tune=1000, chains=1)
            pp_trace = pm.sample_posterior_predictive(new_trace, 1000)
            pp_tracef = pm.fast_sample_posterior_predictive(new_trace, 1000)

        assert pp_trace["obs"].shape == (1000, 3)
        assert pp_tracef["obs"].shape == (1000, 3)
        np.testing.assert_allclose(new_y,
                                   pp_trace["obs"].mean(axis=0),
                                   atol=1e-1)
        np.testing.assert_allclose(new_y,
                                   pp_tracef["obs"].mean(axis=0),
                                   atol=1e-1)
Example #2
    def test_implicit_coords_series(self):
        ser_sales = pd.Series(data=np.random.randint(low=0, high=30, size=22),
                              index=pd.date_range(start="2020-05-01",
                                                  periods=22,
                                                  freq="24H",
                                                  name="date"),
                              name="sales")
        with pm.Model() as pmodel:
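            # export_index_as_coords pulls the Series' DatetimeIndex in as the "date" coordinate.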
            pm.Data("sales",
                    ser_sales,
                    dims="date",
                    export_index_as_coords=True)

        assert "date" in pmodel.coords
        assert len(pmodel.coords["date"]) == 22
        assert pmodel.RV_dims == {'sales': ('date', )}
Example #3
    def test_autodetect_coords_from_model(self, use_context):
        df_data = pd.DataFrame(columns=["date"]).set_index("date")
        dates = pd.date_range(start="2020-05-01", end="2020-05-20")
        for city, mu in {"Berlin": 15, "San Marino": 18, "Paris": 16}.items():
            df_data[city] = np.random.normal(loc=mu, size=len(dates))
        df_data.index = dates
        df_data.index.name = "date"

        coords = {"date": df_data.index, "city": df_data.columns}
        with pm.Model(coords=coords) as model:
            europe_mean = pm.Normal("europe_mean_temp", mu=15.0, sd=3.0)
            city_offset = pm.Normal("city_offset", mu=0.0, sd=3.0, dims="city")
            city_temperature = pm.Deterministic("city_temperature",
                                                europe_mean + city_offset,
                                                dims="city")

            data_dims = ("date", "city")
            data = pm.Data("data", df_data, dims=data_dims)
            _ = pm.Normal("likelihood",
                          mu=city_temperature,
                          sd=0.5,
                          observed=data,
                          dims=data_dims)

            trace = pm.sample(
                return_inferencedata=False,
                compute_convergence_checks=False,
                cores=1,
                chains=1,
                tune=20,
                draws=30,
                step=pm.Metropolis(),
            )
            if use_context:
                idata = from_pymc3(trace=trace)
        if not use_context:
            idata = from_pymc3(trace=trace, model=model)

        assert "city" in list(idata.posterior.dims)
        assert "city" in list(idata.observed_data.dims)
        assert "date" in list(idata.observed_data.dims)
        np.testing.assert_array_equal(idata.posterior.coords["city"],
                                      coords["city"])
        np.testing.assert_array_equal(idata.observed_data.coords["date"],
                                      coords["date"])
        np.testing.assert_array_equal(idata.observed_data.coords["city"],
                                      coords["city"])
Example #4
    def test_ignores_observed(self):
        observed = np.random.normal(10, 1, size=200)
        with pm.Model():
            # Use a prior that's way off to show we're ignoring the observed variables
            observed_data = pm.Data("observed_data", observed)
            mu = pm.Normal("mu", mu=-100, sigma=1)
            positive_mu = pm.Deterministic("positive_mu", np.abs(mu))
            z = -1 - positive_mu
            pm.Normal("x_obs", mu=z, sigma=1, observed=observed_data)
            prior = pm.sample_prior_predictive()

        assert "observed_data" not in prior
        assert (prior["mu"] < 90).all()
        assert (prior["positive_mu"] > 90).all()
        assert (prior["x_obs"] < 90).all()
        assert prior["x_obs"].shape == (500, 200)
        npt.assert_array_almost_equal(prior["positive_mu"], np.abs(prior["mu"]), decimal=4)
Example #5
    def get_model_1(self,
                    dataframe,
                    prior_mu_orthogonal=(0.09, 2.5),
                    prior_diff_scale=2.5,
                    prior_sigma=1.0):
        """ Compute marginal-log-likelihood of M5: Synergy is needed to perform a task.
        
        Main effect of projection (parallel > orthogonal).
        Strong synergies in all tasks. This model implies poor performance for the
        additional goal in the constrained task.
        
        Prior belief: Unlikely.
        
        :type dataframe: pandas.DataFrame
        :rtype: pm.Model
        """
        coords = self.get_coordinates(dataframe)

        with pm.Model(coords=coords) as model:
            model.name = "M1"
            # non-centered priors.
            z_ortho = pm.Normal('z_ortho', mu=0, sigma=1)
            mu_ortho = pm.Deterministic(
                "mu_ortho",
                prior_mu_orthogonal[0] + z_ortho * prior_mu_orthogonal[1])
            # Assume positive difference.
            mu_diff = pm.HalfNormal('mu_diff', sigma=prior_diff_scale)
            mu_parallel = pm.Deterministic('mu_parallel', mu_ortho + mu_diff)

            # Stack priors.
            theta = pm.math.stack((mu_ortho, mu_parallel)).T
            # Model error:
            sigma = pm.Exponential("sigma", lam=prior_sigma, dims='Direction')
            # Observed variable.
            projection_obs = pm.Data("projection_obs",
                                     dataframe[coords['Direction']],
                                     dims=('obs_id', 'Direction'))
            # Using user_idx to index theta somehow tells which prior belongs to which user.
            projection = pm.Normal("projection",
                                   mu=theta,
                                   sigma=sigma,
                                   observed=projection_obs,
                                   dims=('obs_id', 'Direction'))

        self.update_sample_params(model.name, tune=4000, target_accept=0.95)
        return model
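
A stand-alone sketch of the non-centered parameterization used above (hypothetical numbers; drawing z ~ Normal(0, 1) and shifting/scaling is equivalent to mu ~ Normal(0.09, 2.5), but tends to sample better):

import pymc3 as pm

with pm.Model():
    z = pm.Normal("z", mu=0, sigma=1)            # unit-normal "raw" variable
    mu = pm.Deterministic("mu", 0.09 + z * 2.5)  # implied: mu ~ Normal(0.09, 2.5)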
Example #6
    def test_shared_data_as_rv_input(self):
        """
        Allow pm.Data to be used as input for other RVs.
        See https://github.com/pymc-devs/pymc3/issues/3842
        """
        with pm.Model() as m:
            x = pm.Data("x", [1.0, 2.0, 3.0])
            y = pm.Normal("y", mu=x, size=(2, 3))
            assert y.eval().shape == (2, 3)
            idata = pm.sample(
                chains=1,
                tune=500,
                draws=550,
                return_inferencedata=True,
                compute_convergence_checks=False,
            )
        samples = idata.posterior["y"]
        assert samples.shape == (1, 550, 2, 3)

        np.testing.assert_allclose(np.array([1.0, 2.0, 3.0]),
                                   x.get_value(),
                                   atol=1e-1)
        np.testing.assert_allclose(np.array([1.0, 2.0, 3.0]),
                                   samples.mean(("chain", "draw", "y_dim_0")),
                                   atol=1e-1)

        with m:
            pm.set_data({"x": np.array([2.0, 4.0, 6.0])})
            assert y.eval().shape == (2, 3)
            idata = pm.sample(
                chains=1,
                tune=500,
                draws=620,
                return_inferencedata=True,
                compute_convergence_checks=False,
            )
        samples = idata.posterior["y"]
        assert samples.shape == (1, 620, 2, 3)

        np.testing.assert_allclose(np.array([2.0, 4.0, 6.0]),
                                   x.get_value(),
                                   atol=1e-1)
        np.testing.assert_allclose(np.array([2.0, 4.0, 6.0]),
                                   samples.mean(("chain", "draw", "y_dim_0")),
                                   atol=1e-1)
Example #7
def process_numexpr(store, numexp):
    # Base Cases
    if numexp.data == 'number':
        return float(numexp.children[0].value)
    if numexp.data == 'id':
        var = numexp.children[0].value
        rv = store.lookup_rv(var)
        if rv is not None:
            return rv
        data = store.lookup_data(var)
        if data is not None:
            # Data is fixed, not a rv, but hack it into rv because it should work
            with store.model:
                d = pm.Data(var, data)
            store.add_rv(var, d)
            return d
    if numexp.data == 'call':
        # In this case, the first child is the identifier of the fn to call, the rest are the args
        children = numexp.children[1:]
    else:
        children = numexp.children

    # Recursive cases
    processed_children = [process_numexpr(store, child) for child in children]
    if numexp.data == 'sum':
        return processed_children[0] + processed_children[1]
    elif numexp.data == 'difference':
        return processed_children[0] - processed_children[1]
    elif numexp.data == 'product':
        return processed_children[0] * processed_children[1]
    elif numexp.data == 'quotient':
        return processed_children[0] / processed_children[1]
    elif numexp.data == 'negation':
        return -processed_children[0]
    elif numexp.data == 'matmul':
        return processed_children[0] @ processed_children[1]
    elif numexp.data == 'call':
        f_name = numexp.children[0].value
        # Will throw if f_name is not a tt function
        return getattr(pm.math, f_name)(*processed_children)
    elif numexp.data == 'parantheses':
        return processed_children[0]
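
A hedged usage sketch (the grammar string, its start rule, and the `store` object are assumptions; the rule names, including the misspelled 'parantheses', must match those checked above; lark supplies Tree nodes with `.data`/`.children`):

# from lark import Lark
# tree = Lark(grammar, start='numexp').parse('exp(x) + 2 * y')
# expr = process_numexpr(store, tree)  # -> a PyMC3/Theano expression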
Example #8
    def test_sample(self):
        x = np.random.normal(size=100)
        y = x + np.random.normal(scale=1e-2, size=100)

        x_pred = np.linspace(-3, 3, 200, dtype="float32")

        with pm.Model():
            x_shared = pm.Data("x_shared", x)
            b = pm.Normal("b", 0.0, 10.0)
            pm.Normal("obs", b * x_shared, np.sqrt(1e-2), observed=y)

            prior_trace0 = pm.sample_prior_predictive(1000)
            trace = pm.sample(1000, init=None, tune=1000, chains=1)
            pp_trace0 = pm.sample_posterior_predictive(trace, 1000)
            pp_trace01 = pm.fast_sample_posterior_predictive(trace, 1000)

            x_shared.set_value(x_pred)
            prior_trace1 = pm.sample_prior_predictive(1000)
            pp_trace1 = pm.sample_posterior_predictive(trace, samples=1000)
            pp_trace11 = pm.fast_sample_posterior_predictive(trace,
                                                             samples=1000)

        assert prior_trace0["b"].shape == (1000, )
        assert prior_trace0["obs"].shape == (1000, 100)
        assert prior_trace1["obs"].shape == (1000, 200)

        assert pp_trace0["obs"].shape == (1000, 100)
        assert pp_trace01["obs"].shape == (1000, 100)

        np.testing.assert_allclose(x, pp_trace0["obs"].mean(axis=0), atol=1e-1)
        np.testing.assert_allclose(x,
                                   pp_trace01["obs"].mean(axis=0),
                                   atol=1e-1)

        assert pp_trace1["obs"].shape == (1000, 200)
        assert pp_trace11["obs"].shape == (1000, 200)

        np.testing.assert_allclose(x_pred,
                                   pp_trace1["obs"].mean(axis=0),
                                   atol=1e-1)
        np.testing.assert_allclose(x_pred,
                                   pp_trace11["obs"].mean(axis=0),
                                   atol=1e-1)
Example #9
    def test_explicit_coords(self):
        N_rows = 5
        N_cols = 7
        data = np.random.uniform(size=(N_rows, N_cols))
        coords = {
            "rows": [f"R{r+1}" for r in range(N_rows)],
            "columns": [f"C{c+1}" for c in range(N_cols)]
        }
        # pass coordinates explicitly, use numpy array in Data container
        with pm.Model(coords=coords) as pmodel:
            pm.Data('observations', data, dims=("rows", "columns"))

        assert "rows" in pmodel.coords
        assert pmodel.coords["rows"] == ['R1', 'R2', 'R3', 'R4', 'R5']
        assert "columns" in pmodel.coords
        assert pmodel.coords["columns"] == [
            'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7'
        ]
        assert pmodel.RV_dims == {'observations': ('rows', 'columns')}
Example #11
    def test_symbolic_coords(self):
        """
        In v4 dimensions can be created without passing coordinate values.
        Their lengths are then automatically linked to the corresponding Tensor dimension.
        """
        with pm.Model() as pmodel:
            intensity = pm.Data("intensity",
                                np.ones((2, 3)),
                                dims=("row", "column"))
            assert "row" in pmodel.dim_lengths
            assert "column" in pmodel.dim_lengths
            assert isinstance(pmodel.dim_lengths["row"], TensorVariable)
            assert isinstance(pmodel.dim_lengths["column"], TensorVariable)
            assert pmodel.dim_lengths["row"].eval() == 2
            assert pmodel.dim_lengths["column"].eval() == 3

            intensity.set_value(floatX(np.ones((4, 5))))
            assert pmodel.dim_lengths["row"].eval() == 4
            assert pmodel.dim_lengths["column"].eval() == 5
Example #12
def test_save_load(tmp_path_factory, c, sig_defs):

    # make small for speed
    c = c[0:30]
    sig_defs = sig_defs[0:5]

    dataset_args = {'foo': 'bar'}
    model_args = {'bar': 'baz'}
    pymc3_args = {'baz': 'foo'}

    # train a model with 5 sigs
    with pm.Model() as model:
        data = pm.Data("data", c)
        N = data.sum(1).reshape((c.shape[0], 1))
        activities = ch_dirichlet("activities",
                                  a=np.ones(5),
                                  shape=(c.shape[0], 5))
        B = pm.math.dot(activities, sig_defs)
        pm.Multinomial('corpus', n=N, p=B, observed=data)

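        # pm.ADVI() returns an Inference object; .fit() runs the optimization and
        # records the loss history in .hist, which the checks below compare.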
        trace = pm.ADVI()
        trace.fit()

    # checkpoint
    fp = tmp_path_factory.mktemp("ckp") / "vanilla_lda.ckp"
    save_checkpoint(fp, model, trace, dataset_args, model_args, pymc3_args)

    # load model
    m2, t2, dataset_args2, model_args2, pymc3_args2 = load_checkpoint(fp)

    # all params should be identical
    # checks are weak because __eq__ methods are not provided
    #assert str(model) == str(m2), 'model load failed'
    assert np.allclose(trace.hist, t2.hist), 'trace load failed'
    assert dataset_args == dataset_args2, 'dataset_args load failed'
    assert model_args == model_args2, 'model_args load failed'
    assert pymc3_args == pymc3_args2, 'pymc3_args load failed'

    # with same seed, both models should tune with same result
    # test model tuning
    trace.refine(100)
    t2.refine(100)
    assert np.allclose(trace.hist, t2.hist), 'trace tuning failed'
Example #13
def pooled(t, y, coords, dims, sigma=0.5):

    with pm.Model(coords=coords) as m:

        # shared variables
        t_ = pm.Data('t_shared', t, dims=dims)

        # specify priors for parameters & model error
        beta = pm.Normal("beta", mu=0, sigma=sigma)
        alpha = pm.Normal("alpha", mu=1.5, sigma=sigma)
        sigma_err = pm.HalfNormal("sigma", sigma=sigma)  # avoid shadowing the `sigma` argument

        # calculate mu
        mu = alpha + beta * t_

        # likelihood
        y_pred = pm.Normal("y_pred", mu=mu, sigma=sigma_err, observed=y)

    return m
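
A hypothetical call, just to illustrate the expected shapes (all names here are assumptions, not from the source):

# m = pooled(t=time_idx, y=y_obs, coords={"obs_id": np.arange(len(y_obs))}, dims="obs_id")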
Example #14
    def test_implicit_coords_dataframe(self):
        N_rows = 5
        N_cols = 7
        df_data = pd.DataFrame()
        for c in range(N_cols):
            df_data[f'Column {c+1}'] = np.random.normal(size=(N_rows, ))
        df_data.index.name = 'rows'
        df_data.columns.name = 'columns'

        # infer coordinates from index and columns of the DataFrame
        with pm.Model() as pmodel:
            pm.Data('observations',
                    df_data,
                    dims=("rows", "columns"),
                    export_index_as_coords=True)

        assert "rows" in pmodel.coords
        assert "columns" in pmodel.coords
        assert pmodel.RV_dims == {'observations': ('rows', 'columns')}
Example #15
    def get_model(self, data: xr.Dataset) -> pm.Model:
        # transpose the dataset to ensure that it is the way we expect
        data = data.transpose("sentence", "word")
        active = [None for _ in range(1 + self.num_topics)]
        with pm.Model() as model:
            S = pm.Data("S_obs", data.S.values[0])
            active[0] = pm.Bernoulli("active[0]", p=1.0)
            for j in range(1, self.num_topics + 1):
                # note: if p = 1 - exp(-w) then logit(p) = log(1-exp(-w)) + w
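                # Derivation of that identity: 1 - p = exp(-w), so
                # logit(p) = log(p) - log(1 - p) = log(1 - exp(-w)) + w,
                # which is pm.math.log1mexp(w) + w as used below.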
                w = self.edge_weight[j, :j] @ active[:j]
                topic_logit = pm.math.log1mexp(w) + w
                active[j] = pm.Bernoulli(f"active[{j}]", logit_p=topic_logit)
            w = pm.math.dot(self.edge_weight[1 + self.num_topics:], active)
            word_logit = pm.math.log1mexp(w) + w
            pm.Bernoulli("S",
                         logit_p=word_logit,
                         observed=S,
                         shape=self.num_words)

        return model
Example #16
    def get_model_0(self, dataframe, prior_mu=(0.80, 2.74), prior_sigma=1.0):
        """ Compute marginal-log-likelihood of M0: all variances are equal in all blocks, but unknown. 
        For each user all data is generated by the same distribution.
        
        No synergy effects can be observed for this particular bi-manual task.
        
        Prior belief: Unlikely. Synergy effects have been observed time and time again.
        But thumbs could receive independent control signals as they're quite independent in the musculoskeletal system.
        
        :param prior_mu: Prior parameters for the total mean and its spread (standard error).
        :type prior_mu: tuple[float]
        :param prior_sigma: Prior parameters for the mean standard deviation. Lambda of exponential function.
        :type prior_sigma: float
        :type dataframe: pandas.DataFrame
        :rtype: pm.Model
        """
        coords = self.get_coordinates(dataframe)

        with pm.Model(coords=coords) as model:
            model.name = "M0"
            # Prior. non-centered.
            z = pm.Normal("z", mu=0, sigma=1)
            mu = pm.Deterministic("mu", prior_mu[0] + z * prior_mu[1])
            # Our coordinates aren't in long format, so we need the same prior twice to cover both directions.
            theta = pm.math.stack((mu, mu)).T
            # Model error.
            sigma = pm.Exponential("sigma", lam=prior_sigma)

            # Observed variable.
            projection_obs = pm.Data("projection_obs",
                                     dataframe[coords['Direction']],
                                     dims=('obs_id', 'Direction'))
            # Using user_idx to index theta somehow tells which prior belongs to which user.
            projection = pm.Normal("projection",
                                   mu=theta,
                                   sigma=sigma,
                                   observed=projection_obs,
                                   dims=('obs_id', 'Direction'))

        self.update_sample_params(model.name)  # defaults
        return model
Example #17
    def test_ovewrite_model_coords_dims(self):
        """Check coords and dims from model object can be partially overwrited."""
        dim1 = ["a", "b"]
        new_dim1 = ["c", "d"]
        coords = {"dim1": dim1, "dim2": ["c1", "c2"]}
        x_data = np.arange(4).reshape((2, 2))
        y = x_data + np.random.normal(size=(2, 2))
        with pm.Model(coords=coords):
            x = pm.Data("x", x_data, dims=("dim1", "dim2"))
            beta = pm.Normal("beta", 0, 1, dims="dim1")
            _ = pm.Normal("obs",
                          x * beta,
                          1,
                          observed=y,
                          dims=("dim1", "dim2"))
            trace = pm.sample(100, tune=100)
            idata1 = from_pymc3(trace)
            idata2 = from_pymc3(trace,
                                coords={"dim1": new_dim1},
                                dims={"beta": ["dim2"]})

        test_dict = {
            "posterior": ["beta"],
            "observed_data": ["obs"],
            "constant_data": ["x"]
        }
        fails1 = check_multiple_attrs(test_dict, idata1)
        assert not fails1
        fails2 = check_multiple_attrs(test_dict, idata2)
        assert not fails2
        assert "dim1" in list(idata1.posterior.beta.dims)
        assert "dim2" in list(idata2.posterior.beta.dims)
        assert np.all(idata1.constant_data.x.dim1.values == np.array(dim1))
        assert np.all(
            idata1.constant_data.x.dim2.values == np.array(["c1", "c2"]))
        assert np.all(idata2.constant_data.x.dim1.values == np.array(new_dim1))
        assert np.all(
            idata2.constant_data.x.dim2.values == np.array(["c1", "c2"]))
Example #18
    def get_model_7(
        self,
        dataframe,
        prior_mu=(0.09, 2.5),
        prior_mu_diff1=2.5,  # Difference between orthogonal projection block 1 and 3 to block 2.
        prior_mu_diff2=2.5,  # Difference between orthogonal projection block 2 to parallel 1.
        prior_sigma=1.0):
        """ Compute marginal-log-likelihood of M3: Priming/Learning effect after constrained task.
        
        Strong synergy in first unconstrained task, no synergy in constrained task, weak/no synergy in the
        unconstrained task. With optimal control we'd expect strong synergies again as soon as constraints are lifted.
        This model contradicts the prediction and postulates a priming effect of the constrained task onto the
        following unconstrained task.
        # ToDo: insert reference on reduced synergy with higher precision through training.
        
        Prior belief: Likely
        
        :type dataframe: pandas.DataFrame
        :rtype: pm.Model
        """
        coords = self.get_coordinates(dataframe)
        block_mx = self.get_block_dmatrix(dataframe)

        with pm.Model(coords=coords) as model:
            model.name = "M7"
            blocks_idx = pm.Data('blocks_idx',
                                 block_mx.values,
                                 dims=('obs_id', 'Block'))
            # Prior blocks 1 and 3 orthogonal, non-centered.
            z_blocks13_ortho = pm.Normal("z_blocks_1_3", mu=0, sigma=1)
            mu_blocks13_ortho = pm.Deterministic(
                'mu_blocks_1_3_orthogonal',
                prior_mu[0] + z_blocks13_ortho * prior_mu[1])
            # Positive differences. First for direction, second for block 2.
            diff = pm.HalfNormal('mu_diff',
                                 sigma=np.array(
                                     [prior_mu_diff1, prior_mu_diff2]),
                                 shape=2)

            mu_block2 = pm.Deterministic('mu_block_2',
                                         mu_blocks13_ortho + diff[0])
            mu_block1_para = pm.Deterministic(
                'mu_block_1_parallel', mu_blocks13_ortho + diff[0] + diff[1])
            # Variability in block 2 should be the same for both directions.
            theta_ortho = (1 - blocks_idx[:, 1]
                           ) * mu_blocks13_ortho + blocks_idx[:, 1] * mu_block2
            theta_para = blocks_idx[:, 0] * mu_block1_para + (
                1 - blocks_idx[:, 0]) * mu_block2

            # Model error:
            sigma = pm.Exponential("sigma", lam=prior_sigma, dims='Direction')
            # Expected deviation per direction:
            theta = pm.math.stack((theta_ortho, theta_para)).T
            # Observed variable.
            projection_obs = pm.Data("projection_obs",
                                     dataframe[coords['Direction']],
                                     dims=('obs_id', 'Direction'))
            # Using user_idx to index theta somehow tells which prior belongs to which user.
            projection = pm.Normal("projection",
                                   mu=theta,
                                   sigma=sigma,
                                   observed=projection_obs,
                                   dims=('obs_id', 'Direction'))

        self.update_sample_params(model.name, tune=4000, target_accept=0.9)
        return model
Example #19
# set shared theano variables
y_data = deepcopy(y_train)
x_1_data = hierachical_variable_train.to_numpy(dtype="int")
x_2_data = X_train.loc[:, 'var_02'].to_numpy(dtype="int")
x_3_data = X_train.loc[:, 'var_03'].to_numpy(dtype="int")
x_4_data = X_train.loc[:, 'var_04'].to_numpy(dtype="int")
x_5_data = X_train.loc[:, 'var_05'].to_numpy(dtype="int")
x_6_data = X_train.loc[:, 'var_06'].to_numpy(dtype="int")
x_7_data = X_train.loc[:, 'var_07'].to_numpy(dtype="int")
x_8_data = X_train.loc[:, 'var_08'].to_numpy(dtype="int")

# build model
with pm.Model() as glm_model:

    # create data
    y_shared = pm.Data('y_shared', y_data)
    x_1_shared = pm.Data('x_1_shared', x_1_data)
    x_2_shared = pm.Data('x_2_shared', x_2_data)
    x_3_shared = pm.Data('x_3_shared', x_3_data)
    x_4_shared = pm.Data('x_4_shared', x_4_data)
    x_5_shared = pm.Data('x_5_shared', x_5_data)
    x_6_shared = pm.Data('x_6_shared', x_6_data)
    x_7_shared = pm.Data('x_7_shared', x_7_data)
    x_8_shared = pm.Data('x_8_shared', x_8_data)

    # COMPLEX MODEL:
    # Priors
    mu_a = pm.Normal('mu_a', mu=0., sd=2)
    sigma_a = pm.HalfCauchy('sigma_a', 5)
    mu_b_1 = pm.Normal('mu_b_1', mu=0., sd=2)
    sigma_b_1 = pm.HalfCauchy('sigma_b_1', 5)
Example #20
    def get_model_5(
        self,
        dataframe,
        prior_mu=(0.09, 2.5),
        prior_mu_diff1=2.5,  # Difference between orthogonal projection block 1 and 3 to block 2.
        prior_mu_diff2=2.5,  # Difference between orthogonal projection block 2 to parallel 1 and 3.
        prior_sigma=1.0):
        """ Compute marginal-log-likelihood of M1: Synergy affects task performance by increasing precision.
        
        Following predictions made by Todorov (2004), in the constrained task (block 2) the variance in the direction
        parallel to the UCM can be reduced "at the expense of increased variance in the task-relevant direction"
        (orthogonal to UCM) in comparison to the unconstrained task (blocks 1&3).
        Parallel and orthogonal projection variances are expected to be roughly the same in the constrained task,
        since the control signals are expected to be the same.
        
        Prior belief: Likely. The synergy effect might stem from an upper level in hierarchical control.
        Increased control signals in constrained task could lead to more muscle noise.
        
        :type dataframe: pandas.DataFrame
        :rtype: pm.Model
        """
        coords = self.get_coordinates(dataframe)
        block_mx = self.get_block_dmatrix(dataframe)

        with pm.Model(coords=coords) as model:
            model.name = "M5"
            block2_idx = pm.Data('block2_idx',
                                 block_mx[2].values,
                                 dims='obs_id')
            # Prior blocks 1 and 3 orthogonal, non-centered.
            z_blocks13_ortho = pm.Normal("z_blocks_1_3", mu=0, sigma=1)
            mu_blocks13_ortho = pm.Deterministic(
                'mu_blocks_1_3_orthogonal',
                prior_mu[0] + z_blocks13_ortho * prior_mu[1])
            # Positive differences. First for direction, second for block 2.
            diff = pm.HalfNormal('mu_diff',
                                 sigma=np.array(
                                     [prior_mu_diff1, prior_mu_diff2]),
                                 shape=2)

            mu_block2 = pm.Deterministic('mu_block_2',
                                         mu_blocks13_ortho + diff[0])
            mu_blocks13_para = pm.Deterministic(
                'mu_blocks_1_3_parallel',
                mu_blocks13_ortho + diff[0] + diff[1])
            # Variability in block 2 should be the same for both directions.
            theta_ortho = (
                1 - block2_idx) * mu_blocks13_ortho + block2_idx * mu_block2
            theta_para = (
                1 - block2_idx) * mu_blocks13_para + block2_idx * mu_block2
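            # block2_idx is a 0/1 indicator, so each theta_* picks mu_block2 where
            # block2_idx == 1 and the blocks-1&3 mean elsewhere.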

            # Model error:
            sigma = pm.Exponential("sigma", lam=prior_sigma, dims='Direction')
            # Expected deviation per direction:
            theta = pm.math.stack((theta_ortho, theta_para)).T
            # Observed variable.
            projection_obs = pm.Data("projection_obs",
                                     dataframe[coords['Direction']],
                                     dims=('obs_id', 'Direction'))
            # Using user_idx to index theta somehow tells which prior belongs to which user.
            projection = pm.Normal("projection",
                                   mu=theta,
                                   sigma=sigma,
                                   observed=projection_obs,
                                   dims=('obs_id', 'Direction'))

        self.update_sample_params(model.name, tune=4000, target_accept=0.9)
        return model
Example #21
    def get_model_6(self,
                    dataframe,
                    prior_mu=(0.09, 2.5),
                    prior_diff_scale=2.5,
                    prior_sigma=1.0):
        """ Compute marginal-log-likelihood of M2: Synergy has no effect on task performance.
        
        Orthogonal variances are small in all blocks.
        Danion, F., & Latash, M. L. (2011): Same performance variance without synergy in constrained task (block 2).
        Parallel deviations in block 2 are on average as large as orthogonal deviations in all blocks,
        and larger in blocks 1&3 (because they are uncontrolled).
        
        Prior belief: Likely. A thumb's movement does not directly influence the other through the
        musculoskeletal system (but maybe through the device connecting the two). We just haven't exhausted our control
        over all degrees of freedom yet in the unconstrained task, but when necessary in the constrained task, we can
        do it.
        Even increased control signals could be so low that we fail to detect the effect of increased muscle noise.
        
        :type dataframe: pandas.DataFrame
        :rtype: pm.Model
        """
        coords = self.get_coordinates(dataframe)
        block_mx = self.get_block_dmatrix(dataframe)

        with pm.Model(coords=coords) as model:
            model.name = "M6"
            block2_idx = pm.Data('block2_idx',
                                 block_mx[2].values,
                                 dims='obs_id')
            # Prior orthogonal projections, non-centered.
            z_ortho = pm.Normal("z_ortho", mu=0, sigma=1)
            mu_ortho = pm.Deterministic('mu_orthogonal',
                                        prior_mu[0] + z_ortho * prior_mu[1])
            # Positive difference to parallel projections in blocks 1 and 3.
            diff = pm.HalfNormal('mu_diff', sigma=prior_diff_scale)
            mu_parallel_blocks13 = pm.Deterministic('mu_parallel_blocks_1_3',
                                                    mu_ortho + diff)

            # Expected deviation per direction and block:
            # Must be same length as theta_parallel for stacking.
            theta_ortho = mu_ortho * np.ones(len(dataframe))
            theta_parallel = block2_idx * mu_ortho + (
                1 - block2_idx) * mu_parallel_blocks13
            theta = pm.math.stack((theta_ortho, theta_parallel)).T

            # Model error:
            sigma = pm.Exponential("sigma", lam=prior_sigma, dims='Direction')

            # Observed variable.
            projection_obs = pm.Data("projection_obs",
                                     dataframe[coords['Direction']],
                                     dims=('obs_id', 'Direction'))
            # Using user_idx to index theta somehow tells which prior belongs to which user.
            projection = pm.Normal("projection",
                                   mu=theta,
                                   sigma=sigma,
                                   observed=projection_obs,
                                   dims=('obs_id', 'Direction'))

        self.update_sample_params(model.name, tune=5000, target_accept=0.95)
        return model
Example #22
19 147 20
20 152 24"""
data = pd.read_csv(io.StringIO(golf_data), sep=" ")

# model inference
coords = {"distance": data.distance}
fileName = 'golf_geometry_PyMC3'
samples = 2000
chains = 2
tune = 1000
geometry_model = pm.Model(coords=coords)
with geometry_model:
    # To store the n-parameter of the Binomial dist in the constant group of the
    # ArviZ InferenceData, you should always call it 'n' so it can be retrieved.
    n = pm.Data('n', data.tries)
    sigma_angle = pm.HalfNormal('sigma_angle')
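    # Geometry: the putt succeeds if the angular error is within
    # +/- arcsin((CUP_RADIUS - BALL_RADIUS) / distance); with that error
    # distributed Normal(0, sigma_angle), the success probability is
    # 2 * Phi(tolerance / sigma_angle) - 1, as coded below.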
    p_goes_in = pm.Deterministic(
        'p_goes_in',
        2 * Phi(tt.arcsin((CUP_RADIUS - BALL_RADIUS) / data.distance) / sigma_angle) - 1,
        dims='distance')
    successes = pm.Binomial('successes', n=n, p=p_goes_in,
                            observed=data.successes, dims='distance')
    # inference
    trace_g = pm.sample(draws=samples, chains=chains, tune=tune)
    prior_g = pm.sample_prior_predictive(samples=samples)
    posterior_predictive_g = pm.sample_posterior_predictive(trace_g, samples=samples)

## STEP 1
# will also capture all the sampler statistics
data_g = az.from_pymc3(trace=trace_g, prior=prior_g, posterior_predictive=posterior_predictive_g)

## STEP 2
# dag
dag_g = get_dag(geometry_model)    
Example #23
    def get_model_3(self,
                    dataframe,
                    prior_mu_ortho=(0.09, 2.5),
                    prior_diff_dir=2.5,
                    prior_diff_block=3.5,
                    prior_sigma=1.0):
        """ Compute marginal-log-likelihood of M4: Both main effects, for direction and block.
                                                   (Extraneous cognitive load I).
        
        Constrained task is perceived more difficult by visual instructions. Strong synergy in simpler task.
        
        Prior belief: Likely for users with no video-game experience. It's also likely that participants don't figure
        out the control scheme of df1=df2=target/2, since only one df seems constrained which might get more attention.
        
        :type dataframe: pandas.DataFrame
        :rtype: pm.Model
        """
        coords = self.get_coordinates(dataframe)
        block_mx = self.get_block_dmatrix(dataframe)

        with pm.Model(coords=coords) as model:
            model.name = "M3"
            block2_idx = pm.Data('block2_idx',
                                 block_mx[2].values,
                                 dims='obs_id')
            # Prior blocks 1 and 3 orthogonal, non-centered.
            z_blocks13_ortho = pm.Normal("z_blocks_1_3", mu=0, sigma=1)
            mu_blocks13_ortho = pm.Deterministic(
                'mu_blocks_1_3_orthogonal',
                prior_mu_ortho[0] + z_blocks13_ortho * prior_mu_ortho[1])

            # Positive differences. First for direction, second for block 2.
            diff = pm.HalfNormal('mu_diff',
                                 sigma=np.array(
                                     [prior_diff_dir, prior_diff_block]),
                                 shape=2)

            mu_block2_ortho = pm.Deterministic('mu_block_2_orthogonal',
                                               mu_blocks13_ortho + diff[1])
            mu_blocks13_para = pm.Deterministic('mu_blocks_1_3_parallel',
                                                mu_blocks13_ortho + diff[0])
            mu_block2_para = pm.Deterministic(
                'mu_block_2_parallel', mu_blocks13_ortho + diff[0] + diff[1])

            mu_ortho = (1 - block2_idx
                        ) * mu_blocks13_ortho + block2_idx * mu_block2_ortho
            mu_para = (1 - block2_idx
                       ) * mu_blocks13_para + block2_idx * mu_block2_para

            # Model error:
            sigma = pm.Exponential("sigma", lam=prior_sigma, dims='Direction')
            # Expected deviation per direction:
            theta = pm.math.stack((mu_ortho, mu_para)).T
            # Observed variable.
            projection_obs = pm.Data("projection_obs",
                                     dataframe[coords['Direction']],
                                     dims=('obs_id', 'Direction'))
            # Using user_idx to index theta somehow tells which prior belongs to which user.
            projection = pm.Normal("projection",
                                   mu=theta,
                                   sigma=sigma,
                                   observed=projection_obs,
                                   dims=('obs_id', 'Direction'))

        self.update_sample_params(model.name, target_accept=0.9)
        return model
Example #24
def model_factory(X_continuos, X_categorical_selection, X_categorical_gender,
                  X_categorical_audience, X_categorical_browser,
                  X_categorical_city, X_categorical_device, y_data,
                  variables_to_be_used, variant_df, arviz_inference, samples):
    """ please check run_model_oob's function docstring below for a description  
        of the inputs.
    """

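    # NOTE: `coords` is not a parameter of this function; it must be defined in
    # the enclosing scope when model_factory is called.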
    with pm.Model(coords=coords) as varying_intercept_slope_noncentered:

        # build tensors from Pandas DataFrame/Series
        X_continuos_var = pm.Data('X_continuos',
                                  X_continuos,
                                  dims=("X_continuos_index"))
        X_categorical_selection_var = pm.Data(
            'X_categorical_selection',
            X_categorical_selection,
            dims=("X_categorical_selection_index"))
        X_categorical_gender_var = pm.Data('X_categorical_gender',
                                           X_categorical_gender,
                                           dims=("X_categorical_gender_index"))
        X_categorical_audience_var = pm.Data(
            'X_categorical_audience',
            X_categorical_audience,
            dims=("X_categorical_audience_index"))
        X_categorical_browser_var = pm.Data(
            'X_categorical_browser',
            X_categorical_browser,
            dims=("X_categorical_browser_index"))
        X_categorical_city_var = pm.Data('X_categorical_city',
                                         X_categorical_city,
                                         dims=("X_categorical_city_index"))
        X_categorical_device_var = pm.Data('X_categorical_device',
                                           X_categorical_device,
                                           dims=("X_categorical_device_index"))

        # hyperpriors for intercept
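        # (the leading 0 concatenated below pins the first variant to a reference
        # level, presumably to keep the softmax likelihood identifiable)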
        mu_alpha_tmp = pm.Laplace('mu_alpha_tmp',
                                  mu=0.05,
                                  b=1.,
                                  shape=(variant_df.shape[0] - 1))
        mu_alpha = theano.tensor.concatenate([[0], mu_alpha_tmp])

        sigma_alpha_tmp = pm.HalfNormal('sigma_alpha_tmp',
                                        sigma=1.,
                                        shape=(variant_df.shape[0] - 1))
        sigma_alpha = theano.tensor.concatenate([[0], sigma_alpha_tmp])

        # prior for non-centered random intercepts
        u = pm.Laplace('u', mu=0.05, b=1.)

        # random intercept
        alpha_eq = mu_alpha + u * sigma_alpha
        alpha_eq_deter = pm.Deterministic('alpha_eq_deter', alpha_eq)
        alpha = pm.Laplace('alpha',
                           mu=alpha_eq_deter,
                           b=1.,
                           shape=(variant_df.shape[0]))

        #######################################################################

        # hyperpriors for slopes (continuos)
        mu_beta_continuos_tmp = pm.Laplace('mu_beta_continuos_tmp',
                                           mu=0.05,
                                           b=1.,
                                           shape=(1,
                                                  (variant_df.shape[0] - 2)))
        mu_beta_continuos = theano.tensor.concatenate(
            [np.zeros((1, 1)), mu_beta_continuos_tmp], axis=1)
        sigma_beta_continuos_tmp = pm.HalfNormal(
            'sigma_beta_continuos_tmp',
            sigma=1.,
            shape=(1, (variant_df.shape[0] - 2)))
        sigma_beta_continuos = theano.tensor.concatenate(
            [np.zeros((1, 1)), sigma_beta_continuos_tmp], axis=1)

        # prior for non-centered random slope (continuos)
        g = pm.Laplace('g', mu=0.05, b=1., shape=(1, 1))

        # random slopes (continuos)
        beta_continuos_eq = mu_beta_continuos + pm.math.dot(
            g, sigma_beta_continuos)
        beta_con_deter_percentage = pm.Deterministic(
            'beta_con_deter_percentage', beta_continuos_eq)
        beta_con_tmp_percentage = pm.Laplace('beta_con_tmp_percentage',
                                             mu=beta_con_deter_percentage,
                                             b=1.,
                                             shape=(1,
                                                    (variant_df.shape[0] - 1)))
        beta_con_percentage = theano.tensor.concatenate(
            [np.zeros((1, 1)), beta_con_tmp_percentage], axis=1)

        # expected value (continuos)
        dot_product_continuos = pm.math.dot(
            theano.tensor.shape_padaxis(X_continuos_var, axis=1),
            beta_con_percentage)

        #######################################################################

        # hyperpriors for slopes (categorical_selection)
        mu_beta_categorical_selection_tmp = pm.Laplace(
            'mu_beta_categorical_selection_tmp',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_selection)),
                   (variant_df.shape[0] - 2)))
        mu_beta_categorical_selection = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_selection)), 1)),
            mu_beta_categorical_selection_tmp
        ],
                                                                  axis=1)
        sigma_beta_categorical_selection_tmp = pm.HalfNormal(
            'sigma_beta_categorical_selection_tmp',
            sigma=1.,
            shape=(len(pd.unique(X_categorical_selection)),
                   (variant_df.shape[0] - 2)))
        sigma_beta_categorical_selection = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_selection)), 1)),
            sigma_beta_categorical_selection_tmp
        ],
                                                                     axis=1)

        # prior for non-centered random slope (categorical_selection)
        non_centered_selection = pm.Laplace(
            'non_centered_selection',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_selection)),
                   len(pd.unique(X_categorical_selection))))

        #random slopes (categorical_selection)
        beta_categorical_eq_selection = mu_beta_categorical_selection + pm.math.dot(
            non_centered_selection, sigma_beta_categorical_selection)
        beta_cat_deter_selection = pm.Deterministic(
            'beta_cat_deter_selection', beta_categorical_eq_selection)
        beta_cat_tmp_selection = pm.Laplace(
            'beta_cat_tmp_selection',
            mu=beta_cat_deter_selection,
            b=1.,
            shape=(len(pd.unique(X_categorical_selection)),
                   (variant_df.shape[0] - 1)))
        beta_cat_selection = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_selection)), 1)),
            beta_cat_tmp_selection
        ],
                                                       axis=1)

        #######################################################################

        # hyperpriors for slopes (categorical_gender)
        mu_beta_categorical_gender_tmp = pm.Laplace(
            'mu_beta_categorical_gender_tmp',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_gender)),
                   (variant_df.shape[0] - 2)))
        mu_beta_categorical_gender = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_gender)), 1)),
            mu_beta_categorical_gender_tmp
        ],
                                                               axis=1)
        sigma_beta_categorical_gender_tmp = pm.HalfNormal(
            'sigma_beta_categorical_gender_tmp',
            sigma=1.,
            shape=(len(pd.unique(X_categorical_gender)),
                   (variant_df.shape[0] - 2)))
        sigma_beta_categorical_gender = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_gender)), 1)),
            sigma_beta_categorical_gender_tmp
        ],
                                                                  axis=1)

        # prior for non-centered random slope (categorical_gender)
        non_centered_gender = pm.Laplace(
            'non_centered_gender',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_gender)),
                   len(pd.unique(X_categorical_gender))))

        #random slopes (categorical_gender)
        beta_categorical_eq_gender = mu_beta_categorical_gender + pm.math.dot(
            non_centered_gender, sigma_beta_categorical_gender)
        beta_cat_deter_gender = pm.Deterministic('beta_cat_deter_gender',
                                                 beta_categorical_eq_gender)
        beta_cat_tmp_gender = pm.Laplace('beta_cat_tmp_gender',
                                         mu=beta_cat_deter_gender,
                                         b=1.,
                                         shape=(len(
                                             pd.unique(X_categorical_gender)),
                                                (variant_df.shape[0] - 1)))
        beta_cat_gender = theano.tensor.concatenate([
            np.zeros(
                (len(pd.unique(X_categorical_gender)), 1)), beta_cat_tmp_gender
        ],
                                                    axis=1)

        # hyperpriors for slopes (categorical_audience)
        mu_beta_categorical_audience_tmp = pm.Laplace(
            'mu_beta_categorical_audience_tmp',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_audience)),
                   (variant_df.shape[0] - 2)))
        mu_beta_categorical_audience = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_audience)), 1)),
            mu_beta_categorical_audience_tmp
        ],
                                                                 axis=1)
        sigma_beta_categorical_audience_tmp = pm.HalfNormal(
            'sigma_beta_categorical_audience_tmp',
            sigma=1.,
            shape=(len(pd.unique(X_categorical_audience)),
                   (variant_df.shape[0] - 2)))
        sigma_beta_categorical_audience = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_audience)), 1)),
            sigma_beta_categorical_audience_tmp
        ],
                                                                    axis=1)

        # prior for non-centered random slope (categorical_audience)
        non_centered_audience = pm.Laplace(
            'non_centered_audience',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_audience)),
                   len(pd.unique(X_categorical_audience))))

        #random slopes (categorical_audience)
        beta_categorical_eq_audience = mu_beta_categorical_audience + pm.math.dot(
            non_centered_audience, sigma_beta_categorical_audience)
        beta_cat_deter_audience = pm.Deterministic(
            'beta_cat_deter_audience', beta_categorical_eq_audience)
        beta_cat_tmp_audience = pm.Laplace(
            'beta_cat_tmp_audience',
            mu=beta_cat_deter_audience,
            b=1.,
            shape=(len(pd.unique(X_categorical_audience)),
                   (variant_df.shape[0] - 1)))
        beta_cat_audience = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_audience)), 1)),
            beta_cat_tmp_audience
        ],
                                                      axis=1)

        #######################################################################

        # hyperpriors for slopes (categorical_browser)
        mu_beta_categorical_browser_tmp = pm.Laplace(
            'mu_beta_categorical_browser_tmp',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_browser)),
                   (variant_df.shape[0] - 2)))
        mu_beta_categorical_browser = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_browser)), 1)),
            mu_beta_categorical_browser_tmp
        ],
                                                                axis=1)
        sigma_beta_categorical_browser_tmp = pm.HalfNormal(
            'sigma_beta_categorical_browser_tmp',
            sigma=1.,
            shape=(len(pd.unique(X_categorical_browser)),
                   (variant_df.shape[0] - 2)))
        sigma_beta_categorical_browser = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_browser)), 1)),
            sigma_beta_categorical_browser_tmp
        ],
                                                                   axis=1)

        # prior for non-centered random slope (categorical_browser)
        non_centered_browser = pm.Laplace(
            'non_centered_browser',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_browser)),
                   len(pd.unique(X_categorical_browser))))

        #random slopes (categorical_browser)
        beta_categorical_eq_browser = mu_beta_categorical_browser + pm.math.dot(
            non_centered_browser, sigma_beta_categorical_browser)
        beta_cat_deter_browser = pm.Deterministic('beta_cat_deter_browser',
                                                  beta_categorical_eq_browser)
        beta_cat_tmp_browser = pm.Laplace(
            'beta_cat_tmp_browser',
            mu=beta_cat_deter_browser,
            b=1.,
            shape=(len(pd.unique(X_categorical_browser)),
                   (variant_df.shape[0] - 1)))
        beta_cat_browser = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_browser)), 1)),
            beta_cat_tmp_browser
        ],
                                                     axis=1)

        #######################################################################

        # hyperpriors for slopes (categorical_city)
        mu_beta_categorical_city_tmp = pm.Laplace(
            'mu_beta_categorical_city_tmp',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_city)),
                   (variant_df.shape[0] - 2)))
        mu_beta_categorical_city = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_city)), 1)),
            mu_beta_categorical_city_tmp
        ],
                                                             axis=1)
        sigma_beta_categorical_city_tmp = pm.HalfNormal(
            'sigma_beta_categorical_city_tmp',
            sigma=1.,
            shape=(len(pd.unique(X_categorical_city)),
                   (variant_df.shape[0] - 2)))
        sigma_beta_categorical_city = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_city)), 1)),
            sigma_beta_categorical_city_tmp
        ],
                                                                axis=1)

        # prior for non-centered random slope (categorical_city)
        non_centered_city = pm.Laplace(
            'non_centered_city',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_city)),
                   len(pd.unique(X_categorical_city))))

        #random slopes (categorical_city)
        beta_categorical_eq_city = mu_beta_categorical_city + pm.math.dot(
            non_centered_city, sigma_beta_categorical_city)
        beta_cat_deter_city = pm.Deterministic('beta_cat_deter_city',
                                               beta_categorical_eq_city)
        beta_cat_tmp_city = pm.Laplace('beta_cat_tmp_city',
                                       mu=beta_cat_deter_city,
                                       b=1.,
                                       shape=(len(
                                           pd.unique(X_categorical_city)),
                                              (variant_df.shape[0] - 1)))
        beta_cat_city = theano.tensor.concatenate([
            np.zeros(
                (len(pd.unique(X_categorical_city)), 1)), beta_cat_tmp_city
        ],
                                                  axis=1)

        #######################################################################

        # hyperpriors for slopes (categorical_device)
        mu_beta_categorical_device_tmp = pm.Laplace(
            'mu_beta_categorical_device_tmp',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_device)),
                   (variant_df.shape[0] - 2)))
        mu_beta_categorical_device = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_device)), 1)),
            mu_beta_categorical_device_tmp
        ],
                                                               axis=1)
        sigma_beta_categorical_device_tmp = pm.HalfNormal(
            'sigma_beta_categorical_device_tmp',
            sigma=1.,
            shape=(len(pd.unique(X_categorical_device)),
                   (variant_df.shape[0] - 2)))
        sigma_beta_categorical_device = theano.tensor.concatenate([
            np.zeros((len(pd.unique(X_categorical_device)), 1)),
            sigma_beta_categorical_device_tmp
        ],
                                                                  axis=1)

        # prior for non-centered random slope (categorical_device)
        non_centered_device = pm.Laplace(
            'non_centered_device',
            mu=0.05,
            b=1.,
            shape=(len(pd.unique(X_categorical_device)),
                   len(pd.unique(X_categorical_device))))

        # random slopes (categorical_device)
        beta_categorical_eq_device = mu_beta_categorical_device + pm.math.dot(
            non_centered_device, sigma_beta_categorical_device)
        beta_cat_deter_device = pm.Deterministic('beta_cat_deter_device',
                                                 beta_categorical_eq_device)
        beta_cat_tmp_device = pm.Laplace(
            'beta_cat_tmp_device',
            mu=beta_cat_deter_device,
            b=1.,
            shape=(len(pd.unique(X_categorical_device)),
                   (variant_df.shape[0] - 1)))
        beta_cat_device = theano.tensor.concatenate(
            [np.zeros((len(pd.unique(X_categorical_device)), 1)),
             beta_cat_tmp_device],
            axis=1)
        # theano.printing.Print('vector', attrs=['shape'])(beta_cat_device)

        #######################################################################

        # hyperpriors for epsilon
        sigma_epsilon = pm.HalfNormal('sigma_epsilon',
                                      sigma=1.,
                                      shape=(variant_df.shape[0]))

        # epsilon
        epsilon = pm.HalfNormal(
            'epsilon',
            sigma=sigma_epsilon,  # not working
            shape=(variant_df.shape[0]))

        #######################################################################

        y_hat_tmp = (alpha + dot_product_continuos +
                     beta_cat_selection[X_categorical_selection_var, :] +
                     beta_cat_gender[X_categorical_gender_var, :] +
                     beta_cat_audience[X_categorical_audience_var, :] +
                     beta_cat_browser[X_categorical_browser_var, :] +
                     beta_cat_city[X_categorical_city_var, :] +
                     beta_cat_device[X_categorical_device_var, :] + epsilon)

        # softmax
        y_hat = theano.tensor.nnet.softmax(y_hat_tmp)
        # theano.printing.Print('vector', attrs=['shape'])(y_hat)

        # likelihood
        y_likelihood = pm.Categorical('y_likelihood', p=y_hat, observed=y_data)

        # Predict new values from the posterior distribution of the previously
        # trained model. Check that the predicted output is sensible: with e.g.
        # 4 classes, exactly the labels 0, 1, 2, 3 should appear, no more, no less.
        post_pred_big_tmp = pm.sample_posterior_predictive(
            trace=arviz_inference, samples=samples)

    return post_pred_big_tmp
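
# A minimal sketch of the label-coverage check suggested above (hedged: the
# variable names follow the function body, and n_classes is assumed to equal
# variant_df.shape[0]):
#     pp = post_pred_big_tmp['y_likelihood']             # (samples, n_obs)
#     assert set(np.unique(pp)) == set(range(n_classes))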

# The first island has the largest entropy, followed by the third, with the
# second in last place.

# Q2

d = pd.read_csv('../../data/happiness.csv', header=0)
d.head()
d = d.loc[d.age > 17]                       # keep adults only
d['age'] = (d['age'] - 18) / (65 - 18)      # rescale age 18-65 onto [0, 1]
d['married'] = d['married'].astype('Int64')

# theano.shared keeps the integer dtype needed to index a[married] below
married = theano.shared(np.array(d.married, dtype='int64'))
with pm.Model() as model_69:
    # Data
    age = pm.Data('age', d['age'].values)
    #married = pm.Data('married', d['married'].values)
    happiness = pm.Data('happiness', d['happiness'].values)

    # Priors
    a = pm.Normal('a', mu=0, sd=1, shape=2)
    bA = pm.Normal('bA', mu=0, sd=2)
    sigma = pm.Exponential('sigma', lam=1)

    # Regression
    mu = a[married] + bA * age
    happy_hat = pm.Normal('happy_hat', mu=mu, sd=sigma, observed=happiness)

    # Prior sampling, trace definition and posterior sampling
    prior = pm.sample_prior_predictive(samples=30)
    posterior_69 = pm.sample()
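
# A hedged follow-up (sketch, not part of the original exercise code): summarize
# the posterior. Marriage is a collider between age and happiness in this setup,
# so conditioning on it is expected to induce a spurious negative age effect (bA).
import arviz as az
print(az.summary(posterior_69, var_names=['a', 'bA', 'sigma']))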
Beispiel #26
0
    def build(self):
        """ Builds and returns the Generative model. Also sets self.model """

        p_delay = get_delay_distribution()
        nonzero_days = self.observed.total.gt(0)
        len_observed = len(self.observed)
        convolution_ready_gt = self._get_convolution_ready_gt(len_observed)
        x = np.arange(len_observed)[:, None]

        coords = {
            "date": self.observed.index.values,
            "nonzero_date":
            self.observed.index.values[self.observed.total.gt(0)],
        }
        with pm.Model(coords=coords) as self.model:

            # Let log_r_t walk randomly with a fixed prior of ~0.035. Think
            # of this number as how quickly r_t can react.
            log_r_t = pm.GaussianRandomWalk("log_r_t",
                                            sigma=0.035,
                                            dims=["date"])
            r_t = pm.Deterministic("r_t", pm.math.exp(log_r_t), dims=["date"])

            # For a given seed population and R_t curve, we calculate the
            # implied infection curve by simulating an outbreak. While this may
            # look daunting, it's simply a way to recreate the outbreak
            # simulation math inside the model:
            # https://staff.math.su.se/hoehle/blog/2020/04/15/effectiveR0.html
            seed = pm.Exponential("seed", 1 / 0.02)
            y0 = tt.zeros(len_observed)
            y0 = tt.set_subtensor(y0[0], seed)
            outputs, _ = theano.scan(
                fn=lambda t, gt, y, r_t: tt.set_subtensor(
                    y[t], tt.sum(r_t * y * gt)),
                sequences=[tt.arange(1, len_observed), convolution_ready_gt],
                outputs_info=y0,
                non_sequences=r_t,
                n_steps=len_observed - 1,
            )
            infections = pm.Deterministic("infections",
                                          outputs[-1],
                                          dims=["date"])

            # Convolve infections to confirmed positive reports based on a known
            # p_delay distribution. See patients.py for details on how we calculate
            # this distribution.
            test_adjusted_positive = pm.Deterministic(
                "test_adjusted_positive",
                conv2d(
                    tt.reshape(infections, (1, len_observed)),
                    tt.reshape(p_delay, (1, len(p_delay))),
                    border_mode="full",
                )[0, :len_observed],
                dims=["date"])

            # Picking an exposure with a prior that exposure never goes below
            # 0.1 * max_tests. The 0.1 only affects early values of Rt when
            # testing was minimal or when data errors cause underreporting
            # of tests.
            tests = pm.Data("tests", self.observed.total.values, dims=["date"])
            exposure = pm.Deterministic("exposure",
                                        pm.math.clip(
                                            tests,
                                            self.observed.total.max() * 0.1,
                                            1e9),
                                        dims=["date"])

            # Test-volume adjust reported cases based on an assumed exposure
            # Note: this is similar to the exposure parameter in a Poisson
            # regression.
            positive = pm.Deterministic("positive",
                                        exposure * test_adjusted_positive,
                                        dims=["date"])

            # Save data as part of trace so we can access in inference_data
            observed_positive = pm.Data("observed_positive",
                                        self.observed.positive.values,
                                        dims=["date"])
            nonzero_observed_positive = pm.Data(
                "nonzero_observed_positive",
                self.observed.positive[nonzero_days.values].values,
                dims=["nonzero_date"])

            positive_nonzero = pm.NegativeBinomial(
                "nonzero_positive",
                mu=positive[nonzero_days.values],
                alpha=pm.Gamma("alpha", mu=6, sigma=1),
                observed=nonzero_observed_positive,
                dims=["nonzero_date"])

        return self.model
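
# Usage sketch (hedged; assumes an instance `gm` of the class that defines
# build() above):
#     model = gm.build()
#     with model:
#         trace = pm.sample(tune=500, draws=500, chains=2, target_accept=0.9)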
Beispiel #27
0
    def test_creation_of_data_outside_model_context(self):
        with pytest.raises((IndexError, TypeError)) as error:
            pm.Data("data", [1.1, 2.2, 3.3])
        error.match("No model on context stack")
Beispiel #28
0
X = scale(X)
n_hidden_units = 5

initial_w1 = np.random.normal(loc=0,
                              scale=10,
                              size=(X.shape[1], n_hidden_units)).astype(
                                  theano.config.floatX)
initial_w2 = np.random.normal(loc=0,
                              scale=10,
                              size=(n_hidden_units, n_hidden_units)).astype(
                                  theano.config.floatX)
initial_out = np.random.normal(loc=0, scale=10, size=n_hidden_units).astype(
    theano.config.floatX)

with pm.Model() as BNN:
    x = pm.Data('x', X)
    Y = pm.Data('Y', y)

    weight_1 = pm.Normal('layer_1',
                         mu=0,
                         sd=10,
                         shape=(X.shape[1], n_hidden_units),
                         testval=initial_w1)
    weight_2 = pm.Normal('layer_2',
                         mu=0,
                         sd=10,
                         shape=(n_hidden_units, n_hidden_units),
                         testval=initial_w2)
    weight_Out = pm.Normal('layer_out',
                           mu=0,
                           sd=10,
                           shape=n_hidden_units,
                           testval=initial_out)
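    # The source excerpt ends here. A typical continuation in the standard
    # PyMC3 BNN pattern (a sketch, not the original code) would wire up the
    # layers and a likelihood matched to y:
    #     act_1 = pm.math.tanh(pm.math.dot(x, weight_1))
    #     act_2 = pm.math.tanh(pm.math.dot(act_1, weight_2))
    #     act_out = pm.math.dot(act_2, weight_Out)
    #     out = pm.Normal('out', mu=act_out, sd=1., observed=Y)  # continuous y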
Beispiel #29
0
def main(args):
    print("Loading data...")
    teams, df = load_data()
    nt = len(teams)
    train = df[df["split"] == "train"]

    print("Starting inference...")
    with pm.Model() as model:
        # priors
        alpha = pm.Normal("alpha", mu=0, sigma=1)
        sd_att = pm.HalfStudentT("sd_att", nu=3, sigma=2.5)
        sd_def = pm.HalfStudentT("sd_def", nu=3, sigma=2.5)

        home = pm.Normal("home", mu=0, sigma=1)  # home advantage

        # team-specific model parameters
        attack = pm.Normal("attack", mu=0, sigma=sd_att, shape=nt)
        defend = pm.Normal("defend", mu=0, sigma=sd_def, shape=nt)

        # data
        home_id = pm.Data("home_data", train["Home_id"])
        away_id = pm.Data("away_data", train["Away_id"])

        # likelihood
        theta1 = tt.exp(alpha + home + attack[home_id] - defend[away_id])
        theta2 = tt.exp(alpha + attack[away_id] - defend[home_id])

        pm.Poisson("s1", mu=theta1, observed=train["score1"])
        pm.Poisson("s2", mu=theta2, observed=train["score2"])

    with model:
        fit = pm.sample(
            draws=args.num_samples,
            tune=args.num_warmup,
            chains=args.num_chains,
            cores=args.num_cores,
            random_seed=args.rng_seed,
        )

    print("Analyse posterior...")
    az.plot_forest(
        fit,
        var_names=("alpha", "home", "sd_att", "sd_def"),
        backend="bokeh",
    )

    az.plot_trace(
        fit,
        var_names=("alpha", "home", "sd_att", "sd_def"),
        backend="bokeh",
    )

    # Attack and defence
    quality = teams.copy()
    quality = quality.assign(
        attack=fit["attack"].mean(axis=0),
        attacksd=fit["attack"].std(axis=0),
        defend=fit["defend"].mean(axis=0),
        defendsd=fit["defend"].std(axis=0),
    )
    quality = quality.assign(
        attack_low=quality["attack"] - quality["attacksd"],
        attack_high=quality["attack"] + quality["attacksd"],
        defend_low=quality["defend"] - quality["defendsd"],
        defend_high=quality["defend"] + quality["defendsd"],
    )

    plot_quality(quality)

    # Predicted goals and table
    predict = df[df["split"] == "predict"]

    with model:
        pm.set_data({"home_data": predict["Home_id"]})
        pm.set_data({"away_data": predict["Away_id"]})

        predicted_score = pm.sample_posterior_predictive(
            fit, var_names=["s1", "s2"], random_seed=1)

    predicted_full = predict.copy()
    predicted_full = predicted_full.assign(
        score1=predicted_score["s1"].mean(axis=0).round(),
        score1error=predicted_score["s1"].std(axis=0),
        score2=predicted_score["s2"].mean(axis=0).round(),
        score2error=predicted_score["s2"].std(axis=0),
    )

    predicted_full = train.append(
        predicted_full.drop(columns=["score1error", "score2error"]))

    print(score_table(df))
    print(score_table(predicted_full))
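
# Hedged invocation sketch (argument names inferred from the attributes used in
# main(); flags and defaults are illustrative, not from the original script):
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument("--num-samples", dest="num_samples", type=int, default=1000)
#     parser.add_argument("--num-warmup", dest="num_warmup", type=int, default=1000)
#     parser.add_argument("--num-chains", dest="num_chains", type=int, default=2)
#     parser.add_argument("--num-cores", dest="num_cores", type=int, default=2)
#     parser.add_argument("--rng-seed", dest="rng_seed", type=int, default=0)
#     main(parser.parse_args())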
Beispiel #30
0
    def test_deterministic(self):
        data_values = np.array([0.5, 0.4, 5, 2])
        with pm.Model() as model:
            X = pm.Data("X", data_values)
            pm.Normal("y", 0, 1, observed=X)
            model.logp(model.test_point)