Example #1
def simple_categorical():
    # four possible outcomes with probabilities p and values v
    p = floatX_array([0.1, 0.2, 0.3, 0.4])
    v = floatX_array([0.0, 1.0, 2.0, 3.0])
    with Model() as model:
        # three i.i.d. categorical variables over the four outcomes
        Categorical("x", p, shape=3, testval=[1, 2, 3])

    # analytic mean and variance of a single categorical draw
    mu = np.dot(p, v)
    var = np.dot(p, (v - mu)**2)
    return model.test_point, model, (mu, var)
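A minimal usage sketch (not part of the source; assumes pymc3's sample function is importable): draw from the model returned by the helper above and compare the empirical mean of x with the analytic mean mu.

from pymc3 import sample

start, model, (mu, var) = simple_categorical()
with model:
    # pymc3 chooses a suitable step method for the discrete variable
    trace = sample(2000, start=start, progressbar=False)
# the empirical mean of the sampled categories should be close to mu (2.0)
print(trace["x"].mean(), mu)

Example #2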
    def fit(self, base_models_predictions, true_targets,
            model_identifiers=None):

        ba = BayesianAverage()
        weight_vector = ba.fit(base_models_predictions, true_targets)
        default = True

        # reorder the prediction array so its last axis indexes the base models
        base_models_predictions = base_models_predictions.transpose()
        n_basemodels = base_models_predictions.shape[2]
        with Model() as basic_model:
            # define prior over the ensemble weights
            HalfNormal('weights', sd=1, shape=n_basemodels)
            # define likelihood; note that the ensemble prediction uses the
            # precomputed BayesianAverage weight_vector, not the prior above
            ensemble_pred = np.dot(base_models_predictions, weight_vector)
            Categorical('likelihood', p=ensemble_pred.transpose(), observed=true_targets)

        with basic_model:
            start = find_MAP(model=basic_model)
            # use NUTS by default, Metropolis otherwise
            if default:
                step = NUTS()
            else:
                step = Metropolis()
            trace = sample(self.n_samples, step=step, start=start)
        # discard the first 5000 draws as burn-in
        trace = trace[5000:]
        self.sampled_weights = trace["weights"]
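A hypothetical companion method (not in the source), sketching how the stored posterior weight samples could be used for prediction; it assumes base_models_predictions has shape (n_basemodels, n_points, n_classes), as the transpose above suggests, and that numpy is imported as np.

    def predict(self, base_models_predictions):
        # posterior-mean weights, renormalised to sum to one (assumed usage, not from the source)
        weights = np.asarray(self.sampled_weights).mean(axis=0)
        weights = weights / weights.sum()
        # weighted average of the base models' class probabilities
        return np.tensordot(weights, base_models_predictions, axes=([0], [0]))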
Example #3
print(C)
print(U)
plt.plot(X_obs[:], np.ones(X_obs.shape), 'o', markersize=8)
plt.show()

# Infer class labels
from pymc3 import Model, Dirichlet, Normal, MvNormal, HalfNormal, Categorical
import theano.tensor

with Model() as gmm:
    C = Dirichlet('mixture_coeff',
                  dirichlet_scale * dirichlet_shape,
                  shape=nclusters)
    S = HalfNormal('S', sd=sd_halfnormal, shape=nclusters)
    U = Normal('mu', mu=mean_prior_mean, sd=mean_prior_sd, shape=nclusters)
    Y = Categorical('labels', p=C, shape=nsamples)
    X = Normal('X', mu=U[Y], sd=S[Y], observed=X_obs)

from pymc3 import find_MAP
map_estimate = find_MAP(model=gmm)
print(map_estimate)

from pymc3 import NUTS, sample, Slice, Metropolis, ElemwiseCategorical, HamiltonianMC

import copy

modified_map_estimate = copy.deepcopy(map_estimate)
modified_map_estimate['mu'] = [
    1 if x < 0.001 else x for x in modified_map_estimate['mu']
]

with gmm:
    # step = Slice(vars=[Y])
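    # the original snippet is cut off here; the lines below are an assumed
    # completion of the sampling step, not taken from the source
    step1 = ElemwiseCategorical(vars=[Y], values=list(range(nclusters)))  # updates the discrete labels
    step2 = NUTS()  # handles the continuous mixture parameters
    trace = sample(2000, step=[step1, step2], start=modified_map_estimate)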
Example #4
IDs = df.values[:, 0].astype(np.int32) - 1
Items = df.values[:, 1].astype(np.int32) - 1

# there is an operation below that indexes random variables of shape = number of classes
# with a data-length vector of class values; for a detailed explanation see
# https://pymc-devs.github.io/pymc3/notebooks/GLM-hierarchical.html

basic_model = Model()
with basic_model:
    # prior distribution, shape [50, 6]
    theta = Dirichlet('p_theta', a=(1.0 / K) * np.ones(K), shape=(n_person, K))
    # prior distribution, shape [6, 112]
    phi = Dirichlet('p_phi',
                    a=(1.0 / n_item) * np.ones(n_item),
                    shape=(K, n_item))

    #likelihood
    # number of data points x per-datum category-probability vector, [1117, 6]
    theta = theta[IDs, :]
    # number of data points x per-ID item-probability vector, [1117, 112]
    person_to_item = tt.dot(theta, phi)

    H = Categorical("tes", p=person_to_item, shape=(1117), observed=Items)

    ## sampling
    # there are many parameters and running this locally is heavy, so the number of samples is kept very small
    # if you want to draw many more samples, it is recommended to make H's Categorical continuous and use ADVI
    # to make it continuous, define the log-likelihood yourself; the method is described at
    # https://pymc-devs.github.io/pymc3/notebooks/lda-advi-aevb.html
    step = CategoricalGibbsMetropolis(vars=[H])
    trace = sample(12, init=None, step=step)
    summary(trace)
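A brief follow-up sketch (not part of the source; assumes the trace above and numpy as np are in scope): the posterior mean of p_theta gives each person's class-membership probabilities.

# posterior mean of the per-person class probabilities, shape [n_person, K]
theta_mean = trace['p_theta'].mean(axis=0)
# most probable class for each person
print(theta_mean.argmax(axis=1))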
Example #5
def edhmm_fit(inp, nans, n_subs, last, method='advi'):
    # inp - array containing responses, outcomes, and a switch variable which turns off the update in the presence of nans
    # nans - bool array pointing to the locations of nan responses and outcomes
    # n_subs - int value, total number of subjects (each subject is fitted with its own parameter values)
    # last - int value, negative value denoting the number of last trials to exclude from parameter estimation,
    #        e.g. setting last = -35 excludes the last 35 trials from parameter estimation.

    # define the hierarchical parametric model for the ED-HMM
    d_max = 200  # maximal value for state duration
    with Model() as edhmm:
        d = tt.arange(d_max)  # possible duration values 0, ..., d_max - 1
        d = tt.tile(d, (n_subs, 1))
        P = tt.ones((2, 2)) - tt.eye(2)  # permutation matrix

        # set prior state probability
        theta0 = tt.ones(n_subs) / 2

        # set hierarchical prior for delta parameter of prior beliefs p_0(d)
        dtau = HalfCauchy('dtau', beta=1)
        dloc = HalfCauchy('dloc', beta=dtau, shape=(n_subs, ))
        delta = Deterministic('delta', dloc / (1 + dloc))

        # set hierarchical prior for r parameter of prior beliefs p_0(d)
        rtau = HalfCauchy('rtau', beta=1)
        rloc = HalfCauchy('rloc', beta=rtau, shape=(n_subs, ))
        r = Deterministic('r', 1 + rloc)

        # compute prior beliefs over state durations for the given r and delta
        binomln = tt.gammaln(d + r[:, None]) - tt.gammaln(d + 1) - tt.gammaln(
            r[:, None])
        pd0 = tt.nnet.softmax(binomln + d * log(1 - delta[:, None]) +
                              r[:, None] * log(delta[:, None]))

        # set joint probability distribution
        joint0 = tt.stack([theta0[:, None] * pd0,
                           (1 - theta0)[:, None] * pd0]).dimshuffle(1, 0, 2)

        # set hierarchical priors for response noises
        btau = HalfCauchy('btau', beta=1)
        bloc = HalfCauchy('bloc', beta=btau, shape=(n_subs, ))
        beta = Deterministic('beta', 1 / bloc)

        # set hierarchical priors for initial beliefs about reward probability
        mtau = HalfCauchy('mtau', beta=4)
        mloc = HalfCauchy('mloc', beta=mtau, shape=(n_subs, 2))
        muA = Deterministic('muA', mloc[:, 0] / (1 + mloc[:, 0]))
        muB = Deterministic('muB', 1 / (1 + mloc[:, 1]))
        init = tt.stacklists([[10 * muA, 10 * (1 - muA)],
                              [10 * muB, 10 * (1 - muB)]]).dimshuffle(2, 0, 1)

        # compute the posterior beliefs over states, durations, and reward probabilities
        (post, _) = scan(edhmm_model,
                         sequences=[inp],
                         outputs_info=[init, joint0],
                         non_sequences=[pd0, P, range(n_subs)],
                         name='edhmm')

        # get posterior reward probability and state probability
        a0 = init[None, ..., 0]
        b0 = init[None, ..., 1]
        a = tt.concatenate([a0, post[0][:-1, ..., 0]])
        b = tt.concatenate([b0, post[0][:-1, ..., 1]])
        mu = Deterministic('mu', a / (a + b))
        theta = Deterministic(
            'theta',
            tt.concatenate(
                [theta0[None, :], post[1][:-1].sum(axis=-1)[..., 0]])[...,
                                                                      None])

        # compute the choice-dependent expected reward probability
        mean = (theta * mu + (1 - theta) * mu.dot(P))

        # compute expected utility
        U = Deterministic('U', 2 * mean - 1)

        # set hierarchical prior for response biases
        ctau = HalfCauchy('ctau', beta=1)
        cloc = HalfCauchy('cloc', beta=ctau, shape=(n_subs, ))
        c0 = Deterministic('c0', cloc / (1 + cloc))

        # compute response noise and response bias modulated expected free energy
        G = Deterministic(
            'G', beta[None, :, None] * U + log([c0, 1 - c0]).T[None, ...])

        # compute response probability for the pre-reversal and the reversal phase of the experiment
        valid_obs = ~nans[:last]
        nzero = tt.nonzero(valid_obs)
        p = Deterministic('p', tt.nnet.softmax(G[:last][nzero]))

        # set observation likelihood of responses
        responses = inp[:last, :, 0][valid_obs]
        Categorical('obs', p=p, observed=responses)

    # fit the model
    with edhmm:
        approx = fit(method=method, n=50000, progressbar=True)

    return approx
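A minimal usage sketch (not part of the source; the input arrays are assumed to exist): sample from the fitted approximation and inspect the per-subject duration parameters of the ED-HMM.

approx = edhmm_fit(inp, nans, n_subs, last=-35, method='advi')
trace = approx.sample(1000)
# per-subject posterior means of the duration parameters delta and r
print(trace['delta'].mean(axis=0))
print(trace['r'].mean(axis=0))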
Example #6
def durw_fit(inp, nans, n_subs, last, method='advi'):
    # inp - array containing responses, outcomes, and a switch variable which turns off the update in the presence of nans
    # nans - bool array pointing to the locations of nan responses and outcomes
    # n_subs - int value, total number of subjects (each subject is fitted with its own parameter values)
    # last - int value, negative value denoting the number of last trials to exclude from parameter estimation,
    #        e.g. setting last = -35 excludes the last 35 trials from parameter estimation.

    # define the hierarchical parametric model for DU-RW
    with Model() as durw:

        # set hierarchical priors for learning rates
        atau = HalfCauchy('atau', beta=1)
        aloc = HalfCauchy('aloc', beta=atau, shape=(n_subs, ))
        alpha = Deterministic('alpha', aloc / (1 + aloc))

        # set hierarchical priors for coupling strengths
        ktau = HalfCauchy('ktau', beta=1)
        kloc = HalfCauchy('kloc', beta=ktau, shape=(n_subs, ))
        kappa = Deterministic('kappa', kloc / (1 + kloc))

        # set hierarchical priors for response noises
        btau = HalfCauchy('btau', beta=1)
        bloc = HalfCauchy('bloc', beta=btau, shape=(n_subs, ))
        beta = Deterministic('beta', 1 / bloc)

        # set hierarchical priors for initial choice value
        mtau = HalfCauchy('mtau', beta=1)
        mlocA = HalfCauchy('mlocA', beta=mtau, shape=(n_subs, ))
        mlocB = HalfCauchy('mlocB', beta=mtau, shape=(n_subs, ))
        muA = Deterministic('muA', mlocA / (1 + mlocA))
        muB = Deterministic('muB', 1 / (1 + mlocB))
        V0 = tt.stacklists([2 * muA - 1, 2 * muB - 1]).T

        # compute the choice values
        (Q, _) = scan(durw_model,
                      sequences=[inp],
                      outputs_info=V0,
                      non_sequences=[alpha, kappa, range(n_subs)],
                      name='rw')

        V0 = Deterministic('V0', V0[None, ...])
        V = Deterministic('V', tt.concatenate([V0, Q[:-1]]))

        # set hierarchical prior for response biases
        ctau = HalfCauchy('ctau', beta=1)
        cloc = HalfCauchy('cloc', beta=ctau, shape=(n_subs, ))
        c0 = Deterministic('c0', cloc / (1 + cloc))

        # compute response noise and response bias modulated response values
        G = Deterministic(
            'G', beta[None, :, None] * V + log([c0, 1 - c0]).T[None, ...])

        # compute response probability for the pre-reversal and the reversal phase of the experiment
        valid_obs = ~nans[:last]
        nzero = tt.nonzero(valid_obs)
        p = Deterministic('p', tt.nnet.softmax(G[:last][nzero]))

        # set observation likelihood of responses
        responses = inp[:last, :, 0][valid_obs]
        Categorical('obs', p=p, observed=responses)

    # fit the model
    with durw:
        approx = fit(method=method, n=50000, progressbar=True)

    return approx
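A brief comparison sketch (not part of the source; input arrays assumed to exist): fit both models with ADVI and compare their per-subject response-noise estimates.

edhmm_trace = edhmm_fit(inp, nans, n_subs, last=-35).sample(1000)
durw_trace = durw_fit(inp, nans, n_subs, last=-35).sample(1000)
print('ED-HMM beta:', edhmm_trace['beta'].mean(axis=0))
print('DU-RW beta: ', durw_trace['beta'].mean(axis=0))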