import numpy as np


def _indvdl_gg(hparams, std_x, n_samples, L_cov, Normal, Gamma, Deterministic,
               sgn, gamma, floatX, cholesky, tt, verbose):
    # Uniform distribution on the unit sphere: draw standard normal vectors,
    # push exact zeros slightly away from zero to avoid division by zero,
    # and normalize.
    gs = Normal('gs', np.float32(0.0), np.float32(1.0),
                shape=(n_samples, 2), dtype=floatX)
    ss = Deterministic(
        'ss', gs + sgn(sgn(gs) + np.float32(1e-10)) * np.float32(1e-10))
    ns = Deterministic('ns', ss.norm(L=2, axis=1)[:, np.newaxis])
    us = Deterministic('us', ss / ns)

    # Scaling so that the marginal variance equals 1
    n = 2  # dimension
    beta = np.float32(hparams['beta_coeff'])
    m = n * gamma(0.5 * n / beta) \
        / (2**(1 / beta) * gamma((n + 2) / (2 * beta)))
    L_cov_ = (np.sqrt(m) * cholesky(L_cov)).astype(floatX)

    # Scaling to v_indvdls. set_subtensor returns a new symbolic variable
    # instead of modifying its argument, so the result must be assigned back.
    scale1 = np.float32(std_x[0] * hparams['v_indvdl_1'])
    scale2 = np.float32(std_x[1] * hparams['v_indvdl_2'])
    L_cov_ = tt.set_subtensor(L_cov_[0, :], L_cov_[0, :] * scale1)
    L_cov_ = tt.set_subtensor(L_cov_[1, :], L_cov_[1, :] * scale2)

    # Draw samples: a Gamma-distributed radius raised to 1 / (2 * beta),
    # times a uniform direction on the sphere, yields generalized-Gaussian
    # distributed individual effects.
    ts = Gamma('ts', alpha=np.float32(n / (2 * beta)), beta=np.float32(.5),
               shape=n_samples, dtype=floatX)[:, np.newaxis]
    mus_ = Deterministic(
        'mus_', ts**(np.float32(0.5 / beta)) * us.dot(L_cov_))
    mu1s_ = mus_[:, 0]
    mu2s_ = mus_[:, 1]

    if 10 <= verbose:
        print('GG for individual effect')
        print('gs.dtype = {}'.format(gs.dtype))
        print('ss.dtype = {}'.format(ss.dtype))
        print('ns.dtype = {}'.format(ns.dtype))
        print('us.dtype = {}'.format(us.dtype))
        print('ts.dtype = {}'.format(ts.dtype))

    return mu1s_, mu2s_
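
# A minimal usage sketch (illustrative, not part of the source): _indvdl_gg
# receives the PyMC3 distribution constructors and math helpers as arguments,
# so the caller wires them in explicitly. The hyperparameters, the identity
# L_cov, and the random data below are hypothetical placeholders;
# beta_coeff = 1.0 corresponds to the Gaussian special case of the
# generalized Gaussian.
def _example_indvdl_gg():
    import pymc3 as pm
    import theano.tensor as tt
    from scipy.special import gamma as gamma_fn
    from theano.tensor.slinalg import cholesky

    xs = np.random.randn(100, 2).astype('float32')  # fake observations
    hparams = {'beta_coeff': 1.0, 'v_indvdl_1': 1.0, 'v_indvdl_2': 1.0}

    with pm.Model():
        mu1s_, mu2s_ = _indvdl_gg(
            hparams, xs.std(axis=0), len(xs), np.eye(2, dtype='float32'),
            pm.Normal, pm.Gamma, pm.Deterministic, tt.sgn, gamma_fn,
            'float32', cholesky, tt, verbose=0)

    return mu1s_, mu2s_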
import theano.tensor as tt
from theano import scan
from pymc3 import Model, Deterministic, HalfCauchy, Categorical, fit
from pymc3.math import log


def edhmm_fit(inp, nans, n_subs, last, method='advi'):
    # inp    - array containing responses, outcomes, and a switch variable
    #          which turns off the belief update in the presence of nans
    # nans   - bool array marking the locations of nan responses and outcomes
    # n_subs - int, total number of subjects (each subject is fitted with its
    #          own parameter values)
    # last   - negative int giving the number of final trials to exclude from
    #          parameter estimation, e.g. last = -35 excludes the last 35
    #          trials.

    # define the hierarchical parametric model for the ED-HMM
    d_max = 200  # maximal value for state duration

    with Model() as edhmm:
        # vector of possible duration values from zero to d_max - 1
        d = tt.arange(d_max)
        d = tt.tile(d, (n_subs, 1))
        P = tt.ones((2, 2)) - tt.eye(2)  # permutation matrix

        # set the prior state probability
        theta0 = tt.ones(n_subs) / 2

        # set the hierarchical prior for the delta parameter of the prior
        # beliefs p_0(d)
        dtau = HalfCauchy('dtau', beta=1)
        dloc = HalfCauchy('dloc', beta=dtau, shape=(n_subs,))
        delta = Deterministic('delta', dloc / (1 + dloc))

        # set the hierarchical prior for the r parameter of the prior
        # beliefs p_0(d)
        rtau = HalfCauchy('rtau', beta=1)
        rloc = HalfCauchy('rloc', beta=rtau, shape=(n_subs,))
        r = Deterministic('r', 1 + rloc)

        # compute the prior beliefs over state durations for given r and
        # delta (a negative binomial distribution truncated at d_max)
        binomln = tt.gammaln(d + r[:, None]) - tt.gammaln(d + 1) \
            - tt.gammaln(r[:, None])
        pd0 = tt.nnet.softmax(binomln + d * log(1 - delta[:, None])
                              + r[:, None] * log(delta[:, None]))

        # set the joint probability distribution over states and durations
        joint0 = tt.stack([theta0[:, None] * pd0,
                           (1 - theta0)[:, None] * pd0]).dimshuffle(1, 0, 2)

        # set hierarchical priors for the response noises
        btau = HalfCauchy('btau', beta=1)
        bloc = HalfCauchy('bloc', beta=btau, shape=(n_subs,))
        beta = Deterministic('beta', 1 / bloc)

        # set hierarchical priors for the initial beliefs about reward
        # probability
        mtau = HalfCauchy('mtau', beta=4)
        mloc = HalfCauchy('mloc', beta=mtau, shape=(n_subs, 2))
        muA = Deterministic('muA', mloc[:, 0] / (1 + mloc[:, 0]))
        muB = Deterministic('muB', 1 / (1 + mloc[:, 1]))
        init = tt.stacklists([[10 * muA, 10 * (1 - muA)],
                              [10 * muB, 10 * (1 - muB)]]).dimshuffle(2, 0, 1)

        # compute the posterior beliefs over states, durations, and reward
        # probabilities (edhmm_model implements the per-trial belief update
        # and is defined elsewhere)
        (post, _) = scan(edhmm_model,
                         sequences=[inp],
                         outputs_info=[init, joint0],
                         non_sequences=[pd0, P, range(n_subs)],
                         name='edhmm')

        # get the posterior reward probability and state probability
        a0 = init[None, ..., 0]
        b0 = init[None, ..., 1]
        a = tt.concatenate([a0, post[0][:-1, ..., 0]])
        b = tt.concatenate([b0, post[0][:-1, ..., 1]])
        mu = Deterministic('mu', a / (a + b))
        theta = Deterministic(
            'theta',
            tt.concatenate([theta0[None, :],
                            post[1][:-1].sum(axis=-1)[..., 0]])[..., None])

        # compute the choice-dependent expected reward probability
        mean = theta * mu + (1 - theta) * mu.dot(P)

        # compute the expected utility
        U = Deterministic('U', 2 * mean - 1)

        # set the hierarchical prior for the response biases
        ctau = HalfCauchy('ctau', beta=1)
        cloc = HalfCauchy('cloc', beta=ctau, shape=(n_subs,))
        c0 = Deterministic('c0', cloc / (1 + cloc))

        # compute the response-noise and response-bias modulated expected
        # free energy
        G = Deterministic('G', beta[None, :, None] * U
                          + log([c0, 1 - c0]).T[None, ...])

        # compute the response probability for the pre-reversal and the
        # reversal phase of the experiment, skipping trials with nan responses
        valid_obs = ~nans[:last]
        nzero = tt.nonzero(valid_obs)
        p = Deterministic('p', tt.nnet.softmax(G[:last][nzero]))

        # set the observation likelihood of the responses
        responses = inp[:last, :, 0][valid_obs]
        Categorical('obs', p=p, observed=responses)

    # fit the model
    with edhmm:
        approx = fit(method=method, n=50000, progressbar=True)

    return approx
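
# A minimal usage sketch (illustrative; all shapes and data are hypothetical,
# and edhmm_model must be defined for the scan inside edhmm_fit to run).
# From the indexing above, inp is trial-major with responses in channel 0, so
# a plausible layout is (n_trials, n_subs, 3) holding responses, outcomes,
# and the nan switch; nans has shape (n_trials, n_subs). The returned
# approximation exposes posterior samples via its sample() method.
def _example_edhmm_fit():
    import numpy as np

    n_trials, n_subs = 160, 5
    responses = np.random.randint(0, 2, (n_trials, n_subs))
    outcomes = np.random.randint(0, 2, (n_trials, n_subs))
    nans = np.zeros((n_trials, n_subs), dtype=bool)  # no missing trials here
    inp = np.stack([responses, outcomes, (~nans).astype(int)], axis=-1)

    approx = edhmm_fit(inp, nans, n_subs, last=-35, method='advi')
    trace = approx.sample(1000)  # draw posterior samples from the ADVI fit
    return trace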