Example 1
# coding: utf-8



import pymc3 as pm
import numpy as np

n = np.ones(4)*5
y = np.array([0,1,3,5])
dose = np.array([-.86,-.3,-.05,.73])

with pm.Model() as bioassay_model:
    
    # Prior
    alpha = pm.Normal('alpha', 0, sd=100)
    beta = pm.Normal('beta', 0, sd=100)
    
    # Linear combinations of parameters
    theta = pm.invlogit(alpha + beta*dose)
    
    # Model likelihood
    deaths = pm.Binomial('deaths', n=n, p=theta, observed=y)

    
    #### YOUR CODE HERE ####
    a = pm.Normal('a', mu=0, sd=100)
    b_1 = pm.Normal('b_1', mu=0, sd=100)
    b_2 = pm.Normal('b_2', mu=0, sd=100)
    ### END OF YOUR CODE ###
    
    # Transform these random variables into a vector of probabilities p(y_i=1) using the logistic regression
    # model specified above. PyMC3 random variables are theano tensor variables and support simple
    # mathematical operations. For example:
    # z = pm.Normal('x', 0, 1) * np.array([1, 2, 3]) + pm.Normal('y', 0, 1) * np.array([4, 5, 6])
    # is a valid PyMC3 expression.
    # Use pm.invlogit for the sigmoid function.
    
    #### YOUR CODE HERE ####
    p_y = pm.invlogit(a*np.ones(shape=data.shape[0]) + b_1*data['age'].values + b_2*data['educ'].values)
    ### END OF YOUR CODE ###
    
    # Declare PyMC Bernoulli random vector with probability of success equal to the corresponding value
    # given by the sigmoid function.
    # Supply target vector using "observed" argument in the constructor.

    #### YOUR CODE HERE ####
    y_hat = pm.Bernoulli('y_hat', p=p_y, observed=data['income_more_50K'].values)
    ### END OF YOUR CODE ###
    
    # Use pm.find_MAP() to find the maximum a-posteriori estimate for the vector of logistic regression weights.
    map_estimate = pm.find_MAP()
    print(map_estimate)

"""Sumbit MAP estimations of corresponding coefficients:"""
Example 3
import pymc3 as pm
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

# Random stream used when gen_indicator requests generated choices (assumed setup)
trng = RandomStreams(1234)


def trial_step(info_A_tm1,info_A_t, # externally provided to function on each trial
            obs_choice_tm1,obs_choice_t,
            mag_1_t,mag_0_t,
            stabvol_t,rewpain_t,
            # outputs of this function passed back into it on next trial
            choice_tm1,# either generated or observed choice
            outcome_valence_tm1, # either generated or observed (although not used on input because immediately redefined, useful for storage)
            prob_choice_tm1, # internal state variables
            choice_val_tm1,
            estimate_tm1,
            choice_kernel_tm1,
            lr_tm1,lr_c_tm1,Amix_tm1,Binv_tm1,Bc_tm1,mdiff_tm1,eps_tm1,
            lr_baseline,lr_goodbad,lr_stabvol,lr_rewpain, # variables accessible on all trials
            lr_goodbad_stabvol,lr_rewpain_goodbad,lr_rewpain_stabvol,
            lr_rewpain_goodbad_stabvol,
            lr_c_baseline,lr_c_goodbad,lr_c_stabvol,lr_c_rewpain, # variables accessible on all trials
            lr_c_goodbad_stabvol,lr_c_rewpain_goodbad,lr_c_rewpain_stabvol,
            lr_c_rewpain_goodbad_stabvol,
            Amix_baseline,Amix_goodbad,Amix_stabvol,Amix_rewpain,
            Amix_goodbad_stabvol,Amix_rewpain_goodbad,Amix_rewpain_stabvol,
            Amix_rewpain_goodbad_stabvol,
            Binv_baseline,Binv_goodbad,Binv_stabvol,Binv_rewpain,
            Binv_goodbad_stabvol,Binv_rewpain_goodbad,Binv_rewpain_stabvol,
            Binv_rewpain_goodbad_stabvol,
            Bc_baseline,Bc_goodbad,Bc_stabvol,Bc_rewpain,
            Bc_goodbad_stabvol,Bc_rewpain_goodbad,Bc_rewpain_stabvol,
            Bc_rewpain_goodbad_stabvol,
            mag_baseline,mag_rewpain,
            eps_baseline,eps_stabvol,eps_rewpain,eps_rewpain_stabvol,
            gen_indicator,B_max,nonlinear_indicator):
    '''
    Trial by Trial updates for the model

    '''

    # determine whether last trial had a good outcome:
    # +1 when the chosen option was the rewarded one, -1 otherwise
    outcome_valence_tm1 = choice_tm1*info_A_tm1 +\
                 (1.0-choice_tm1)*(1.0-info_A_tm1) +\
                 (1.0-choice_tm1)*info_A_tm1*(-1.0) + \
                 (choice_tm1)*(1.0-info_A_tm1)*(-1.0)

    # determine Amix for this trial using last good outcome
    Amix_t = Amix_baseline + \
        outcome_valence_tm1*Amix_goodbad + \
        stabvol_t*Amix_stabvol + \
        rewpain_t*Amix_rewpain + \
        outcome_valence_tm1*stabvol_t*Amix_goodbad_stabvol + \
        outcome_valence_tm1*rewpain_t*Amix_rewpain_goodbad + \
        stabvol_t*rewpain_t*Amix_rewpain_stabvol + \
        outcome_valence_tm1*stabvol_t*rewpain_t*Amix_rewpain_goodbad_stabvol

    Amix_t = pm.invlogit(Amix_t)

    # Determine Binv for this trial using last good outcome
    Binv_t = Binv_baseline + \
        outcome_valence_tm1*Binv_goodbad + \
        stabvol_t*Binv_stabvol + \
        rewpain_t*Binv_rewpain + \
        outcome_valence_tm1*stabvol_t*Binv_goodbad_stabvol + \
        outcome_valence_tm1*rewpain_t*Binv_rewpain_goodbad + \
        stabvol_t*rewpain_t*Binv_rewpain_stabvol + \
        outcome_valence_tm1*stabvol_t*rewpain_t*Binv_rewpain_goodbad_stabvol

    Binv_t = T.exp(Binv_t)
    Binv_t = T.switch(Binv_t<0.1,0.1,Binv_t)
    Binv_t = T.switch(Binv_t>B_max.value,B_max.value,Binv_t)

    # Determine Bc for this trial using last good outcome
    Bc_t = Bc_baseline + \
        outcome_valence_tm1*Bc_goodbad + \
        stabvol_t*Bc_stabvol + \
        rewpain_t*Bc_rewpain + \
        outcome_valence_tm1*stabvol_t*Bc_goodbad_stabvol + \
        outcome_valence_tm1*rewpain_t*Bc_rewpain_goodbad + \
        stabvol_t*rewpain_t*Bc_rewpain_stabvol + \
        outcome_valence_tm1*stabvol_t*rewpain_t*Bc_rewpain_goodbad_stabvol

    Mag_t = T.exp(mag_baseline+rewpain_t*mag_rewpain)
    Mag_t= T.switch(Mag_t<0.1,0.1,Mag_t)
    Mag_t= T.switch(Mag_t>10,10,Mag_t)

    Bc_t = T.switch(Bc_t>B_max.value,B_max.value,Bc_t)
    Bc_t = T.switch(Bc_t<-1*B_max.value,-1*B_max.value,Bc_t)

    # define the defaults that will be replaced
    mdiff_t = (mag_1_t-mag_0_t)
    pdiff_t=(estimate_tm1-(1.0-estimate_tm1)) # compute value from previous probability estimates

    # Nonlinear magnitude or probability
    if nonlinear_indicator.value==0:
        # scale mag diff
        mdiff_t = T.sgn(mdiff_t)*T.abs_(mdiff_t)**Mag_t
    elif nonlinear_indicator.value==2:
        # scale mags separately
        mdiff_t = (T.sgn(mag_1_t)*T.abs_(mag_1_t)**Mag_t)-(T.sgn(mag_0_t)*T.abs_(mag_0_t)**Mag_t)
    elif nonlinear_indicator.value==3:
        # scale prob separately
        estimate_tm1 = estimate_tm1**Mag_t
        estimate_tm1 = T.switch(estimate_tm1<0.01,0.01,estimate_tm1)
        estimate_tm1 = T.switch(estimate_tm1>0.99,0.99,estimate_tm1)
        pdiff_t=(estimate_tm1-(1.0-estimate_tm1)) # compute value from previous probability estimates

    elif nonlinear_indicator.value==1:
        # scale prob diff
        pdiff_t = T.sgn(pdiff_t)*T.abs_(pdiff_t)**Mag_t

    # choice kernel diff
    cdiff_t=(choice_kernel_tm1-(1.0-choice_kernel_tm1))

    # Value
    choice_val_t = Binv_t*((1-Amix_t)*mdiff_t + (Amix_t)*pdiff_t) + Bc_t*cdiff_t

    # before Amix, choice value goes between -1 and 1
    prob_choice_t = 1.0/(1.0+T.exp(-1.0*choice_val_t))

    # determine eps
    eps_t = eps_baseline + \
        stabvol_t*eps_stabvol + \
        rewpain_t*eps_rewpain + \
        stabvol_t*rewpain_t*eps_rewpain_stabvol

    eps_t = pm.invlogit(eps_t)

    # add epsilon
    prob_choice_t = eps_t*0.5+(1.0-eps_t)*prob_choice_t

    # Generate a choice or copy the participant's choice (used on the next trial as an indicator)
    if gen_indicator.value==0:
        choice_t = obs_choice_t
    else:
        choice_t = trng.binomial(n=1, p=prob_choice_t,dtype='float64')

    # determine whether current trial is good or bad
    outcome_valence_t = choice_t*info_A_t +\
                 (1.0-choice_t)*(1.0-info_A_t) +\
                 (1.0-choice_t)*info_A_t*(-1.0) + \
                 (choice_t)*(1.0-info_A_t)*(-1.0)

    lr_t = lr_baseline + \
        outcome_valence_t*lr_goodbad + \
        stabvol_t*lr_stabvol + \
        rewpain_t*lr_rewpain + \
        outcome_valence_t*stabvol_t*lr_goodbad_stabvol + \
        outcome_valence_t*rewpain_t*lr_rewpain_goodbad + \
        stabvol_t*rewpain_t*lr_rewpain_stabvol + \
        outcome_valence_t*stabvol_t*rewpain_t*lr_rewpain_goodbad_stabvol

    lr_t = pm.invlogit(lr_t)

    # update probability estimate; these are the estimates after the update on
    # trial t (stored differently than before)
    estimate_t = estimate_tm1 + lr_t*(info_A_t-estimate_tm1)

    # Choice kernel learning rate
    lr_c_t = lr_c_baseline + \
        outcome_valence_t*lr_c_goodbad + \
        stabvol_t*lr_c_stabvol + \
        rewpain_t*lr_c_rewpain + \
        outcome_valence_t*stabvol_t*lr_c_goodbad_stabvol + \
        outcome_valence_t*rewpain_t*lr_c_rewpain_goodbad + \
        stabvol_t*rewpain_t*lr_c_rewpain_stabvol + \
        outcome_valence_t*stabvol_t*rewpain_t*lr_c_rewpain_goodbad_stabvol

    lr_c_t = pm.invlogit(lr_c_t)

    choice_kernel_t =  choice_kernel_tm1 + lr_c_t*(choice_t - choice_kernel_tm1)

    return([choice_t,outcome_valence_t,prob_choice_t,choice_val_t,estimate_t,choice_kernel_t,lr_t,lr_c_t,Amix_t,Binv_t,Bc_t,mdiff_t,eps_t])
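
# The argument order above (per-trial inputs first, previous outputs next,
# trial-invariant parameters last) mirrors theano.scan's sequences /
# outputs_info / non_sequences convention. A minimal sketch of that wiring,
# with a toy one-parameter delta rule standing in for the full update:
import theano

def _toy_step(outcome_t,     # sequence: per-trial input
              estimate_tm1,  # outputs_info: state carried between trials
              lr):           # non_sequence: shared across all trials
    return estimate_tm1 + lr * (outcome_t - estimate_tm1)

outcomes = T.vector('outcomes')
lr = T.scalar('lr')
estimates, _ = theano.scan(fn=_toy_step,
                           sequences=[outcomes],
                           outputs_info=[T.constant(0.5)],
                           non_sequences=[lr])
run = theano.function([outcomes, lr], estimates)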
Example 4
import numpy as np
import pymc3 as pm


def BinomGP(y, N, X, X_star, mcmc_iter, start={}):
    """Fits a logistic Gaussian process regression timeseries model.

    Details
    =======
    Let $y_t$ be the number of cases observed out of $N_t$ individuals tested.  Then
    $$y_t \sim Binomial(N_t, p_t)$$
    with 
    $$\mathrm{logit}(p_t) = s_t$$

    $s_t$ is modelled as a Gaussian process such that
    $$s_t \sim \mbox{GP}(\mu_t, \Sigma)$$
    with a mean function capturing a linear time trend
    $$\mu_t = \alpha + \beta X_t$$
    and a periodic covariance plus white noise
    $$\Sigma_{i,j} = \sigma^2 \exp\left(-\frac{2 \sin^2(\pi |x_i - x_j| / 365)}{\phi^2}\right) + \tau^2 \delta_{i,j}$$

    Parameters
    ==========
    y -- a vector of cases
    N -- a vector of number tested
    X -- a vector of times at which y is observed
    X_star -- a vector of times at which predictions are to be made
    mcmc_iter -- the number of MCMC iterations to draw
    start -- a dictionary of starting values for the MCMC.

    Returns
    =======
    A tuple of (model, trace, pred) where model is a PyMC3 Model object, trace is a PyMC3 MultiTrace object, and pred is a 5000 x X_star.shape[0] matrix of draws from the posterior predictive distribution of $\pi_t$.
    """

    X = np.array(X)[:, None]  # Inputs must be arranged as column vector
    X_star = X_star[:, None]

    model = pm.Model()

    with model:

        alpha = pm.Normal('alpha', 0, 1000, testval=0.)
        beta = pm.Normal('beta', 0, 100, testval=0.)
        sigmasq_s = pm.Gamma('sigmasq_s', .1, .1, testval=0.1)
        phi_s = pm.Gamma('phi_s', 1., 1., testval=0.5)
        tau2 = pm.Gamma('tau2', .1, .1, testval=0.1)

        # Construct GPs
        cov_s = sigmasq_s * pm.gp.cov.Periodic(1, 365., phi_s)
        mean_f = pm.gp.mean.Linear(coeffs=beta, intercept=alpha)
        gp_s = pm.gp.Latent(mean_func=mean_f, cov_func=cov_s)

        cov_nugget = pm.gp.cov.WhiteNoise(tau2)
        nugget = pm.gp.Latent(cov_func=cov_nugget)

        gp = gp_s + nugget
        model.gp = gp
        s = gp.prior('s', X=X)

        Y_obs = pm.Binomial('y_obs', N, pm.invlogit(s), observed=y)

        # Sample
        trace = pm.sample(mcmc_iter, chains=1, start=start)
        # Predictions
        s_star = gp.conditional('s_star', X_star)
        pred = pm.sample_ppc(trace, vars=[s_star, Y_obs])

        return (model, trace, pred)
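
# A hedged usage sketch for BinomGP (synthetic weekly counts; names and sizes
# here are illustrative, not from the original project):
X = np.arange(0., 730., 7.)              # observation times, in days
N = np.full(X.shape[0], 50)              # number tested at each time
p_true = 1. / (1. + np.exp(-(-2. + np.sin(2. * np.pi * X / 365.))))
y = np.random.binomial(N, p_true)        # synthetic case counts
X_star = np.arange(730., 800., 7.)       # times at which to predict

model, trace, pred = BinomGP(y, N, X, X_star, mcmc_iter=1000)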
Example 5
# Model preliminaries
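
# Synthetic stand-in for Indexed_Successful_Shot_DF, which the surrounding
# project builds upstream (column names inferred from their use below):
import numpy as np
import pandas as pd
import pymc3 as pm

rng = np.random.RandomState(42)
n_shots = 300
Indexed_Successful_Shot_DF = pd.DataFrame({
    'Goal_B': rng.binomial(1, 0.3, n_shots),      # 1 if the shot scored
    'index_one': rng.randint(0, 4, n_shots),      # shot-type index
    'Shot_Angle': rng.uniform(0., 90., n_shots),  # shot angle
})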

observed = Indexed_Successful_Shot_DF['Goal_B']
Shot_Type_Index = Indexed_Successful_Shot_DF['index_one']
N = len(np.unique(Indexed_Successful_Shot_DF['index_one']))

with pm.Model() as unpooled_model:

    # Independent parameters for each shot type
    a = pm.Normal('a', 0, sd=100, shape=N)  # Intercept
    b = pm.Normal('b', 0, sd=100, shape=N)  # Slope on shot angle

    # Calculate predicted success probabilities given the
    # intercept and slope for each shot type
    yhat = pm.invlogit(a[Shot_Type_Index] + b[Shot_Type_Index] * Indexed_Successful_Shot_DF.Shot_Angle.values)
 
    # Make predictions fit reality
    y = pm.Binomial('y', n=np.ones(Indexed_Successful_Shot_DF.shape[0]), p=yhat, observed=observed)
    
    #Run It
    
    start = pm.find_MAP()
    step = pm.Metropolis()
    trace_h = pm.sample(2000, step=step, start=start, njobs=2)




#Example Part 2
Example 6
import numpy as np
import pymc3 as pm


def BinomGP(y, N, time, time_pred, mcmc_iter, start={}):
    """Fits a logistic Gaussian process regression timeseries model.

    Details
    =======
    Let $y_t$ be the number of cases observed out of $N_t$ individuals tested.  Then
    $$y_t \sim Binomial(N_t, p_t)$$
    with 
    $$\mathrm{logit}(p_t) = s_t$$

    $s_t$ is modelled as a Gaussian process such that
    $$s_t \sim \mbox{GP}(\mu_t, \Sigma)$$
    with a mean function capturing a linear time trend
    $$\mu_t = \alpha + \beta X_t$$
    and a periodic covariance plus white noise
    $$\Sigma_{i,j} = \sigma^2 \exp\left(-\frac{2 \sin^2(\pi |x_i - x_j| / 365)}{\phi^2}\right) + \tau^2 \delta_{i,j}$$

    Parameters
    ==========
    y -- a vector of cases
    N -- a vector of number tested
    time -- a vector of times at which y is observed
    time_pred -- a vector of times at which predictions are to be made
    mcmc_iter -- the number of MCMC iterations to draw
    start -- a dictionary of starting values for the MCMC.

    Returns
    =======
    A dict with keys 'model' (a PyMC3 Model object), 'trace' (a PyMC3 MultiTrace object), and 'pred' (draws from the posterior predictive distribution of $y_t$ and $\pi_t$ at the prediction times).
    """
    time = np.array(time)[:, None]  # Inputs must be arranged as column vector
    offset = np.mean(time)
    time = time - offset  # Center time
    model = pm.Model()

    with model:
        alpha = pm.Normal('alpha', 0, 1000, testval=0.)
        beta = pm.Normal('beta', 0, 100, testval=0.)
        sigmasq_s = pm.HalfNormal('sigmasq_s', 5., testval=0.1)
        phi_s = 0.16 #pm.HalfNormal('phi_s', 5., testval=0.5)
        tau2 = pm.Gamma('tau2', .1, .1, testval=0.1)

        # Construct GPs
        cov_t = sigmasq_s * pm.gp.cov.Periodic(1, 365., phi_s)
        mean_t = pm.gp.mean.Linear(coeffs=beta, intercept=alpha)
        gp_period = pm.gp.Latent(mean_func=mean_t, cov_func=cov_t)

        cov_nugget = pm.gp.cov.WhiteNoise(tau2)
        gp_nugget = pm.gp.Latent(cov_func=cov_nugget)

        gp_t = gp_period + gp_nugget
        s = gp_t.prior('gp_t', X=time)  # time is already a column vector

        Y_obs = pm.Binomial('y_obs', N, pm.invlogit(s), observed=y)

        # Sample
        trace = pm.sample(mcmc_iter,
                          chains=1,
                          start=start,
                          tune=1000,
                          adapt_step_size=True)

        # Prediction
        time_pred = time_pred - offset  # avoid mutating the caller's array in place
        s_star = gp_t.conditional('s_star', time_pred[:, None])
        pi_star = pm.Deterministic('pi_star', pm.invlogit(s_star))
        pred = pm.sample_posterior_predictive(trace, var_names=['y_obs', 'pi_star'])

        return {'model': model, 'trace': trace, 'pred': pred}
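

# The trial_step below assumes the same module-level setup as Example 3:
# pymc3 as pm, theano.tensor as T, and a theano RandomStreams instance trng.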
def trial_step(info_A_tm1,info_A_t, # externally provided to function on each trial
            obs_choice_tm1,obs_choice_t,
            mag_1_t,mag_0_t,
            stabvol_t,rewpain_t,
            # outputs of this function passed back into it on next trial
            choice_tm1,# either generated or observed choice
            outcome_valence_tm1, # either generated or observed (although not used on input because immediately redefined, useful for storage)
            prob_choice_tm1, # internal state variables
            choice_val_tm1,
            estimate_tm1,
            choice_kernel_tm1,
            lr_tm1,lr_c_tm1,Gamma_tm1,Binv_tm1,Bc_tm1,mdiff_tm1,eps_tm1,
            lr_baseline,lr_goodbad,lr_stabvol,lr_rewpain, # variables accessible on all trials
            lr_goodbad_stabvol,lr_rewpain_goodbad,lr_rewpain_stabvol,
            lr_rewpain_goodbad_stabvol,
            lr_c_baseline,lr_c_goodbad,lr_c_stabvol,lr_c_rewpain, # variables accessible on all trials
            lr_c_goodbad_stabvol,lr_c_rewpain_goodbad,lr_c_rewpain_stabvol,
            lr_c_rewpain_goodbad_stabvol,
            Gamma_baseline,Gamma_goodbad,Gamma_stabvol,Gamma_rewpain,
            Gamma_goodbad_stabvol,Gamma_rewpain_goodbad,Gamma_rewpain_stabvol,
            Gamma_rewpain_goodbad_stabvol,
            Binv_baseline,Binv_goodbad,Binv_stabvol,Binv_rewpain,
            Binv_goodbad_stabvol,Binv_rewpain_goodbad,Binv_rewpain_stabvol,
            Binv_rewpain_goodbad_stabvol,
            Bc_baseline,Bc_goodbad,Bc_stabvol,Bc_rewpain,
            Bc_goodbad_stabvol,Bc_rewpain_goodbad,Bc_rewpain_stabvol,
            Bc_rewpain_goodbad_stabvol,
            mag_baseline,mag_rewpain,
            eps_baseline,eps_stabvol,eps_rewpain,eps_rewpain_stabvol,
            gen_indicator,B_max):
    '''
    Trial by Trial updates for the model

    '''

    # determine whether last trial had a good outcome:
    # +1 when the chosen option was the rewarded one, -1 otherwise
    outcome_valence_tm1 = choice_tm1*info_A_tm1 +\
                 (1.0-choice_tm1)*(1.0-info_A_tm1) +\
                 (1.0-choice_tm1)*info_A_tm1*(-1.0) + \
                 (choice_tm1)*(1.0-info_A_tm1)*(-1.0)

    # determine Gamma for this trial using last good outcome
    Gamma_t = Gamma_baseline + \
        outcome_valence_tm1*Gamma_goodbad + \
        stabvol_t*Gamma_stabvol + \
        rewpain_t*Gamma_rewpain + \
        outcome_valence_tm1*stabvol_t*Gamma_goodbad_stabvol + \
        outcome_valence_tm1*rewpain_t*Gamma_rewpain_goodbad + \
        stabvol_t*rewpain_t*Gamma_rewpain_stabvol + \
        outcome_valence_tm1*stabvol_t*rewpain_t*Gamma_rewpain_goodbad_stabvol

    Gamma_t = pm.invlogit(Gamma_t)*5 # [0,5]


    # Determine Binv for this trial using last good outcome
    Binv_t = Binv_baseline + \
        outcome_valence_tm1*Binv_goodbad + \
        stabvol_t*Binv_stabvol + \
        rewpain_t*Binv_rewpain + \
        outcome_valence_tm1*stabvol_t*Binv_goodbad_stabvol + \
        outcome_valence_tm1*rewpain_t*Binv_rewpain_goodbad + \
        stabvol_t*rewpain_t*Binv_rewpain_stabvol + \
        outcome_valence_tm1*stabvol_t*rewpain_t*Binv_rewpain_goodbad_stabvol

    Binv_t = T.exp(Binv_t)
    Binv_t = T.switch(Binv_t<0.1,0.1,Binv_t )
    Binv_t = T.switch(Binv_t>B_max.value,B_max.value,Binv_t)

    # Determine Bc for this trial using last good outcome
    Bc_t = Bc_baseline + \
        outcome_valence_tm1*Bc_goodbad + \
        stabvol_t*Bc_stabvol + \
        rewpain_t*Bc_rewpain + \
        outcome_valence_tm1*stabvol_t*Bc_goodbad_stabvol + \
        outcome_valence_tm1*rewpain_t*Bc_rewpain_goodbad + \
        stabvol_t*rewpain_t*Bc_rewpain_stabvol + \
        outcome_valence_tm1*stabvol_t*rewpain_t*Bc_rewpain_goodbad_stabvol

    #Bc_t = T.exp(Bc_t)
    #Bc_t = T.switch(Bc_t<0.1,0.1,Bc_t )
    Bc_t = T.switch(Bc_t>B_max.value,B_max.value,Bc_t)
    Bc_t = T.switch(Bc_t<-1*B_max.value,-1*B_max.value,Bc_t)

    # Calculate Choice
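    # Expected value of each option: outcome magnitude raised to the
    # nonlinearity exponent Gamma_t, weighted by the current probability estimate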

    ev_1_t = (T.abs_(mag_1_t)**Gamma_t)*estimate_tm1
    ev_0_t = (T.abs_(mag_0_t)**Gamma_t)*(1-estimate_tm1)
    evdiff_t = (ev_1_t - ev_0_t)

    mdiff_t = (mag_1_t-mag_0_t) # not used but passed on


    cdiff_t=(choice_kernel_tm1-(1.0-choice_kernel_tm1))

    choice_val_t = Binv_t*evdiff_t + Bc_t*cdiff_t

    # before Gamma, choice value goes between -1 and 1
    prob_choice_t = 1.0/(1.0+T.exp(-1.0*choice_val_t))

    # determine eps
    eps_t = eps_baseline + \
        stabvol_t*eps_stabvol + \
        rewpain_t*eps_rewpain + \
        stabvol_t*rewpain_t*eps_rewpain_stabvol

    eps_t = pm.invlogit(eps_t)

    # add epsilon
    prob_choice_t = eps_t*0.5+(1.0-eps_t)*prob_choice_t

    # Generate a choice or copy the participant's choice (used on the next trial as an indicator)
    if gen_indicator.value==0:
        choice_t = obs_choice_t
    else:
        #import pdb; pdb.set_trace()
        #trng = T.shared_randomstreams.RandomStreams(1234)
        choice_t = trng.binomial(n=1, p=prob_choice_t,dtype='float64')
        # this works, but I don't want every binomial to have the same seed, so I'd want to update by 1
        #rng_val = choice_t.rng.get_value(borrow=True)   # Get the rng for rv_u
        #rng_val.seed(seed.value)                         # seeds the generator
        #choice_t.rng.set_value(rng_val, borrow=True)


    # determine whether current trial is good or bad
    outcome_valence_t = choice_t*info_A_t +\
                 (1.0-choice_t)*(1.0-info_A_t) +\
                 (1.0-choice_t)*info_A_t*(-1.0) + \
                 (choice_t)*(1.0-info_A_t)*(-1.0)

    #import pdb; pdb.set_trace()
    lr_t = lr_baseline + \
        outcome_valence_t*lr_goodbad + \
        stabvol_t*lr_stabvol + \
        rewpain_t*lr_rewpain + \
        outcome_valence_t*stabvol_t*lr_goodbad_stabvol + \
        outcome_valence_t*rewpain_t*lr_rewpain_goodbad + \
        stabvol_t*rewpain_t*lr_rewpain_stabvol + \
        outcome_valence_t*stabvol_t*rewpain_t*lr_rewpain_goodbad_stabvol

    lr_t = pm.invlogit(lr_t)

    # update probability estimate; these are the estimates after the update on
    # trial t (stored differently than before)
    estimate_t = estimate_tm1 + lr_t*(info_A_t-estimate_tm1)

    # Choice kernel learning rate
    lr_c_t = lr_c_baseline + \
        outcome_valence_t*lr_c_goodbad + \
        stabvol_t*lr_c_stabvol + \
        rewpain_t*lr_c_rewpain + \
        outcome_valence_t*stabvol_t*lr_c_goodbad_stabvol + \
        outcome_valence_t*rewpain_t*lr_c_rewpain_goodbad + \
        stabvol_t*rewpain_t*lr_c_rewpain_stabvol + \
        outcome_valence_t*stabvol_t*rewpain_t*lr_c_rewpain_goodbad_stabvol

    lr_c_t = pm.invlogit(lr_c_t)

    choice_kernel_t =  choice_kernel_tm1 + lr_c_t*(choice_t - choice_kernel_tm1)

    return([choice_t,outcome_valence_t,prob_choice_t,choice_val_t,estimate_t,choice_kernel_t,lr_t,lr_c_t,Gamma_t,Binv_t,Bc_t,mdiff_t,eps_t])
Example 8


from pymc3 import Model, Normal, Binomial, invlogit, sample, forestplot
import numpy as np

# Data
n = np.ones(4)*5
y = np.array([0, 1, 3, 5])
dose = np.array([-.86,-.3,-.05,.73])

with Model() as bioassay_model:

    # Prior distributions for latent variables
    alpha = Normal('alpha', 0, sd=100)
    beta = Normal('beta', 0, sd=100)

    # Linear combinations of parameters
    theta = invlogit(alpha + beta*dose)

    # Model likelihood
    deaths = Binomial('deaths', n=n, p=theta, observed=y)
  




with bioassay_model:

    # Draw samples
    trace = sample(1000, njobs=2)
    # Plot two parameters
    forestplot(trace, varnames=['alpha', 'beta'])
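
# A possible follow-up (a sketch): numerical posterior summaries via PyMC3's
# summary helper.
from pymc3 import summary
summary(trace, varnames=['alpha', 'beta'])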
  
Example 9
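# Assumes the same module-level setup as Example 3: pymc3 as pm,
# theano.tensor as T, and a theano RandomStreams instance trng.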
def trial_step(
        info_tm1_A,
        info_t_A,  # externally provided to function on each trial
        info_tm1_B,
        info_t_B,
        obs_choice_tm1,
        obs_choice_t,
        mag_1_t,
        mag_0_t,
        stabvol_t,
        rewpain_t,
        # outputs of this function passed back into it on next trial
        choice_tm1,  # either generated or observed choice
        outcome_valence_tm1,  # either generated or observed (although not used on input because immediately redefined, useful for storage)
        prob_choice_tm1,  # internal state variables
        choice_val_tm1,
        estimate_tm1_A,  #
        estimate_tm1_B,
        Ga_tm1,
        Ba_tm1,
        Gb_tm1,
        Bb_tm1,
        choice_kernel_tm1,
        lr_tm1,
        lr_c_tm1,
        Amix_tm1,
        Binv_tm1,
        Bc_tm1,
        decay_tm1,
        mdiff_tm1,
        eps_tm1,
        lr_baseline,
        lr_goodbad,
        lr_stabvol,
        lr_rewpain,  # variables accessible on all trials
        lr_goodbad_stabvol,
        lr_rewpain_goodbad,
        lr_rewpain_stabvol,
        lr_rewpain_goodbad_stabvol,
        lr_c_baseline,
        lr_c_goodbad,
        lr_c_stabvol,
        lr_c_rewpain,  # variables accessible on all trials
        lr_c_goodbad_stabvol,
        lr_c_rewpain_goodbad,
        lr_c_rewpain_stabvol,
        lr_c_rewpain_goodbad_stabvol,
        Amix_baseline,
        Amix_goodbad,
        Amix_stabvol,
        Amix_rewpain,
        Amix_goodbad_stabvol,
        Amix_rewpain_goodbad,
        Amix_rewpain_stabvol,
        Amix_rewpain_goodbad_stabvol,
        Binv_baseline,
        Binv_goodbad,
        Binv_stabvol,
        Binv_rewpain,
        Binv_goodbad_stabvol,
        Binv_rewpain_goodbad,
        Binv_rewpain_stabvol,
        Binv_rewpain_goodbad_stabvol,
        Bc_baseline,
        Bc_goodbad,
        Bc_stabvol,
        Bc_rewpain,
        Bc_goodbad_stabvol,
        Bc_rewpain_goodbad,
        Bc_rewpain_stabvol,
        Bc_rewpain_goodbad_stabvol,
        decay_baseline,
        decay_stabvol,
        decay_rewpain,
        decay_rewpain_stabvol,
        mag_baseline,
        mag_rewpain,
        eps_baseline,
        eps_stabvol,
        eps_rewpain,
        eps_rewpain_stabvol,
        gen_indicator,
        B_max):
    '''
    Trial by Trial updates for the model

    '''

    # determine whether last trial had a good outcome:
    # +1 when the chosen option was the rewarded one, -1 otherwise
    outcome_valence_tm1 = choice_tm1*info_tm1_A +\
                 (1.0-choice_tm1)*(info_tm1_B) +\
                 (1.0-choice_tm1)*info_tm1_A*(-1.0) + \
                 (choice_tm1)*(info_tm1_B)*(-1.0)

    # determine Amix for this trial using last good outcome
    Amix_t = Amix_baseline + \
        outcome_valence_tm1*Amix_goodbad + \
        stabvol_t*Amix_stabvol + \
        rewpain_t*Amix_rewpain + \
        outcome_valence_tm1*stabvol_t*Amix_goodbad_stabvol + \
        outcome_valence_tm1*rewpain_t*Amix_rewpain_goodbad + \
        stabvol_t*rewpain_t*Amix_rewpain_stabvol + \
        outcome_valence_tm1*stabvol_t*rewpain_t*Amix_rewpain_goodbad_stabvol

    Amix_t = pm.invlogit(Amix_t)

    # Determine Binv for this trial using last good outcome
    Binv_t = Binv_baseline + \
        outcome_valence_tm1*Binv_goodbad + \
        stabvol_t*Binv_stabvol + \
        rewpain_t*Binv_rewpain + \
        outcome_valence_tm1*stabvol_t*Binv_goodbad_stabvol + \
        outcome_valence_tm1*rewpain_t*Binv_rewpain_goodbad + \
        stabvol_t*rewpain_t*Binv_rewpain_stabvol + \
        outcome_valence_tm1*stabvol_t*rewpain_t*Binv_rewpain_goodbad_stabvol

    Binv_t = T.exp(Binv_t)
    Binv_t = T.switch(Binv_t < 0.1, 0.1, Binv_t)
    Binv_t = T.switch(Binv_t > B_max.value, B_max.value, Binv_t)

    # Determine Bc for this trial using last good outcome
    Bc_t = Bc_baseline + \
        outcome_valence_tm1*Bc_goodbad + \
        stabvol_t*Bc_stabvol + \
        rewpain_t*Bc_rewpain + \
        outcome_valence_tm1*stabvol_t*Bc_goodbad_stabvol + \
        outcome_valence_tm1*rewpain_t*Bc_rewpain_goodbad + \
        stabvol_t*rewpain_t*Bc_rewpain_stabvol + \
        outcome_valence_tm1*stabvol_t*rewpain_t*Bc_rewpain_goodbad_stabvol

    #Bc_t = T.exp(Bc_t)
    #Bc_t = T.switch(Bc_t<0.1,0.1,Bc_t )
    Bc_t = T.switch(Bc_t > B_max.value, B_max.value, Bc_t)
    Bc_t = T.switch(Bc_t < -1 * B_max.value, -1 * B_max.value, Bc_t)

    # Calculate Choice
    mdiff_t = (mag_1_t - mag_0_t)

    Mag_t = T.exp(mag_baseline + rewpain_t * mag_rewpain)
    Mag_t = T.switch(Mag_t < 0.1, 0.1, Mag_t)
    Mag_t = T.switch(Mag_t > 10, 10, Mag_t)

    mdiff_t = T.sgn(mdiff_t) * T.abs_(mdiff_t)**Mag_t

    pdiff_t = (estimate_tm1_A - estimate_tm1_B)
    cdiff_t = (choice_kernel_tm1 - (1.0 - choice_kernel_tm1))

    choice_val_t = Binv_t * ((1 - Amix_t) * mdiff_t +
                             (Amix_t) * pdiff_t) + Bc_t * cdiff_t

    # before Amix, choice value goes between -1 and 1
    prob_choice_t = 1.0 / (1.0 + T.exp(-1.0 * choice_val_t))

    # determine eps
    eps_t = eps_baseline + \
        stabvol_t*eps_stabvol + \
        rewpain_t*eps_rewpain + \
        stabvol_t*rewpain_t*eps_rewpain_stabvol

    eps_t = pm.invlogit(eps_t)

    # add epsilon
    prob_choice_t = eps_t * 0.5 + (1.0 - eps_t) * prob_choice_t

    # Generate a choice or copy the participant's choice (used on the next trial as an indicator)
    if gen_indicator.value == 0:
        choice_t = obs_choice_t
    else:
        #import pdb; pdb.set_trace()
        #trng = T.shared_randomstreams.RandomStreams(1234)
        choice_t = trng.binomial(n=1, p=prob_choice_t, dtype='float64')
        # this works, but I don't want every binomial to have the same seed, so I'd want to update by 1
        #rng_val = choice_t.rng.get_value(borrow=True)   # Get the rng for rv_u
        #rng_val.seed(seed.value)                         # seeds the generator
        #choice_t.rng.set_value(rng_val, borrow=True)

    # determine whether current trial is good or bad
    outcome_valence_t = choice_t*info_t_A +\
                 (1.0-choice_t)*(info_t_B) +\
                 (1.0-choice_t)*info_t_A*(-1.0) + \
                 (choice_t)*(info_t_B)*(-1.0)

    #import pdb; pdb.set_trace()
    lr_t = lr_baseline + \
        outcome_valence_t*lr_goodbad + \
        stabvol_t*lr_stabvol + \
        rewpain_t*lr_rewpain + \
        outcome_valence_t*stabvol_t*lr_goodbad_stabvol + \
        outcome_valence_t*rewpain_t*lr_rewpain_goodbad + \
        stabvol_t*rewpain_t*lr_rewpain_stabvol + \
        outcome_valence_t*stabvol_t*rewpain_t*lr_rewpain_goodbad_stabvol

    #lr_t = pm.invlogit(lr_t)
    lr_t = pm.invlogit(lr_t) * 5

    # determine decay on current trial
    decay_t = decay_baseline + \
        stabvol_t*decay_stabvol + \
        rewpain_t*decay_rewpain + \
        stabvol_t*rewpain_t*decay_rewpain_stabvol

    decay_t = pm.invlogit(decay_t)

    # update the Beta Parameters
    Ga_t = decay_t * Ga_tm1 + lr_t * (choice_t * info_t_A)
    Ba_t = decay_t * Ba_tm1 + lr_t * (choice_t * (1 - info_t_A))

    Gb_t = decay_t * Gb_tm1 + lr_t * (
        (1 - choice_t) * (info_t_B))  # chose B and B rewarded
    Bb_t = decay_t * Bb_tm1 + lr_t * (
        (1 - choice_t) * (1 - info_t_B))  # chose B and B not rewarded

    # update the probability estimates of the shape chosen: each is the mean of
    # a Beta(G+1, B+1) posterior over the decayed reward/no-reward pseudo-counts
    estimate_t_A = (Ga_t + 1) / (Ga_t + Ba_t + 2)
    estimate_t_B = (Gb_t + 1) / (Gb_t + Bb_t + 2)

    # Choice kernel learning rate
    lr_c_t = lr_c_baseline + \
        outcome_valence_t*lr_c_goodbad + \
        stabvol_t*lr_c_stabvol + \
        rewpain_t*lr_c_rewpain + \
        outcome_valence_t*stabvol_t*lr_c_goodbad_stabvol + \
        outcome_valence_t*rewpain_t*lr_c_rewpain_goodbad + \
        stabvol_t*rewpain_t*lr_c_rewpain_stabvol + \
        outcome_valence_t*stabvol_t*rewpain_t*lr_c_rewpain_goodbad_stabvol

    lr_c_t = pm.invlogit(lr_c_t)

    choice_kernel_t = choice_kernel_tm1 + lr_c_t * (choice_t -
                                                    choice_kernel_tm1)

    return ([
        choice_t, outcome_valence_t, prob_choice_t, choice_val_t, estimate_t_A,
        estimate_t_B, Ga_t, Ba_t, Gb_t, Bb_t, choice_kernel_t, lr_t, lr_c_t,
        Amix_t, Binv_t, Bc_t, decay_t, mdiff_t, eps_t
    ])
Example 10
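# Synthetic stand-in for price_data, which the surrounding project builds
# upstream (columns inferred from their use below):
import numpy as np
import pandas as pd

rng = np.random.RandomState(7)
price = rng.uniform(1., 10., 500)
buy = rng.binomial(1, 1. / (1. + np.exp(-(3. - 0.6 * price))))
price_data = pd.DataFrame({'price': price, 'buy': buy})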
import pymc3 as pm
with pm.Model() as pooled_model:

    # priors for the intercept (b0) and the price coefficient (b1)
    b0 = pm.Normal('b0', mu=0, sigma=50)
    b1 = pm.Lognormal('b1', mu=0, sigma=50)

    # compute the purchase probability for each incidence
    prob = pm.Deterministic('prob', pm.invlogit(b0 - b1 * price_data['price']))
    likelihood = pm.Bernoulli('likelihood', p=prob, observed=price_data['buy'])
    posterior = pm.sample(draws=500, tune=500)

# pm.model_to_graphviz(pooled_model)
Example 11
def wmcap_morey_cowan(data, formulae, scale=5):
    r"""Constructs a Bayesian hierarchical model for the estimation of working-
    memory capacity, bias, and lapse rate using the method described by Morey
    for Cowan-style change detection tasks. This is a "fixed-effects" version
    of the model, which is best suited to studies with between-subjects
    designs.

    Args:
        data (pd.DataFrame): Data.
        formulae (list): List of patsy-style formulae; e.g.,
            `kappa ~ C(subject)`. Accepts up to three formulae for $\kappa$,
            $\gamma$, and/or $\zeta$. Missing formulae will default to an
            intercept-only model.
        scale (float): A scale parameter for all the stochastic nodes. Defaults
            to `5`.

    Returns:
        None: All model components are placed in the context.

    """

    # "Compress" the data so we can create the correct amount stochastic nodes
    # later on.

    data = compress(data, formulae)

    # Make a dictionary out of the formulae so they can be indexed easier.

    param_names = ['kappa', 'gamma', 'zeta']
    dic = {p: '1' for p in param_names}
    dic.update(
        {f.split('~')[0].replace(' ', ''): f.split('~')[1]
         for f in formulae})

    # Loop over parameters in the decision model. This is just less code than
    # hard-coding each one.

    for p in param_names:

        # Construct a linear model for the predictions on the parameter.

        dm = dmatrix(dic[p], data)
        X = np.asarray(dm)
        covs = dm.design_info.column_names
        beta = []

        # Make regression coefficients for the linear model.

        for cov in covs:

            # Nicely format the name of the coefficient.

            c = ''.join(i for i in cov if i not in '"\',$')
            name = r'$\beta_{(\%s)_\mathrm{%s}}$' % (p, c)
            beta.append(pm.Cauchy(name=name, alpha=0, beta=scale))

        # Make the predictions of the linear model on the parameter.

        mu = dot(X, tt.stack(*beta))

        # Make delta vector of parameter.

        name = r'$\delta_{(\%s)}$' % p
        dm_ = dm_for_lower_stochastics(data)
        X_ = np.asarray(dm_)
        delta_ = pm.Normal(name=name, mu=0, sd=1., shape=X_.shape[1])
        delta = dot(X_, delta_)

        # Make sigma of parameter.

        sigma = pm.HalfCauchy(name=r'$\sigma_{(\%s)}$' % p, beta=scale)

        # Make the parameter.

        dic[p] = pm.Deterministic(r'$\%s$' % p, mu + delta * sigma)

    # Transform parameters into meaningful decision parameters.

    zeros = np.zeros(len(data))
    k = pm.Deterministic('$k$', tt.max((dic['kappa'], zeros), axis=0))
    g = pm.Deterministic('$g$', pm.invlogit(dic['gamma']))
    z = pm.Deterministic('$z$', pm.invlogit(dic['zeta']))

    # Calculate hit and false-alarm probabilities.
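    # With probability z the observer attends: a probed item is in memory with
    # probability q = min(k / set_size, 1), yielding a hit directly, and
    # otherwise the response falls back on guessing with rate g. With
    # probability (1 - z) the observer lapses and simply guesses with rate g.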

    q = tt.min((k / data.set_size.values, zeros + 1), axis=0)
    f = (1 - z) * g + z * (1 - q) * g
    h = (1 - z) * g + z * q + z * (1 - q) * g

    # Construct the Likelihoods.

    pm.Binomial(name='$H$',
                p=h,
                n=data.different_trials.values,
                observed=data.hits.values)
    pm.Binomial(name='$F$',
                p=f,
                n=data.same_trials.values,
                observed=data.false_alarms.values)