def calc_trial_like(self, trial, save_post=False):
        # compute the likelihood of this trial's observed choice
        # map the response onto a choice code
        if trial.Choice == 'Gamble':
            choice = np.array([1])
        elif trial.Choice == 'Certain':
            choice = np.array([0])
        else:
            # they made no choice
            # we could consider skipping these
            choice = np.array([2])

        # calc the like
        if self.ignore_non_resp and choice[0] == 2:
            log_like = 0.0

        else:
            #### Exclusion of RT (likelihood depends only on choices)
            p_c = dists.invlogit(trial['v_mean'])  # prob. of choosing the gamble
            # Bernoulli likelihood of the observed choice
            likelihood = (p_c**choice) * ((1 - p_c)**(1 - choice))
            log_like = np.log(likelihood)


            # the original RT-based WFPT likelihood, disabled in this
            # choice-only variant:
            # log_like = np.log(wfpt_like(choice, np.array([trial.RT]),
            #                             v_mean=trial['Ediff'], a=self.params['a'],
            #                             w_mode=self.params['w'], t0=self.params['t0'],
            #                             nsamp=self.wfpt_nsamp,
            #                             max_time=self.max_time,
            #                             trange_nsamp=self.trange_nsamp))[0]

        # if the trial is also a mood trial, add in the likelihood of the
        # observed mood rating under the mood model
        if np.isnan(trial['Mood']):
            mood_log_like = 0.0
        else:
            # map the 0-1000 mood rating onto the real line
            curr_mood = dists.logit(trial['Mood'] / 1000)

            ### Modification for P_win
            pred_mood = self.params['b'] + self.params['w_p'] * trial['P_win']

            # Gaussian log-likelihood of the observed mood
            mood_log_like = np.log(
                norm.pdf(curr_mood, pred_mood, np.sqrt(self.params['s_v'])))
            log_like = log_like + mood_log_like

        # see if running conditional sim
        if save_post:
            # run wfpt_gen
            choices, rts = wfpt_gen(v_mean=trial['Ediff'],
                                    a=self.params['a'],
                                    w_mode=self.params['w'],
                                    wfpt_nsamp=self.wfpt_nsamp,
                                    nsamp=self.gen_nsamp,
                                    trange=np.linspace(
                                        0, self.max_time - self.params['t0'],
                                        self.trange_nsamp))

            # calc prob of making the observed choice
            ind = choices == choice
            p_choice = ind.mean()

            # calc mean log rt
            choice_mean_log_rt = np.log(rts[ind] + self.params['t0']).mean()

            return log_like, p_choice, choice_mean_log_rt

        return log_like, mood_log_like
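
A minimal sketch (not from the source) of the choice-only Bernoulli likelihood computed above, with made-up values for v_mean and the observed choice; scipy.special.expit stands in for dists.invlogit:

import numpy as np
from scipy.special import expit  # logistic function, standing in for dists.invlogit

v_mean = 0.8          # hypothetical drift: positive favors the gamble
choice = 1            # 1 = 'Gamble', 0 = 'Certain'
p_c = expit(v_mean)   # probability of choosing the gamble
log_like = np.log(p_c**choice * (1 - p_c)**(1 - choice))
print(log_like)       # log-probability of the observed choice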
Example 2
def mmi_model_Mood_Decoup(subj_input_dir, subj_out_dir, sid):
    # import some useful libraries
    import numpy as np                # numerical analysis linear algebra
    import pandas as pd               # efficient tables
    #import matplotlib.pyplot as plt   # plotting
    from scipy import stats

    from RunDEMC.density import kdensity
    from RunDEMC import Model, Param, dists, calc_bpic, joint_plot

    from mmi_mood_Decoup_OL import MMIModel, load_mmi_data

    from joblib import Parallel, delayed

    import pickle
    from pathlib import Path

    try:
        import scoop
        from scoop import futures
    except ImportError:
        print("Error loading scoop, reverting to joblib.")
        scoop = None


    # ## Load and examine data


    subj_input_dir = Path(subj_input_dir)
    subj_out_dir = Path(subj_out_dir)

    pattern = f'{sid}.csv'
    input_file = list(subj_input_dir.glob(pattern))
    print(input_file[0])
    dat = load_mmi_data(input_file[0])



    # find the minimum RT for the param range
    min_RT = dat.loc[dat.RT>0, 'RT'].min()
    print('Min RT:', min_RT)


    # ## Use RunDEMC to fit the model to a participant's data



    # define model evaluation functions for RunDEMC
    def eval_mod(params, param_names, bdat=None):
        # use the module-level dat if none is passed in
        if bdat is None:
            bdat = dat

        # turn the param list into a dict
        mod_params = {x: params[n]
                      for n, x in enumerate(param_names)}
        
        # reject parameter proposals outside their valid ranges
        # (np.log(0) evaluates to -inf, giving the proposal zero weight)
        bad = (np.log(0), np.log(0))
        if not (0 <= mod_params['lambda'] < 1):
            return bad
        if not (0 <= mod_params['gamma'] < np.inf):
            return bad
        if not (0 <= mod_params['beta'] <= 1):
            return bad
        if not (0 <= mod_params['p0'] <= 1):
            return bad
        if not (0 <= mod_params['w_CA'] < np.inf):
            return bad
        if not (0 <= mod_params['w_EG'] < np.inf):
            return bad
        if not (0 <= mod_params['w_RPE'] < np.inf):
            return bad
        if not np.isfinite(mod_params['b']):
            return bad
        if not (0 < mod_params['s_v'] < np.inf):
            return bad
        if not (0 <= mod_params['w'] <= 1):
            return bad
        if not (0 < mod_params['a'] < np.inf):
            return bad
        if not (0 < mod_params['t0'] < min_RT):
            return bad
        
        # calculate the summed log-likes for choices and moods
        mod_res = MMIModel(params=mod_params,
                           ignore_non_resp=True).proc_trials(bdat.copy())
        ll = mod_res.log_like.sum()
        mood_ll = mod_res.mood_log_like.sum()

        return ll, mood_ll

    # this is the required def for RunDEMC
    def eval_fun(pop, *args):
        bdat = args[0]
        pnames = args[1]
        if scoop and scoop.IS_RUNNING:
            res_tups = list(futures.map(eval_mod, [indiv for indiv in pop],
                                        [pnames]*len(pop), [bdat]*len(pop)))
        else:
            # use joblib
            res_tups = Parallel(n_jobs=-1)(delayed(eval_mod)(indiv, pnames, bdat)
                                           for indiv in pop)

        # split the (log_like, mood_log_like) tuples into two arrays
        likes = np.array([rt[0] for rt in res_tups])
        mood_likes = np.array([rt[1] for rt in res_tups])

        return likes, mood_likes
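
    # note: RunDEMC forwards like_args to this function as *args; like_args
    # is set to (dat, pnames) when the Model is constructed below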


    def get_best_fit(m, burnin=250, verbose=True):
        best_ind = m.weights[burnin:].argmax()
        if verbose:
            print("Weight:", m.weights[burnin:].ravel()[best_ind])
        indiv = [m.particles[burnin:,:,i].ravel()[best_ind] 
                 for i in range(m.particles.shape[-1])]
        pp = {}
        for p,v in zip(m.param_names,indiv):
            pp[p] = v
            if verbose:
                print('"%s": %f,'%(p,v))
        return pp


    # ### Base model




    # set up model params
    params = [Param(name='gamma',
                    display_name=r'$\gamma$',
                    prior=dists.gamma(1.5, 0.5),
                    ),
              Param(name='beta',
                    display_name=r'$\beta$',
                    prior=dists.normal(0, 1.4),
                    transform=dists.invlogit
                    ),
              Param(name='w',
                    display_name=r'$w$',
                    prior=dists.normal(0, 1.4),
                    transform=dists.invlogit
                    ),
              Param(name='a',
                    display_name=r'$a$',
                    prior=dists.gamma(2.0, 0.5),
                    ),
              Param(name='t0',
                    display_name=r'$t_0$',
                    prior=dists.normal(0, 1.4),
                    transform=lambda x: dists.invlogit(x)*min_RT,
                    ),
              Param(name='p0',
                    display_name=r'$p_0$',
                    prior=dists.normal(0, 1.4),
                    transform=dists.invlogit
                    ),
              Param(name='lambda',
                    display_name=r'$\lambda$',
                    prior=dists.normal(0, 1.4),
                    transform=dists.invlogit
                    ),
              Param(name='w_CA',
                    display_name=r'$w_{CA}$',
                    prior=dists.normal(0, 1),
                    transform=np.exp,
                    inv_transform=np.log),
              Param(name='w_EG',
                    display_name=r'$w_{EG}$',
                    prior=dists.normal(0, 1),
                    transform=np.exp,
                    inv_transform=np.log),
              Param(name='w_RPE',
                    display_name=r'$w_{RPE}$',
                    prior=dists.normal(0, 1),
                    transform=np.exp,
                    inv_transform=np.log),
              Param(name='b',
                    display_name=r'$b$'),
                    prior=dists.normal(0, 3)),
              Param(name='s_v',
                    display_name=r'$s_v$',
                    prior=dists.exp(3))
            ]
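
    # note: a normal(0, 1.4) prior pushed through invlogit gives a roughly
    # uniform prior on (0, 1); the exp/log transform pairs let the strictly
    # positive weights (w_CA, w_EG, w_RPE) be sampled on an unconstrained scale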
    # grab the param names
    pnames = [p.name for p in params]
    m = Model('mmi', params=params,
              like_fun=eval_fun,
              like_args=(dat, pnames),
              init_multiplier=3,
              # num_chains=gsize,
              verbose=True)

    # do some burnin
    times = m.sample(150, burnin=True)

    # now map the posterior
    times = m.sample(650, burnin=False)




    # save the chain weights (mixing/convergence plot disabled)
    # plt.plot(m.weights[30:], alpha=.3)
    pickle_name = subj_out_dir / f'mWgt_{sid}.pickle'
    print(pickle_name)
    with open(pickle_name, "wb") as pickle_out:
        pickle.dump(m.weights, pickle_out)

    #print("Best fitting params:")
    #pp = get_best_fit(m, burnin=250, verbose=True)
    print("Best fitting params:")
    pp = get_best_fit(m, burnin=250, verbose=True)
    pickle_name=subj_out_dir / f'mBFprm_{sid}.pickle'
    print(pickle_name)
    pickle_out = open(pickle_name,"wb")
    pickle.dump(pp, pickle_out)
    pickle_out.close()

    #### BPIC calculations
    burnin = 250
    
    log_like_prior = m.weights - m.log_likes
    # m.posts holds the mood-only log-likelihood for each sample
    weight_mood = m.posts + log_like_prior
    Mood_BPIC = calc_bpic(weight_mood[burnin:])['bpic']
    print("Mood_BPIC :", Mood_BPIC)
    pickle_name = subj_out_dir / f'Mood_BPIC_{sid}.pickle'
    print(pickle_name)
    with open(pickle_name, "wb") as pickle_out:
        pickle.dump(Mood_BPIC, pickle_out)

    log_like_RT = m.log_likes - m.posts
    weight_RT = log_like_RT + log_like_prior
    RT_BPIC = calc_bpic(weight_RT[burnin:])['bpic']
    print("RT_BPIC :", RT_BPIC)
    pickle_name = subj_out_dir / f'RT_BPIC_{sid}.pickle'
    print(pickle_name)
    with open(pickle_name, "wb") as pickle_out:
        pickle.dump(RT_BPIC, pickle_out)

   
    mBPIC = calc_bpic(m.weights[burnin:])['bpic']
    print("Total_BPIC :", mBPIC)
    pickle_name = subj_out_dir / f'Total_BPIC_{sid}.pickle'
    print(pickle_name)
    with open(pickle_name, "wb") as pickle_out:
        pickle.dump(mBPIC, pickle_out)






    # plot the param distributions (note, we did not get full posteriors, yet)
#     plt.figure(figsize=(12,10))
#     burnin=30
#     for i in range(len(m.param_names)):
#         plt.subplot(3,4,i+1)
#         plt.hist(m.particles[burnin:, :, i].flatten(), bins='auto', alpha=.5, density=True);
#         plt.title(m.param_names[i])



    pickle_name = subj_out_dir / f'mParticles_{sid}.pickle'
    print(pickle_name)
    with open(pickle_name, "wb") as pickle_out:
        pickle.dump(m.particles, pickle_out)

    pickle_name = subj_out_dir / f'mParams_{sid}.pickle'
    print(pickle_name)
    with open(pickle_name, "wb") as pickle_out:
        pickle.dump(m.param_names, pickle_out)
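
A minimal sketch (with made-up numbers, not from the source) of the log-weight decomposition used in the BPIC section above: the sampler's weights are log-likelihood plus log-prior, m.posts holds the mood-only log-likelihood, and the RT component is recovered by subtraction:

import numpy as np

# hypothetical per-sample quantities standing in for m.weights, m.log_likes, m.posts
weights = np.array([-120.0, -118.5])    # log posterior weight = log_like + log_prior
log_likes = np.array([-115.0, -113.0])  # total data log-likelihood (RT + Mood)
posts = np.array([-40.0, -39.0])        # mood-only log-likelihood

log_like_prior = weights - log_likes              # the log-prior term
weight_mood = posts + log_like_prior              # mood-only posterior weight
weight_RT = (log_likes - posts) + log_like_prior  # RT-only posterior weight
print(weight_mood, weight_RT)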
Example 3
        ind = (dat.condition == c) & (dat.rt < max_rt)
        d = {
            'rt': np.log(np.array(dat[ind].rt) + log_shift),
            'resp': np.array(~dat[ind]['correct'], dtype=int)
        }
        ddat[c] = d

    # store minimum RT
    min_rt = dat[(dat['rt'] >= 0.)]['rt'].min()

    # define priors
    params = [
        Param(name='r',
              display_name=r'$r$',
              prior=dists.normal(-2.0, 1.0),
              transform=lambda x: dists.invlogit(x) * (20. - 0.) + (0.)),
        Param(name='p',
              display_name=r'$p$',
              prior=dists.normal(-0.8, 1.2),
              transform=lambda x: dists.invlogit(x) * (20. - 0.) + (0.)),
        Param(name='sd0',
              display_name=r'$\sigma_0$',
              prior=dists.normal(-1.0, 1.2),
              transform=lambda x: dists.invlogit(x) * (30. - 0.1) + (0.1)),
        Param(name='K',
              display_name=r'$K$',
              prior=dists.normal(0.0, 1.4),
              transform=dists.invlogit),
        Param(name='L',
              display_name=r'$L$',
              prior=dists.normal(0.0, 1.4),