Code Example #1
    def calc_trial_like(self, trial, save_post=False):
        # Compute likelihood from reaction time
        # see what response was made and map it to the choice
        if trial.Choice == 'Gamble':
            choice = np.array([1])
        elif trial.Choice == 'Certain':
            choice = np.array([2])
        else:
            # they made no choice
            # we could consider skipping these
            choice = np.array([0])

        # calc the like
        if self.ignore_non_resp and choice == np.array([0]):
            log_like = 0.0
        else:
            # calc the log like
            log_like = np.log(
                wfpt_like(choice,
                          np.array([trial.RT]),
                          v_mean=trial['Ediff'],
                          a=self.params['a'],
                          w_mode=self.params['w'],
                          t0=self.params['t0'],
                          nsamp=self.wfpt_nsamp,
                          max_time=self.max_time,
                          trange_nsamp=self.trange_nsamp))[0]

        # * * * * * * * * * * * * * * * * * * * * * * * * * * * *
        # if the trial also has a mood rating, add in the likelihood
        # of the mood model, too
        # * * * * * * * * * * * * * * * * * * * * * * * * * * * *
        if np.isnan(trial['Mood']):
            # no mood rating on this trial
            mood_log_like = 0.0
        else:
            # rescale the mood rating and map it onto the real line
            curr_mood = dists.logit(trial['Mood'] / 1000)

            # predicted mood is a linear function of the LTA and RPE sums
            pred_mood = self.params['b'] + \
                        self.params['w_LTA'] * trial['LTA_sum'] + \
                        self.params['w_RPE'] * trial['RPE_sum']

            # Gaussian log likelihood of the observed mood, kept on its own
            # and also added into the trial log likelihood
            mood_log_like = np.log(
                norm.pdf(curr_mood, pred_mood, np.sqrt(self.params['s_v'])))
            log_like = log_like + mood_log_like

        # see if running conditional sim
        if save_post:
            # run wfpt_gen
            choices, rts = wfpt_gen(v_mean=trial['Ediff'],
                                    a=self.params['a'],
                                    w_mode=self.params['w'],
                                    wfpt_nsamp=self.wfpt_nsamp,
                                    nsamp=self.gen_nsamp,
                                    trange=np.linspace(
                                        0, self.max_time - self.params['t0'],
                                        self.trange_nsamp))

            # calc prob of making the observed choice
            ind = choices == choice
            p_choice = ind.mean()

            # calc mean log rt
            choice_mean_log_rt = np.log(rts[ind] + self.params['t0']).mean()

            return log_like, p_choice, choice_mean_log_rt

        return log_like, mood_log_like
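For reference, the mood term used above can be checked in isolation. The sketch below is illustrative only: scipy.special.logit and scipy.stats.norm.logpdf stand in for dists.logit and np.log(norm.pdf(...)), and the parameter and trial values (b, w_LTA, w_RPE, s_v, the mood rating, LTA_sum, RPE_sum) are made up.

    # Standalone sketch of the mood log-likelihood term (illustrative values only)
    import numpy as np
    from scipy.special import logit
    from scipy.stats import norm

    # hypothetical parameter values and trial fields
    b, w_LTA, w_RPE, s_v = 0.1, 0.5, 0.3, 0.25
    mood_rating, lta_sum, rpe_sum = 650.0, 0.4, -0.2

    curr_mood = logit(mood_rating / 1000)      # map the rating onto the real line
    pred_mood = b + w_LTA * lta_sum + w_RPE * rpe_sum

    # log N(curr_mood | pred_mood, sqrt(s_v)); logpdf avoids log(0) underflow
    mood_log_like = norm.logpdf(curr_mood, pred_mood, np.sqrt(s_v))
    print(mood_log_like)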
Code Example #2
    def calc_trial_like(self, trial, save_post=False):
        # Compute likelihood from reaction time
        # see what response was made and map it to the choice
        if trial.Choice == 'Gamble':
            choice = np.array([1])
        elif trial.Choice == 'Certain':
            choice = np.array([0])
        else:
            # they made no choice
            # we could consider skipping these
            choice = np.array([2])

        # calc the like
        if self.ignore_non_resp and choice == np.array([2]):
            log_like = 0.0
        else:
            # RT is excluded here, so the likelihood depends only on the choice
            # (Bernoulli likelihood of choosing the gamble)
            p_c = dists.invlogit(trial['v_mean'])  # prob. of choosing the gamble
            likelihood = (p_c**choice) * ((1 - p_c)**(1 - choice))
            log_like = np.log(likelihood)[0]

            # original RT-based log likelihood, kept here for reference:
            # log_like = np.log(wfpt_like(choice, np.array([trial.RT]),
            #                             v_mean=trial['Ediff'], a=self.params['a'],
            #                             w_mode=self.params['w'], t0=self.params['t0'],
            #                             nsamp=self.wfpt_nsamp,
            #                             max_time=self.max_time,
            #                             trange_nsamp=self.trange_nsamp))[0]

        # * * * * * * * * * * * * * * * * * * * * * * * * * * * *
        # if the trial also has a mood rating, add in the likelihood
        # of the mood model, too
        # * * * * * * * * * * * * * * * * * * * * * * * * * * * *
        if np.isnan(trial['Mood']):
            # no mood rating on this trial
            mood_log_like = 0.0
        else:
            # rescale the mood rating and map it onto the real line
            curr_mood = dists.logit(trial['Mood'] / 1000)

            # modification: mood is predicted from the win probability (P_win)
            pred_mood = self.params['b'] + self.params['w_p'] * trial['P_win']

            # Gaussian log likelihood of the observed mood, kept on its own
            # and also added into the trial log likelihood
            mood_log_like = np.log(
                norm.pdf(curr_mood, pred_mood, np.sqrt(self.params['s_v'])))
            log_like = log_like + mood_log_like

        # see if running conditional sim
        if save_post:
            # run wfpt_gen
            choices, rts = wfpt_gen(v_mean=trial['Ediff'],
                                    a=self.params['a'],
                                    w_mode=self.params['w'],
                                    wfpt_nsamp=self.wfpt_nsamp,
                                    nsamp=self.gen_nsamp,
                                    trange=np.linspace(
                                        0, self.max_time - self.params['t0'],
                                        self.trange_nsamp))

            # calc prob of making the observed choice
            ind = choices == choice
            p_choice = ind.mean()

            # calc mean log rt
            choice_mean_log_rt = np.log(rts[ind] + self.params['t0']).mean()

            return log_like, p_choice, choice_mean_log_rt

        return log_like, mood_log_like
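The RT-free variant above reduces the decision part of the model to a Bernoulli likelihood on the choice. A minimal sketch of that calculation, with scipy.special.expit standing in for dists.invlogit and a made-up drift value:

    # Choice-only (Bernoulli) likelihood sketch; expit stands in for dists.invlogit
    import numpy as np
    from scipy.special import expit

    v_mean = 0.8                     # hypothetical value difference / drift
    choice = np.array([1])           # this variant codes 1 = gamble, 0 = certain

    p_c = expit(v_mean)              # probability of choosing the gamble
    likelihood = (p_c ** choice) * ((1 - p_c) ** (1 - choice))
    log_like = np.log(likelihood)[0]
    print(p_c, log_like)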
Code Example #3
    def calc_trial_like(self, trial, save_post=False):
        # Compute likelihood from reaction time
        # see what response was made and map it to the choice
        if trial.Choice == 'Gamble':
            choice = np.array([1])
        elif trial.Choice == 'Certain':
            choice = np.array([2])
        else:
            # they made no choice
            # we could consider skipping these
            choice = np.array([0])

        # calc the like
        if self.ignore_non_resp and choice == np.array([0]):
            log_like = 0.0
        else:
            # calc the log like
            log_like = np.log(
                wfpt_like(choice,
                          np.array([trial.RT]),
                          v_mean=trial['Ediff'],
                          a=self.params['a'],
                          w_mode=self.params['w'],
                          t0=self.params['t0'],
                          nsamp=self.wfpt_nsamp,
                          max_time=self.max_time,
                          trange_nsamp=self.trange_nsamp))[0]

        # * * * * * * * * * * * * * * * * * * * * * * * * * * * *
        # if the trial also has a mood rating, add in the likelihood
        # of the mood model, too
        # * * * * * * * * * * * * * * * * * * * * * * * * * * * *
        if np.isnan(trial['Mood']):
            # no mood rating on this trial
            mood_log_like = 0.0
        else:
            # rescale the mood rating and map it onto the real line
            curr_mood = dists.logit(trial['Mood'] / 1000)

            # predicted mood is a linear function of the LTA and RPE sums
            pred_mood = self.params['b'] + \
                        self.params['w_LTA'] * trial['LTA_sum'] + \
                        self.params['w_RPE'] * trial['RPE_sum']

            # Gaussian log likelihood of the observed mood, kept on its own
            # and also added into the trial log likelihood
            mood_log_like = np.log(
                norm.pdf(curr_mood, pred_mood, np.sqrt(self.params['s_v'])))
            log_like = log_like + mood_log_like

        # see if running conditional sim
        if save_post:
            # run wfpt_gen
            choices, rts = wfpt_gen(v_mean=trial['Ediff'],
                                    a=self.params['a'],
                                    w_mode=self.params['w'],
                                    wfpt_nsamp=self.wfpt_nsamp,
                                    nsamp=self.gen_nsamp,
                                    trange=np.linspace(
                                        0, self.max_time - self.params['t0'],
                                        self.trange_nsamp))

            # calc prob of making the observed choice
            ind = choices == choice
            p_choice = ind.mean()

            # calc mean log rt
            choice_mean_log_rt = np.log(rts[ind] + self.params['t0']).mean()

            return log_like, p_choice, choice_mean_log_rt

        return log_like, mood_log_like
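In all three variants the method returns per-trial terms, which would normally be summed across a subject's trials to get the total log likelihood. A hedged sketch of that loop, assuming the method above lives on a model object and the trials are rows of a pandas DataFrame with the columns referenced in the code (Choice, RT, Ediff, Mood, and the mood regressors); the names model, trials_df, and calc_subject_like are assumptions for illustration:

    # Sketch: accumulate per-trial log likelihoods over a subject's trials.
    # `model` and `trials_df` are hypothetical; `model.calc_trial_like` refers
    # to the method defined in the examples above.
    import pandas as pd

    def calc_subject_like(model, trials_df):
        total_ll = 0.0
        total_mood_ll = 0.0
        for _, trial in trials_df.iterrows():
            # each row behaves like the `trial` argument used above
            log_like, mood_log_like = model.calc_trial_like(trial)
            total_ll += log_like
            total_mood_ll += mood_log_like
        return total_ll, total_mood_ll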