'lapse_low', 'lapse_high', 'n_trials', 'n_sessions'
])

for i, nickname in enumerate(np.unique(sessions.fetch('subject_nickname'))):
    if np.mod(i + 1, 10) == 0:
        print('Loading data of subject %d of %d' %
              (i + 1, len(np.unique(sessions.fetch('subject_nickname')))))

    # Get only the trials of the 50/50 blocks
    trials = (sessions * behavior.TrialSet.Trial
              & 'subject_nickname = "%s"' % nickname
              & 'trial_stim_prob_left = "0.5"').fetch(format='frame')
    trials = trials.reset_index()

    # Fit a psychometric function to these trials and get fit results
    fit_df = dj2pandas(trials)
    fit_result = fit_psychfunc(fit_df)

    # Get performance, reaction time and number of trials
    reaction_time = trials['rt'].median() * 1000
    perf_easy = trials['correct_easy'].mean() * 100

    # Fetch all trials to compute per-day trial counts and the session count
    trials = (sessions * behavior.TrialSet.Trial
              & 'subject_nickname = "%s"' % nickname).fetch(format='frame')
    trials = trials.reset_index()
    ntrials_perday = trials.groupby('session_uuid').count()['trial_id'].mean()
    nsessions = trials.groupby('session_uuid').size().shape[0]

    # Add results to dataframe
    learned.loc[i, 'mouse'] = nickname
use_sessions, use_days = query_sessions_around_criterion(
    criterion='trained', days_from_criterion=[2, 0], as_dataframe=False)
# restrict by list of dicts with uuids for these sessions
b = use_sessions * subject.Subject * subject.SubjectLab * reference.Lab * \
    behavior.TrialSet.Trial
# reduce the size of the fetch
b2 = b.proj('institution_short', 'subject_nickname', 'task_protocol',
            'trial_stim_contrast_left', 'trial_stim_contrast_right',
            'trial_response_choice', 'trial_stim_prob_left',
            'trial_feedback_type')
bdat = b2.fetch(
    order_by=
    'institution_short, subject_nickname, session_start_time, trial_id',
    format='frame').reset_index()
behav = dj2pandas(bdat)
behav['institution_code'] = behav.institution_short.map(institution_map)
assert not behav.empty
print(behav.describe())

# ================================= #
# PSYCHOMETRIC FUNCTIONS
# ================================= #

# how many mice are there for each lab?
N = behav.groupby(['institution_code'])['subject_nickname'].nunique().to_dict()
behav['n_mice'] = behav.institution_code.map(N)
behav['institution_name'] = behav.institution_code + \
    ': ' + behav.n_mice.apply(str) + ' mice'

# plot one curve for each animal, one panel per lab
behav['training_day'] = behav.training_day - \
    behav.training_day.min() + 1  # start at session 1
days = [2, 7, 10, 14]

for didx, day in enumerate(days):

    # get data for today
    print(day)
    thisdate = behav[behav.training_day == day]['session_date'].dt.strftime(
        '%Y-%m-%d').item()
    b = (subject.Subject & 'subject_nickname = "%s"' % mouse) \
        * (subject.SubjectLab & 'lab_name="%s"' % lab) \
        * (acquisition.Session.proj(session_date='date(session_start_time)') &
           'session_date = "%s"' % thisdate) \
        * behavior.TrialSet.Trial() \
        * endcriteria.SessionEndCriteriaImplemented()
    behavtmp = dj_tools.dj2pandas(b.fetch(format='frame').reset_index())
    behavtmp['trial_start_time'] = behavtmp.trial_start_time / 60  # in minutes

    # unclear how this can be empty - but if it happens, skip
    if behavtmp.empty:
        continue

    # PSYCHOMETRIC FUNCTIONS
    fig, ax = plt.subplots(1, 1, figsize=(2.5, 2.5))
    behavior_plots.plot_psychometric(
        behavtmp.rename(columns={'signed_contrast': 'signedContrast'}),
        ax=ax,
        color='k')
    ax.set(xlabel="Signed contrast (%)", ylim=[0, 1])

    if didx == 0:
# ================================= #
# Example 4
# ================================= #

# Imports this snippet relies on (dj_tools is the helper module referenced
# in the other examples; assumed to provide dj2pandas and fit_psychfunc)
import numpy as np
import statsmodels.api as sm
from dj_tools import dj2pandas, fit_psychfunc
def run_glm(behav,
            example,
            correction=True,
            bias=False,
            cross_validation=True):
    example_model = None  # ensure a defined return value if `example` never matches
    for i, nickname in enumerate(np.unique(behav['subject_nickname'])):
        if np.mod(i + 1, 10) == 0:
            print('Loading data of subject %d of %d' %
                  (i + 1, len(np.unique(behav['subject_nickname']))))

        # Get the trials of the sessions around criterion
        trials = behav.loc[behav['subject_nickname'] == nickname].copy()

        if bias == True:
            neutral_n = fit_psychfunc(
                behav[(behav['subject_nickname'] == nickname)
                      & (behav['probabilityLeft'] == 50)])
            left_fit = fit_psychfunc(
                behav[(behav['subject_nickname'] == nickname)
                      & (behav['probabilityLeft'] == 80)])
            right_fit = fit_psychfunc(
                behav[(behav['subject_nickname'] == nickname)
                      & (behav['probabilityLeft'] == 20)])

            behav.loc[behav['subject_nickname'] == nickname, 'bias_n'] = \
                neutral_n.loc[0, 'bias']
            behav.loc[behav['subject_nickname'] == nickname, 'bias_r'] = \
                right_fit.loc[0, 'bias']
            behav.loc[behav['subject_nickname'] == nickname, 'bias_l'] = \
                left_fit.loc[0, 'bias']

        else:
            fit_df = dj2pandas(trials.copy())
            fit_result = fit_psychfunc(fit_df)
            behav.loc[behav['subject_nickname'] == nickname, 'threshold'] = \
                fit_result.loc[0, 'threshold']
        ## GLM
        # make a separate dataframe
        data = trials[[
            'index', 'trial_feedback_type', 'signed_contrast', 'choice',
            'probabilityLeft'
        ]].copy()

        # Rewarded choices:
        data.loc[(data['choice'] == 0) &
                 (data['trial_feedback_type'].isnull()),
                 'rchoice'] = 0  # NoGo trials
        data.loc[(data['choice'] == -1) & (data['trial_feedback_type'] == -1),
                 'rchoice'] = 0
        data.loc[(data['choice'] == -1) & (data['trial_feedback_type'] == 1),
                 'rchoice'] = -1
        data.loc[(data['choice'] == 1) & (data['trial_feedback_type'] == 1),
                 'rchoice'] = 1
        data.loc[(data['choice'] == 1) & (data['trial_feedback_type'] == -1),
                 'rchoice'] = 0

        # Unrewarded choices:
        data.loc[(data['choice'] == 0) &
                 (data['trial_feedback_type'].isnull()),
                 'uchoice'] = 0  # NoGo trials
        data.loc[(data['choice'] == -1) & (data['trial_feedback_type'] == -1),
                 'uchoice'] = -1
        data.loc[(data['choice'] == -1) & (data['trial_feedback_type'] == 1),
                 'uchoice'] = 0
        data.loc[(data['choice'] == 1) & (data['trial_feedback_type'] == 1),
                 'uchoice'] = 0
        data.loc[(data['choice'] == 1) & (data['trial_feedback_type'] == -1),
                 'uchoice'] = 1

        # Apply correction
        if correction == True:
            data['rchoice+1'] = \
            data['rchoice'].shift(periods=-1).to_numpy()
            data['uchoice+1'] = \
            data['uchoice'].shift(periods=-1).to_numpy()

        # Shift rewarded and unrewarded predictors by one
        data.loc[:, ['rchoice', 'uchoice']] = \
            data[['rchoice', 'uchoice']].shift(periods=1).to_numpy()

        # Drop any nan trials
        data.dropna(inplace=True)

        # Make sensory predictors (no 0 predictor)
        contrasts = [25, 100, 12.5, 6.25]
        for c in contrasts:  # do not shadow the outer subject index `i`
            data.loc[(data['signed_contrast'].abs() == c), c] = \
                np.sign(data.loc[(data['signed_contrast'].abs() == c),
                                 'signed_contrast'].to_numpy())

            data_con = data[c].copy()
            data[c] = data_con.fillna(0)

        # If any contrast is missing, skip this mouse
        missing_contrast = False
        for c in contrasts:
            if np.sum(data[c]) == 0:
                print('missing contrast')
                missing_contrast = True

        if missing_contrast == True:
            continue

        # Make block identity (across predictors right is positive, hence logic below)
        if bias == True:
            data.loc[(data['probabilityLeft'] == 50), 'block'] = 0
            data.loc[(data['probabilityLeft'] == 20), 'block'] = 1
            data.loc[(data['probabilityLeft'] == 80), 'block'] = -1

        # Recode choice as 0/1 (1 = right, 0 = left)
        data.loc[data['choice'] == -1, 'choice'] = 0

        # Store index
        index = data['index'].copy()

        # Create predictor matrix
        endog = data['choice'].copy()
        exog = data.copy()
        exog.drop(columns=[
            'trial_feedback_type', 'signed_contrast', 'choice',
            'probabilityLeft'
        ],
                  inplace=True)
        exog = sm.add_constant(exog)

        if cross_validation == False:
            X_train = exog.copy()
            X_test = exog.copy()
            y_train = endog.copy()
            y_test = endog.copy()

        else:
            X_train = exog.iloc[:int(len(exog) * 0.70), :].copy()
            X_test = exog.iloc[int(len(endog) * 0.70):, :].copy()
            y_train = endog.iloc[:int(len(endog) * 0.70)].copy()
            y_test = endog.iloc[int(len(endog) * 0.70):].copy()

        # Store index

        index = X_test['index'].to_numpy()
        X_train.drop(columns=['index'], inplace=True)
        X_test.drop(columns=['index'], inplace=True)

        # Fit model
        try:
            logit_model = sm.Logit(y_train, X_train)
            result = logit_model.fit_regularized()
            # print(result.summary2())

            # Store model weights
            behav.loc[behav['subject_nickname'] == nickname,
                      'intercept'] = result.params['const'].copy()
            behav.loc[behav['subject_nickname'] == nickname,
                      'rchoice'] = result.params['rchoice'].copy()
            behav.loc[behav['subject_nickname'] == nickname,
                      'uchoice'] = result.params['uchoice'].copy()
            mask = result.params.index.get_level_values(0)
            behav.loc[behav['subject_nickname'] == nickname,
                      '25'] = result.params[25].copy()
            behav.loc[behav['subject_nickname'] == nickname,
                      '6'] = result.params.loc[mask == 6.25][0]
            behav.loc[behav['subject_nickname'] == nickname,
                      '100'] = result.params[100].copy()
            behav.loc[behav['subject_nickname'] == nickname,
                      '12'] = result.params.loc[mask == 12.5][0]

            if bias == True:
                behav.loc[behav['subject_nickname'] == nickname,
                          'block'] = result.params['block'].copy()

            if correction == True:
                result.params['rchoice+1'] = result.params['rchoice+1'] * -1
                result.params['uchoice+1'] = result.params['uchoice+1'] * -1
                behav.loc[behav['subject_nickname'] == nickname,
                          'rchoice+1'] = result.params['rchoice+1'].copy()
                behav.loc[behav['subject_nickname'] == nickname,
                          'uchoice+1'] = result.params['uchoice+1'].copy()
            # Probabilities on test data
            prob = result.predict(X_test).to_numpy()

            if nickname == example:
                example_model = result

            # Propagate to storing dataframe
            behav.loc[behav['index'].isin(index), 'simulation_prob'] = prob
        except Exception as err:
            print('GLM fit failed (e.g. singular matrix): %s' % err)
    return behav, example_model
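
# Usage sketch (hypothetical names): `behav` would be a trial-level
# DataFrame like the ones built in the surrounding examples, and 'CSHL_003'
# stands in for any subject_nickname present in it.
#
# behav, example_model = run_glm(behav, 'CSHL_003', correction=True,
#                                bias=False, cross_validation=True)
# print(example_model.summary2())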
# ================================= #
# Example 5
# ================================= #
    trial_fields = ('trial_stim_contrast_left', 'trial_stim_contrast_right',
                    'trial_response_time', 'trial_stim_prob_left',
                    'trial_feedback_type', 'trial_stim_on_time',
                    'trial_response_choice')

    # query trial data for sessions and subject name and lab info
    trials = use_sessions.proj('task_protocol') * behavior.TrialSet.Trial.proj(
        *trial_fields)
    subject_info = subject.Subject.proj('subject_nickname') * \
        (subject.SubjectLab * reference.Lab).proj('institution_short')

    # Fetch, join and sort data as a pandas DataFrame
    behav_merged = dj2pandas(
        trials.fetch(format='frame').join(
            subject_info.fetch(format='frame')).sort_values(by=[
                'institution_short', 'subject_nickname', 'session_start_time',
                'trial_id'
            ]).reset_index())
    behav_merged['institution_code'] = behav_merged.institution_short.map(
        institution_map)

else:
    behav_merged = pd.read_csv(join('data', 'Fig5.csv'))

# Variable to store model parameters
if load_model == False:
    behav_merged['rchoice'] = np.nan
    behav_merged['uchoice'] = np.nan
    behav_merged['6'] = np.nan
    behav_merged['12'] = np.nan
    behav_merged['25'] = np.nan
# ================================= #
# Example 6
# ================================= #
def model_psychometric_history(behav):
    select = behav.copy()
    select['t-1'] = select['trial_feedback_type'].shift(periods=1).to_numpy()
    select.loc[select['choice'] == -1, 'choice'] = 0
    select = select.iloc[1:, :]  # first trial has no history
    select['t-1'] = select['t-1'].astype(int)

    # Neutral-block trials only: plot the empirical choices split by previous
    # feedback, then overlay the model's simulated choice probability
    select_50 = select.loc[select['probabilityLeft'] == 50]
    sns.lineplot(data=select_50, hue='t-1', x=select_50['signed_contrast'],
                 y=select_50['choice'], palette=['red', 'green'], ci=68)
    sns.lineplot(data=select_50, hue='t-1', x=select_50['signed_contrast'],
                 y=select_50['simulation_prob'], palette=['red', 'green'],
                 ci=68)


## Functions

def run_glm(behav, example, correction=True, bias=False,
            cross_validation=True):
    example_model = None  # ensure a defined return value if `example` never matches
    for i, nickname in enumerate(np.unique(behav['subject_nickname'])):
        if np.mod(i+1, 10) == 0:
            print('Loading data of subject %d of %d' % (i+1, len(
                    np.unique(behav['subject_nickname']))))
    
        # Get the trials of the sessions around criterion
        trials = behav.loc[behav['subject_nickname'] == nickname].copy()
        
        
        if bias == True:
            neutral_n = fit_psychfunc(behav[(behav['subject_nickname'] == nickname)
                                   & (behav['probabilityLeft'] == 50)])
            left_fit = fit_psychfunc(behav[(behav['subject_nickname'] == nickname)
                                           & (behav['probabilityLeft'] == 80)])
            right_fit = fit_psychfunc(behav[(behav['subject_nickname'] == nickname)
                                            & (behav['probabilityLeft'] == 20)])
            
            behav.loc[behav['subject_nickname'] == nickname, 'bias_n'] = \
                neutral_n.loc[0, 'bias']
            behav.loc[behav['subject_nickname'] == nickname, 'bias_r'] = \
                right_fit.loc[0, 'bias']
            behav.loc[behav['subject_nickname'] == nickname, 'bias_l'] = \
                left_fit.loc[0, 'bias']
    
        else:
            fit_df = dj2pandas(trials.copy())
            fit_result = fit_psychfunc(fit_df)
            behav.loc[behav['subject_nickname'] == nickname, 'threshold'] = \
                fit_result.loc[0, 'threshold']
        ## GLM
        # make a separate dataframe
        data = trials[['index', 'trial_feedback_type',
                       'signed_contrast', 'choice',
                           'probabilityLeft']].copy()
        
        # drop trials with probabilityLeft values outside the standard blocks
        data.drop(
            data['probabilityLeft'][~data['probabilityLeft'].isin([50,20,80])].index,
            inplace=True)
        
        
        # Rewarded choices:
        data.loc[(data['choice'] == 0) &
                 (data['trial_feedback_type'].isnull()), 'rchoice']  = 0 # NoGo trials
        data.loc[(data['choice'] == -1) &
                 (data['trial_feedback_type'] == -1), 'rchoice']  = 0
        data.loc[(data['choice'] == -1) &
                 (data['trial_feedback_type'] == 1), 'rchoice']  = -1
        data.loc[(data['choice'] == 1) &
                 (data['trial_feedback_type'] == 1), 'rchoice']  = 1
        data.loc[(data['choice'] == 1) &
                 (data['trial_feedback_type'] == -1), 'rchoice']  = 0
        
        # Unrewarded choices:
        data.loc[(data['choice'] == 0) &
                 (data['trial_feedback_type'].isnull()), 'uchoice']  = 0 # NoGo trials
        data.loc[(data['choice'] == -1) &
                 (data['trial_feedback_type'] == -1), 'uchoice']  = -1 
        data.loc[(data['choice'] == -1) &
                 (data['trial_feedback_type'] == 1), 'uchoice']  = 0 
        data.loc[(data['choice'] == 1) &
                 (data['trial_feedback_type'] == 1), 'uchoice']  = 0 
        data.loc[(data['choice'] == 1) &
                 (data['trial_feedback_type'] == -1) , 'uchoice']  = 1
        
        # Apply correction
        if correction == True:
            data['rchoice+1'] = \
                data['rchoice'].shift(periods=-1).to_numpy()
            data['uchoice+1'] = \
                data['uchoice'].shift(periods=-1).to_numpy()
            
        # Shift rewarded and unrewarded predictors by one
        data.loc[:, ['rchoice', 'uchoice']] = \
            data[['rchoice', 'uchoice']].shift(periods=1).to_numpy()
            
    
        # Drop any nan trials
        data.dropna(inplace=True)
        
        # Make sensory predictors (no 0 predictor)
        contrasts = [25, 100, 12.5, 6.25]
        for c in contrasts:  # do not shadow the outer subject index `i`
            data.loc[(data['signed_contrast'].abs() == c), c] = \
                np.sign(data.loc[(data['signed_contrast'].abs() == c),
                                 'signed_contrast'].to_numpy())

            data_con = data[c].copy()
            data[c] = data_con.fillna(0)
        
        # If any contrast is missing, skip this mouse
        missing_contrast = False
        for c in contrasts:
            if np.sum(data[c]) == 0:
                print('missing contrast')
                missing_contrast = True
        
        if missing_contrast == True:
            continue
        
        # Make block identity (across predictors right is positive, hence logic below)
        if bias == True:
            data.loc[(data['probabilityLeft'] == 50), 'block'] = 0
            data.loc[(data['probabilityLeft'] == 20), 'block'] = 1
            data.loc[(data['probabilityLeft'] == 80), 'block'] = -1
        
        # Recode choice as 0/1 (1 = right, 0 = left)
        data.loc[data['choice'] == -1, 'choice'] = 0
        
        # Store index
        index = data['index'].copy()
        
        # Create predictor matrix
        endog = data['choice'].copy()
        exog = data.copy()
        exog.drop(columns=['trial_feedback_type', 
                       'signed_contrast', 'choice', 
                           'probabilityLeft'], inplace=True)
        exog = sm.add_constant(exog)
        
        if cross_validation == False:
            X_train = exog.copy()
            X_test = exog.copy()
            y_train = endog.copy()
            y_test = endog.copy()
            
        else:
            X_train = exog.iloc[:int(len(exog)*0.70),:].copy()
            X_test = exog.iloc[int(len(endog)*0.70):,:].copy()
            y_train = endog.iloc[:int(len(endog)*0.70)].copy()
            y_test = endog.iloc[int(len(endog)*0.70):].copy()
        
        # Store index
        
        index = X_test['index'].to_numpy()
        X_train.drop(columns=['index'], inplace=True)
        X_test.drop(columns=['index'], inplace=True)
        
        
        # Fit model
        try:
            logit_model = sm.Logit(y_train, X_train)
            result = logit_model.fit_regularized()
            # print(result.summary2())
            
            # Store model weights
            behav.loc[behav['subject_nickname'] == nickname, 'intercept'] = result.params['const'].copy()
            behav.loc[behav['subject_nickname'] == nickname, 'rchoice'] = result.params['rchoice'].copy()
            behav.loc[behav['subject_nickname'] == nickname, 'uchoice'] = result.params['uchoice'].copy()
            mask = result.params.index.get_level_values(0)
            behav.loc[behav['subject_nickname'] == nickname, '25'] = result.params[25].copy()
            behav.loc[behav['subject_nickname'] == nickname, '6'] = result.params.loc[mask == 6.25][0]
            behav.loc[behav['subject_nickname'] == nickname, '100'] = result.params[100].copy()
            behav.loc[behav['subject_nickname'] == nickname, '12'] = result.params.loc[mask == 12.5][0]
            
            if bias == True:
                behav.loc[behav['subject_nickname'] == nickname, 'block'] = result.params['block'].copy()
            
            if correction == True:
                behav.loc[behav['subject_nickname'] == nickname, 'rchoice+1'] = result.params['rchoice+1'].copy()
                behav.loc[behav['subject_nickname'] == nickname, 'uchoice+1'] = result.params['uchoice+1'].copy()
            # Probabilities on test data
            prob = result.predict(X_test).to_numpy()
            
            if nickname == example:
                example_model = result
            
            # Propagate to storing dataframe
            behav.loc[behav['index'].isin(index), 'simulation_prob'] = prob
        except Exception as err:
            print('GLM fit failed (e.g. singular matrix): %s' % err)
    return behav, example_model


def data_2_X_test(behav, correction=True, bias=True):
        data = behav[['index','trial_feedback_type',
                       'signed_contrast', 'choice',
                           'probabilityLeft']].copy()
        
        # drop trials with probabilityLeft values outside the standard blocks
        data.drop(
            data['probabilityLeft'][~data['probabilityLeft'].isin([50,20,80])].index,
            inplace=True)
        
        
        # Rewarded choices:
        data.loc[(data['choice'] == 0) &
                 (data['trial_feedback_type'].isnull()), 'rchoice']  = 0 # NoGo trials
        data.loc[(data['choice'] == -1) &
                 (data['trial_feedback_type'] == -1), 'rchoice']  = 0
        data.loc[(data['choice'] == -1) &
                 (data['trial_feedback_type'] == 1), 'rchoice']  = -1
        data.loc[(data['choice'] == 1) &
                 (data['trial_feedback_type'] == 1), 'rchoice']  = 1
        data.loc[(data['choice'] == 1) &
                 (data['trial_feedback_type'] == -1), 'rchoice']  = 0
        
        # Unrewarded choices:
        data.loc[(data['choice'] == 0) &
                 (data['trial_feedback_type'].isnull()), 'uchoice']  = 0 # NoGo trials
        data.loc[(data['choice'] == -1) &
                 (data['trial_feedback_type'] == -1), 'uchoice']  = -1 
        data.loc[(data['choice'] == -1) &
                 (data['trial_feedback_type'] == 1), 'uchoice']  = 0 
        data.loc[(data['choice'] == 1) &
                 (data['trial_feedback_type'] == 1), 'uchoice']  = 0 
        data.loc[(data['choice'] == 1) &
                 (data['trial_feedback_type'] == -1) , 'uchoice']  = 1
        
        # Apply correction
        if correction == True:
            data['rchoice+1'] = \
                data['rchoice'].shift(periods=-1).to_numpy()
            data['uchoice+1'] = \
                data['uchoice'].shift(periods=-1).to_numpy()
            
        # Shift rewarded and unrewarded predictors by one
        data.loc[:, ['rchoice', 'uchoice']] = \
            data[['rchoice', 'uchoice']].shift(periods=1).to_numpy()
            
    
        # Drop any nan trials
        data.dropna(inplace=True)
        
        # Make sensory predictors (no 0 predictor)
        contrasts = [25, 100, 12.5, 6.25]
        for c in contrasts:
            data.loc[(data['signed_contrast'].abs() == c), c] = \
                np.sign(data.loc[(data['signed_contrast'].abs() == c),
                                 'signed_contrast'].to_numpy())

            data_con = data[c].copy()
            data[c] = data_con.fillna(0)
        
        # Make block identity (across predictors right is positive, hence logic below)
        if bias == True:
            data.loc[(data['probabilityLeft'] == 50), 'block'] = 0
            data.loc[(data['probabilityLeft'] == 20), 'block'] = 1
            data.loc[(data['probabilityLeft'] == 80), 'block'] = -1
        
        # Recode choice as 0/1 (1 = right, 0 = left)
        data.loc[data['choice'] == -1, 'choice'] = 0
        
        index = data['index'].copy()

        # Create predictor matrix
        endog = data['choice'].copy()
        exog = data.copy()
        exog.drop(columns=['index', 'trial_feedback_type', 
                       'signed_contrast', 'choice', 
                           'probabilityLeft'], inplace=True)
        exog = sm.add_constant(exog)
        
        return exog, index
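
# Usage sketch (hypothetical names): build a held-out design matrix and
# score it with a model fitted by run_glm above. `result` is a fitted
# statsmodels Logit results object; the predictor columns of `exog` must
# match those the model was trained with (same `correction`/`bias` flags).
#
# exog, index = data_2_X_test(behav, correction=True, bias=True)
# prob = result.predict(exog).to_numpy()
# behav.loc[behav['index'].isin(index), 'simulation_prob'] = prob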


def plot_psychometric(x, y, col, point=False, mark='o', al=1):
    # summary stats - average psychfunc over observers
    df = pd.DataFrame({'signed_contrast': x, 'choice': y,
                       'choice2': y})
    df2 = df.groupby(['signed_contrast']).agg(
        {'choice2': 'count', 'choice': 'mean'}).reset_index()
    df2.rename(columns={"choice2": "ntrials",
                        "choice": "fraction"}, inplace=True)
    df2 = df2.groupby(['signed_contrast']).mean().reset_index()
    df2 = df2[['signed_contrast', 'ntrials', 'fraction']]

    # fit psychfunc
    pars, L = psy.mle_fit_psycho(df2.transpose().values,  # extract the data from the df
                                 P_model='erf_psycho_2gammas',
                                 parstart=np.array(
                                     [df2['signed_contrast'].mean(), 20., 0.05, 0.05]),
                                 parmin=np.array(
                                     [df2['signed_contrast'].min(), 5, 0., 0.]),
                                 parmax=np.array([df2['signed_contrast'].max(), 100., 1, 1]))

    # plot psychfunc
    g = sns.lineplot(x=np.arange(-29, 29),
                     y=psy.erf_psycho_2gammas(pars, np.arange(-29, 29)),
                     color=col, alpha=al)

    # plot psychfunc: -100, +100
    sns.lineplot(x=np.arange(-37, -32),
                 y=psy.erf_psycho_2gammas(pars, np.arange(-103, -98)),
                 color=col, alpha=al)
    sns.lineplot(x=np.arange(32, 37),
                 y=psy.erf_psycho_2gammas(pars, np.arange(98, 103)),
                 color=col, alpha=al)

    # now break the x-axis
    # if 100 in df.signed_contrast.values and not 50 in
    # df.signed_contrast.values:
    df['signed_contrast'] = df['signed_contrast'].replace(-100, -35)
    df['signed_contrast'] = df['signed_contrast'].replace(100, 35)
    
    if point == True:
        sns.lineplot(x=df['signed_contrast'], y=df['choice'],
                     err_style="bars", linewidth=0, linestyle='None',
                     mew=0.5, marker=mark, ci=68, color=col, alpha=al)

    g.set_xticks([-35, -25, -12.5, 0, 12.5, 25, 35])
    g.set_xticklabels(['-100', '-25', '-12.5', '0', '12.5', '25', '100'],
                      size='small', rotation=45)
    g.set_xlim([-40, 40])
    g.set_ylim([0, 1])
    g.set_yticks([0, 0.25, 0.5, 0.75, 1])
    g.set_yticklabels(['0', '25', '50', '75', '100'])
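
# Usage sketch (hypothetical names): plot one mouse's curve from a
# dj2pandas-style DataFrame. A 0/1 rightward-choice column is assumed for
# `y`; adapt to whatever binary choice column your DataFrame carries.
#
# one_mouse = behav.loc[behav['subject_nickname'] == 'CSHL_003']
# plot_psychometric(one_mouse['signed_contrast'], one_mouse['choice'],
#                   col='k', point=True, mark='o', al=1)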
    
    
# FUNCTION UNDER DEVELOPMENT
def updating(behav):
    select =  behav.copy()
    select['signed_contrast-1'] =  select['signed_contrast'].shift(periods=1).to_numpy()
    select['signed_contrast+1'] =  select['signed_contrast'].shift(periods=-1).to_numpy()
    select['t-1'] = select['trial_feedback_type'].shift(periods=1).to_numpy()
    select['t+1'] = select['trial_feedback_type'].shift(periods=-1).to_numpy()
    select = select.iloc[1:-1,:] # First and last trial will have nan for history
    select['t-1']  = select['t-1'].astype(int)
    select['simulation_prob'] = select['simulation_prob']*100
    #select = select.loc[select['signed_contrast-1'] >= 0]
    for mouse in select['subject_nickname'].unique():
        for c in select['signed_contrast-1'].unique():
            for r in select['t-1'].unique():
                sub_select = select.loc[(select['signed_contrast-1'] == c) &
                     (select['t-1'] == r) & (select['subject_nickname'] == mouse)]
                
                fit_result = fit_psychfunc(sub_select)
                select.loc[select['subject_nickname'] == mouse, 'updating'] = \
                    fit_result['bias'][0]
        for c in select['signed_contrast+1'].unique():
            for r in select['t+1'].unique():
                sub_select = select.loc[(select['signed_contrast+1'] == c) &
                     (select['t+1'] == r) & (select['subject_nickname'] == mouse)]             
                fit_result = fit_psychfunc(sub_select)
                select.loc[select['subject_nickname'] == mouse,
                           'updating_correction'] = fit_result['bias'][0]

    sns.lineplot(data=select, hue='t-1', x=select['signed_contrast-1'],
                 y=select['simulation_prob'], ci=68)
    
    sns.lineplot(data=select, hue='t-1', x=select['signed_contrast-1'],
                 y=select.loc[select['probabilityLeft'] == 80, 'choice']
                 - select.loc[select['probabilityLeft'] == 20, 'choice'])
                
# ================================= #
# Example 7
# ================================= #
                                                             2, 3],
                                                         as_dataframe=False)
institution_map, col_names = institution_map()

# restrict by list of dicts with uuids for these sessions
b = (use_sessions * subject.Subject * subject.SubjectLab * reference.Lab
     * behavior.TrialSet.Trial)

# reduce the size of the fetch
b2 = b.proj('institution_short', 'subject_nickname', 'task_protocol',
            'trial_stim_contrast_left', 'trial_stim_contrast_right',
            'trial_response_choice', 'trial_stim_prob_left',
            'trial_feedback_type')
bdat = b2.fetch(order_by='institution_short, subject_nickname, session_start_time, trial_id',
                format='frame').reset_index()
behav_merged = dj2pandas(bdat)

behav_merged['institution_code'] = behav_merged.institution_short.map(institution_map)

# Variable to store model parameters
behav_merged['rchoice'] = np.nan
behav_merged['uchoice'] = np.nan
behav_merged['6'] = np.nan
behav_merged['12'] = np.nan
behav_merged['25'] = np.nan
behav_merged['100'] = np.nan
behav_merged['block'] = np.nan
behav_merged['intercept'] = np.nan
behav_merged['simulation_prob'] = np.nan

if correction == True: