Example #1
def uncertainty_vs_last_rewarded(f_behavior, max_duration=5000):
    ##start by getting the trial data for this session
    trial_data = ptr.get_full_trials(f_behavior, max_duration=max_duration)
    ##split trial indices by whether the previous trial was rewarded
    last_rew_idx, last_unrew_idx = ptr.split_by_last_outcome(trial_data)
    ##fit the hidden Markov model for this session
    model_data = mf.fit_models_from_trial_data(trial_data)
    ##get the model uncertainty over belief states for each trial
    uncertainty = mf.uncertainty_from_trial_data(trial_data)
    ##group the uncertainty values by the previous trial's outcome
    last_rew_uncertainty = uncertainty[last_rew_idx]
    last_unrew_uncertainty = uncertainty[last_unrew_idx]
    return last_rew_uncertainty, last_unrew_uncertainty
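
A minimal usage sketch for this function, assuming `ptr` and `mf` are importable and that numpy is available as `np` (as in the other examples); the session file name is hypothetical:

##hypothetical session file; both returned arrays hold per-trial values
rew_u, unrew_u = uncertainty_vs_last_rewarded('R11_day1_behavior.hdf5')
print('mean uncertainty after reward: {}'.format(np.mean(rew_u)))
print('mean uncertainty after no reward: {}'.format(np.mean(unrew_u)))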
Example #2
def get_behavior_data(f_behavior, n_back=3, max_duration=5000):
    global trial_lut
    ##start by parsing the trials
    trial_data = ptr.get_full_trials(f_behavior, max_duration=max_duration)
    ##get the session number for this session
    session_num = ptr.get_session_number(f_behavior)
    ##pre-allocation
    n_trials = len(trial_data.index)
    ##how many features we have depends on the length of our
    ##action/reward history
    features = ['training_day', 'trial_number']
    for i in range(n_back):
        features.append('action-' + str(i + 1))
        features.append('outcome-' + str(i + 1))
        features.append('interaction-' + str(i + 1))
    ##create the data arrays
    y = pd.DataFrame(index=np.arange(n_trials), columns=['value', 'action'])
    X = pd.DataFrame(index=np.arange(n_trials), columns=features)
    """
	Now parse each trial using the following values:
	reward/no reward: 1 or 0
	upper lever/lower lever: 2 or 1
	"""
    for t in range(n_trials):
        ##get the data for this trial
        trial = trial_data.loc[t]
        ##fill out the outcomes array first
        y.loc[t, 'value'] = trial_lut[trial['action']]
        y.loc[t, 'action'] = trial['action']
        X.loc[t, 'training_day'] = session_num
        X.loc[t, 'trial_number'] = t
        for i in range(n_back):
            ##history features exist once n_back prior trials are available
            if t >= n_back:
                ##look up the numeric codes for the trial i+1 steps back
                action = trial_lut[trial_data['action'][t - (i + 1)]]
                outcome = trial_lut[trial_data['outcome'][t - (i + 1)]]
                X.loc[t, 'action-' + str(i + 1)] = action
                X.loc[t, 'outcome-' + str(i + 1)] = outcome
                X.loc[t, 'interaction-' + str(i + 1)] = action * outcome
            else:
                ##not enough history yet; zero-pad the regressors
                X.loc[t, 'action-' + str(i + 1)] = 0
                X.loc[t, 'outcome-' + str(i + 1)] = 0
                X.loc[t, 'interaction-' + str(i + 1)] = 0
    return y, X
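
Note that `get_behavior_data` relies on a module-level `trial_lut` (hence the `global` statement). A plausible definition, inferred from the coding scheme in the docstring and the labels used in the other examples, is shown below; it is an assumption, not taken from the source:

##assumed lookup table mapping trial labels to numeric codes
trial_lut = {
    'upper_lever': 2,
    'lower_lever': 1,
    'rewarded_poke': 1,
    'unrewarded_poke': 0,
}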
Example #3
def get_session_data(f_behavior, max_duration=5000):
    trial_data = ptr.get_full_trials(f_behavior,
                                     pad=[400, 400],
                                     max_duration=max_duration)
    n_trials = trial_data.index.size
    ##numeric coding: upper lever = 2, lower lever = 1;
    ##rewarded = 1, unrewarded = 0
    actions = np.zeros(n_trials)
    outcomes = np.zeros(n_trials)
    ##the context of the first block in the session
    first_block = trial_data['context'][0]
    upper_levers = np.where(trial_data['action'] == 'upper_lever')[0]
    lower_levers = np.where(trial_data['action'] == 'lower_lever')[0]
    rewarded = np.where(trial_data['outcome'] == 'rewarded_poke')[0]
    unrewarded = np.where(trial_data['outcome'] == 'unrewarded_poke')[0]
    actions[upper_levers] = 2
    actions[lower_levers] = 1
    outcomes[rewarded] = 1
    outcomes[unrewarded] = 0
    ##trial indices where the rewarded context switches blocks
    ctx = np.asarray(trial_data['context'] == 'upper_rewarded').astype(int)
    switch_times = np.where(np.diff(ctx) != 0)[0]
    return actions, outcomes, switch_times, first_block
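
A short usage sketch (file name hypothetical): `actions` and `outcomes` hold one numeric code per trial, and `switch_times` marks the trial indices where the rewarded context flips:

actions, outcomes, switch_times, first_block = get_session_data('R11_day1_behavior.hdf5')
print('{} trials, {} block switches, first block: {}'.format(
    actions.size, switch_times.size, first_block))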
Example #4
def get_session_meta(f_behavior, max_duration=5000):
    ##start by parsing the data
    data = ptr.get_full_trials(f_behavior, max_duration=max_duration)
    metadata = {
        'unrewarded': np.where(data['outcome'] == 'unrewarded_poke')[0],
        'rewarded': np.where(data['outcome'] == 'rewarded_poke')[0],
        'upper_lever': np.where(data['action'] == 'upper_lever')[0],
        'lower_lever': np.where(data['action'] == 'lower_lever')[0],
        'upper_context': np.where(data['context'] == 'upper_rewarded')[0],
        'lower_context': np.where(data['context'] == 'lower_rewarded')[0],
    }
    trial_info = ptr.parse_trial_data(data)
    metadata['n_blocks'] = trial_info['n_blocks']
    metadata['block_lengths'] = trial_info['block_lengths']
    metadata['first_block'] = data['context'][0]
    metadata['mean_block_len'] = np.mean(trial_info['block_lengths'])
    # metadata['reward_rate'] = np.mean(
    #     [len(trial_info['upper_correct_rewarded']) /
    #      (len(trial_info['upper_correct_rewarded']) +
    #       len(trial_info['upper_correct_unrewarded'])),
    #      len(trial_info['lower_correct_rewarded']) /
    #      (len(trial_info['lower_correct_rewarded']) +
    #       len(trial_info['lower_correct_unrewarded']))])
    return metadata
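
The commented-out reward-rate computation can be roughly approximated from the index arrays already in `metadata`; the sketch below counts every rewarded trial rather than only correct presses, so it simplifies the original intent, and the file name is hypothetical:

meta = get_session_meta('R11_day1_behavior.hdf5')
n_outcomes = len(meta['rewarded']) + len(meta['unrewarded'])
reward_rate = len(meta['rewarded']) / float(n_outcomes)
print('reward rate: {:.2f} across {} blocks'.format(reward_rate, meta['n_blocks']))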
Example #5
def session_trial_durations(f_behavior, max_duration=5000):
    data = ptr.get_full_trials(f_behavior, max_duration=max_duration)
    ##time from the action timestamp to the outcome timestamp, per trial
    return np.asarray(data['outcome_ts'] - data['action_ts']).astype(int)
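
A quick summary sketch, assuming numpy is imported as `np`; the units of the returned durations follow whatever timestamps `ptr.get_full_trials` provides (milliseconds would be consistent with `max_duration=5000`), and the file name is hypothetical:

durations = session_trial_durations('R11_day1_behavior.hdf5')
print('median action-to-outcome duration: {}'.format(np.median(durations)))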