Example #1
def calculate_perf_metrics(trial_matrix):
    """Calculate simple performance metrics on a session"""
    rec = {}

    # Trials and spoiled fraction
    rec['n_trials'] = len(trial_matrix)
    rec['spoil_frac'] = float(np.sum(trial_matrix.outcome == 'spoil')) / \
        len(trial_matrix)

    # Calculate performance
    rec['perf_all'] = float(len(my.pick(trial_matrix, outcome='hit'))) / \
        len(my.pick(trial_matrix, outcome=['hit', 'error']))

    # Calculate unforced performance, protecting against low trial count
    n_nonbad_nonspoiled_trials = len(
        my.pick(trial_matrix, outcome=['hit', 'error'], isrnd=True))
    if n_nonbad_nonspoiled_trials < 10:
        rec['perf_unforced'] = np.nan
    else:
        rec['perf_unforced'] = float(
            len(my.pick(trial_matrix, outcome='hit', isrnd=True))) / \
            n_nonbad_nonspoiled_trials

    # ANOVA, with and without removing bad (forced) trials
    for remove_bad in [True, False]:
        # Numericate and optionally remove non-random trials
        numericated_trial_matrix = TrialMatrix.numericate_trial_matrix(
            trial_matrix)
        if remove_bad:
            suffix = '_unforced'
            numericated_trial_matrix = numericated_trial_matrix.loc[
                numericated_trial_matrix.isrnd == True]
        else:
            suffix = '_all'

        # Run anova
        aov_res = TrialMatrix._run_anova(numericated_trial_matrix)

        # Parse FEV
        if aov_res is not None:
            rec['fev_stay' + suffix], rec['fev_side' + suffix], \
                rec['fev_corr' + suffix] = aov_res['ess'][
                ['ess_prevchoice', 'ess_Intercept', 'ess_rewside']]
        else:
            rec['fev_stay' + suffix], rec['fev_side' + suffix], \
                rec['fev_corr' + suffix] = np.nan, np.nan, np.nan

    return rec
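A minimal usage sketch: `my.pick` is not shown in this listing, but from the calls above it appears to filter rows by keyword equality (or membership, when the value is a list/tuple) and return the matching index values. The `pick` stand-in below is a hypothetical reimplementation under that assumption, used to exercise the perf_all arithmetic on a synthetic trial matrix.

import numpy as np
import pandas

def pick(df, **kwargs):
    # Hypothetical stand-in for my.pick: return the index values of rows
    # matching every keyword filter (membership test for list/tuple values)
    mask = np.ones(len(df), dtype=bool)
    for col, val in kwargs.items():
        if isinstance(val, (list, tuple)):
            mask &= df[col].isin(val).values
        else:
            mask &= (df[col] == val).values
    return df.index[mask]

trial_matrix = pandas.DataFrame({
    'outcome': ['hit', 'error', 'hit', 'spoil', 'hit'],
    'isrnd': [True, True, False, True, True],
})

# Overall performance: hits over (hits + errors), as in the function above
n_hit = len(pick(trial_matrix, outcome='hit'))
n_scored = len(pick(trial_matrix, outcome=['hit', 'error']))
print('perf_all = %0.2f' % (n_hit / float(n_scored)))  # perf_all = 0.75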
Example #2
def get_trial_results2(pldf, logfile_lines):
    """Parse out TRLR lines using pldf and logfile_lines.
    
    Returns df pivoted on trial.
    """
    # choose trlr lines
    trlr_idxs = my.pick(pldf, command='TRLR')
    if len(trlr_idxs) == 0:
        return None

    # read into table
    trlr_strings_a = np.asarray(logfile_lines)[trlr_idxs]
    sio = io.StringIO("".join(trlr_strings_a))  # requires: import io
    trlr_df = pandas.read_csv(
        sio,
        sep=' ',
        names=('time', 'command', 'trlr_name', 'trlr_value'),
    )

    # Add trial marker and pivot on trial.
    # Should we check for dups / missings?
    trlr_df['trial'] = pldf['trial'][trlr_idxs].values
    trlrs_by_trial = trlr_df.pivot_table(index='trial',
                                         values='trlr_value',
                                         columns='trlr_name')
    return trlrs_by_trial
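For reference, the pivot step in isolation, on synthetic TRLR rows (trial numbers, names, and values are made up). Each trial becomes one row with one column per TRLR name; note that pivot_table's default mean aggregation coerces the values to float.

import pandas

trlr_df = pandas.DataFrame({
    'trial': [0, 0, 1, 1],
    'trlr_name': ['OUTC', 'RESP', 'OUTC', 'RESP'],
    'trlr_value': [2, 1, 2, 2],
})
trlrs_by_trial = trlr_df.pivot_table(index='trial',
                                     values='trlr_value',
                                     columns='trlr_name')
print(trlrs_by_trial)
# trlr_name  OUTC  RESP
# trial
# 0           2.0   1.0
# 1           2.0   2.0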
Example #3
def insert_events_and_times(ulabel, trials_info):
    """Inserts after_go, after_nogo, and times of those events"""
    # Define next_start as the next trial's cpoke_start
    trials_info['next_start'] = trials_info['cpoke_start'].shift(-1)

    # Also define 'after_nogo_hit'
    trials_info['after_nogo_hit'] = 0
    after_nogo = 1 + np.asarray(
        my.pick(trials_info, outcome='hit', go_or_nogo='nogo'))
    after_nogo = [bt for bt in after_nogo if bt in trials_info.index]
    trials_info.loc[after_nogo, 'after_nogo_hit'] = 1

    # Also define 'after_go_hit'
    trials_info['after_go_hit'] = 0
    after_go = 1 + np.asarray(
        my.pick(trials_info, outcome='hit', go_or_nogo='go'))
    after_go = [bt for bt in after_go if bt in trials_info.index]
    trials_info.loc[after_go, 'after_go_hit'] = 1

    # If previous trial was NOGO hit, get cpoke_stop on that trial
    trials_info['prev_nogo_stop'] = trials_info['cpoke_stop'].shift(1)
    trials_info.loc[trials_info.after_nogo_hit != 1, 'prev_nogo_stop'] = np.nan

    return trials_info
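The shift-by-one flagging can be illustrated without `my.pick`, using a plain boolean mask on a synthetic table (all values made up):

import numpy as np
import pandas

trials_info = pandas.DataFrame({
    'outcome': ['hit', 'error', 'hit', 'hit'],
    'go_or_nogo': ['nogo', 'go', 'nogo', 'go'],
}, index=[0, 1, 2, 3])

# Trials immediately after a NOGO hit, same pattern as in the function above
nogo_hits = trials_info.index[(trials_info.outcome == 'hit') &
                              (trials_info.go_or_nogo == 'nogo')]
after_nogo = [bt for bt in 1 + np.asarray(nogo_hits)
              if bt in trials_info.index]

trials_info['after_nogo_hit'] = 0
trials_info.loc[after_nogo, 'after_nogo_hit'] = 1
print(trials_info['after_nogo_hit'].tolist())  # [0, 1, 0, 1]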
Example #4
summary['mSpHz'] = summary['mSp'] / .050
summary['diffHz'] = summary['mEvHz'] - summary['mSpHz']
summary['diffspks'] = summary['diffHz'] * summary['dt']
summary['ratio'] = summary['mEvHz'] / summary['mSpHz']
summary['region'] = unit_db.loc[summary.index, 'region']
summary['latency'] = 1000 * unit_db.loc[
    summary.index, ['audresp_t1', 'audresp_t2']].mean(axis=1)
assert unit_db['include'][summary.index].all()

# Comparison of prevalence of audresp cells across regions
sdump = ''
A1_cells = my.pick_rows(unit_db, region='A1', include=True)
PFC_cells = my.pick_rows(unit_db, region='PFC', include=True)
n_A1_cells, n_audresp_A1_cells = map(
    len, [A1_cells,
          my.pick(A1_cells, audresp=['good', 'weak', 'sustained'])])
n_PFC_cells, n_audresp_PFC_cells = map(
    len,
    [PFC_cells,
     my.pick(PFC_cells, audresp=['good', 'weak', 'sustained'])])
sdump += "* A1: %d/%d\n" % (n_audresp_A1_cells, n_A1_cells)
sdump += "* PFC: %d/%d\n" % (n_audresp_PFC_cells, n_PFC_cells)
sdump += "* p=%0.4f, Fisher's Exact Test\n" % scipy.stats.fisher_exact([
    [n_audresp_A1_cells, n_A1_cells - n_audresp_A1_cells],
    [n_audresp_PFC_cells, n_PFC_cells - n_audresp_PFC_cells],
])[1]
with open('stat__comparison_of_prevalence_of_audresp_cells_across_regions',
          'w') as fi:
    fi.write(sdump)
print(sdump)
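The 2x2 table handed to scipy.stats.fisher_exact above has one row per region and columns (responsive, non-responsive). A minimal sketch with made-up counts:

import scipy.stats

table = [[30, 70],   # A1: audresp, non-audresp
         [10, 90]]   # PFC: audresp, non-audresp
oddsratio, p = scipy.stats.fisher_exact(table)
print("p=%0.4f, Fisher's Exact Test" % p)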
Example #5
    def assign_trial_type_to_trials_info(self, trials_info):
        """Returns a copy of trials_info with a column called trial_type.
        
        We match the srvpos and stppos variables in trials_info to the 
        corresponding rows of self.trial_types. The index of the matching row
        is the trial type for that trial.
        
        Warnings are issued if keywords are missing, multiple matches are 
        found (in which case the first is used), or no match is found
        (in which case the first trial type is used, although this should
        probably be changed to None).
        """
        trials_info = trials_info.copy()

        # Set up the pick kwargs for how we're going to pick the matching type
        # The key is the name in self.trial_types, and the value is the name
        # in trials_info
        pick_kwargs = {'isgo': 'isgo'}

        # Test for missing kwargs
        warn_missing_kwarg = []
        for key, val in list(pick_kwargs.items()):
            if val not in trials_info.columns:
                pick_kwargs.pop(key)
                warn_missing_kwarg.append(key)
        if len(warn_missing_kwarg) > 0:
            print "warning: missing kwargs to match trial type:" + \
                ' '.join(warn_missing_kwarg)

        # Iterate over trials
        # Could probably be done more efficiently with a groupby
        trial_types_l = []
        warn_no_matches = []
        warn_multiple_matches = []
        warn_missing_data = []
        warn_type_error = []
        for idx, ti_row in trials_info.iterrows():
            # Pick the matching row in trial_types
            trial_pick_kwargs = dict([(k, ti_row[v])
                                      for k, v in pick_kwargs.items()
                                      if not pandas.isnull(ti_row[v])])

            # Try to pick
            try:
                pick_idxs = my.pick(self.trial_types, **trial_pick_kwargs)
            except TypeError:
                # typically, comparing string with int
                warn_type_error.append(idx)
                pick_idxs = [0]

            # error check missing data
            if len(trial_pick_kwargs) < len(pick_kwargs):
                warn_missing_data.append(idx)

            # error-check and reduce to single index
            if len(pick_idxs) == 0:
                # no match, use the first trial type
                warn_no_matches.append(idx)
                pick_idx = 0
            elif len(pick_idxs) > 1:
                # multiple match
                warn_multiple_matches.append(idx)
                pick_idx = pick_idxs[0]
            else:
                # no error
                pick_idx = pick_idxs[0]

            # Store result
            trial_types_l.append(pick_idx)

        # issue warnings
        if len(warn_type_error) > 0:
            print("error: type error in pick on trials " +
                  ' '.join(map(str, warn_type_error)))
        if len(warn_missing_data) > 0:
            print("error: missing data on trials " +
                  ' '.join(map(str, warn_missing_data)))
        if len(warn_no_matches) > 0:
            print("error: no matches found on some trials: " +
                  ' '.join(map(str, warn_no_matches)))
        elif len(warn_multiple_matches) > 0:
            print("error: multiple matches found on some trials")

        # Put into trials_info and return
        trials_info['trial_type'] = trial_types_l
        return trials_info
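The per-trial matching reduces to a keyword-filtered row lookup. A stand-alone sketch with a synthetic trial-type table and a single trial's attributes (both made up):

import pandas

# One row per trial type, indexed by type number
trial_types = pandas.DataFrame({'isgo': [True, False]}, index=[0, 1])
ti_row = {'isgo': False}

# Same logic as the loop body above, with a boolean mask in place of my.pick
pick_idxs = trial_types.index[trial_types['isgo'] == ti_row['isgo']]
pick_idx = pick_idxs[0] if len(pick_idxs) > 0 else 0
print(pick_idx)  # 1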
Example #6
                      hspace=.6,
                      wspace=.5,
                      top=.95,
                      bottom=.05)
    bottom_edge = -2

    # Iterate over axes
    for (region, gng), prefblock in itertools.product(region_gng_l,
                                                      prefblock_l):
        ## PLOT TRACES
        # Get axis object for this set of parameters
        ax = axa[region_gng_l.index((region, gng)),
                 prefblock_l.index(prefblock)]

        # Which ulabels have this prefblock
        ax_ulabels = my.pick(hold_results, prefblock=prefblock, region=region)

        # Get rates for this set of parameters
        subdf = rates.loc[region].xs(gng, level='gng', axis=1).loc[ax_ulabels]

        # Iterate over blocks and plot them
        for label, color in label2color.items():
            my.plot.errorbar_data(data=subdf[label].values,
                                  x=bincenters,
                                  ax=ax,
                                  fill_between=True,
                                  color=color,
                                  label=label)

        ## DISTRS
        # Take same ulabels, combine across blocks
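my.plot.errorbar_data is not shown in this listing; assuming it draws a mean trace with a shaded error band when fill_between=True, a plain-matplotlib approximation on random data looks like this:

import numpy as np
import matplotlib.pyplot as plt

data = np.random.RandomState(0).randn(10, 50)  # rows: traces, cols: time bins
bincenters = np.linspace(-1, 1, 50)

mean = data.mean(axis=0)
sem = data.std(axis=0) / np.sqrt(data.shape[0])

fig, ax = plt.subplots()
ax.plot(bincenters, mean, color='b', label='LB')
ax.fill_between(bincenters, mean - sem, mean + sem, color='b', alpha=0.3)
ax.legend()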
Example #7
# Run each session
for session_name in sessions_to_include:
    # skip catch trial session
    if session_name == 'CR20B_120613_001_behaving':
        continue

    # Load
    ratname = kkpandas.kkrs.ulabel2ratname(session_name)
    trials_info = trials_info_dd[ratname][session_name]
    
    # Drop the forced trials
    trials_info = trials_info.loc[trials_info.nonrandom == 0]

    # Identify trials with a GO distracter
    trials_info['distracter_means_go'] = 0
    trials_info.loc[my.pick(trials_info,
        block=2, stim_name=('le_lo_lc', 'ri_lo_lc')), 'distracter_means_go'] = 1
    trials_info.loc[my.pick(trials_info,
        block=4, stim_name=['le_lo_pc', 'le_hi_pc']), 'distracter_means_go'] = 1
    
    # Categorize errors more finely than usual
    trials_info['outcome2'] = trials_info['outcome'].copy()
    trials_info.loc[my.pick(trials_info,
        outcome='wrong_port', distracter_means_go=1), 'outcome2'] = 'interference'
    
    # Store the loaded trials info (for later calculation of non-blolded)
    trials_info_l.append(trials_info.copy())
    
    # Optionally drop the first block for blolding purposes
    if drop_first_block:
        trials_info = trials_info.loc[trials_info.index > 20]
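The 'interference' recategorization above is the usual pick-then-assign pattern; a self-contained version with a boolean mask standing in for my.pick (synthetic values):

import pandas

trials_info = pandas.DataFrame({
    'outcome': ['hit', 'wrong_port', 'wrong_port'],
    'distracter_means_go': [0, 1, 0],
})

trials_info['outcome2'] = trials_info['outcome'].copy()
mask = ((trials_info.outcome == 'wrong_port') &
        (trials_info.distracter_means_go == 1))
trials_info.loc[mask, 'outcome2'] = 'interference'
print(trials_info['outcome2'].tolist())
# ['hit', 'interference', 'wrong_port']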
    
Example #8
def identify_state_change_times(parsed_df_by_trial,
                                state0=None,
                                state1=None,
                                show_warnings=True,
                                error_on_multi=False,
                                command='ST_CHG'):
    """Return time that state changed from state0 to state1
    
    This should be replaced with identify_state_change_times_new, which
    uses a combination of
    ArduFSM.TrialSpeak.read_logfile_into_df
    ArduFSM.TrialSpeak.get_commands_from_parsed_lines
    
    parsed_df_by_trial : result of parse_lines_into_df_split_by_trial
        (May be more efficient to rewrite this to operate on the whole thing?)
    state0 and state1 : any argument that pick_rows can work on, so
        13 works or [13, 14] works
    command : the state change token
        ST_CHG2 uses the time at the end, instead of beginning, of the loop
    
    If multiple hits are found on a trial, the first one is returned.

    If no hit is found on a trial, NaN is returned.
    """
    multi_warn_flag = False
    res = []

    # Iterate over trials
    for df in parsed_df_by_trial:
        # Get st_chg commands
        st_chg_rows = my.pick_rows(df, command=command)

        # Split the argument string and intify
        if len(st_chg_rows) > 0:
            split_arg = pandas.DataFrame(
                st_chg_rows['argument'].str.split().tolist(),
                dtype=int,
                columns=['state0', 'state1'],
                index=st_chg_rows.index)

            # Match to state0, state1
            picked = my.pick(split_arg, state0=state0, state1=state1)
        else:
            picked = []

        # Split on number of hits per trial
        if len(picked) == 0:
            res.append(np.nan)
        elif len(picked) == 1:
            res.append(df['time'][picked[0]])
        else:
            res.append(df['time'][picked[0]])
            multi_warn_flag = True
            if error_on_multi:
                raise ValueError("multiple events on this trial")

    if show_warnings and multi_warn_flag:
        print "warning: multiple target state changes found on some trial"

    return np.array(res, dtype=np.float) / 1000.0
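The split-and-intify step can be checked in isolation on synthetic ST_CHG argument strings (indices and states made up):

import pandas

st_chg_rows = pandas.DataFrame({'argument': ['13 14', '14 15']}, index=[5, 9])
split_arg = pandas.DataFrame(
    st_chg_rows['argument'].str.split().tolist(),
    dtype=int,
    columns=['state0', 'state1'],
    index=st_chg_rows.index)
print(split_arg)
#    state0  state1
# 5      13      14
# 9      14      15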
Example #9
# Within each ax:
#  without subhold: first A1 4 snames; then PFC 4 snames
#  with subhold: first A1 4 snames; then PFC 4 snames
f, axa = plt.subplots(2, 1, figsize=(12, 10))
f.subplots_adjust(hspace=.6, left=.125, right=.95)

for subhold in [False, True]:
    # Each row is abs or not
    for take_abs in [False, True]:
        # Get the ax
        ax = axa[int(take_abs)]

        # First plot A1, then PFC
        for region in ['A1', 'PFC']:
            # Units for this region
            sub_mer = mer.loc[my.pick(units_to_analyze, region=region)]

            # Grab the meaned response by sname
            sub_mer_LB = sub_mer[[sname + '_lc' for sname in snames]]
            sub_mer_LB.columns = snames
            sub_mer_PB = sub_mer[[sname + '_pc' for sname in snames]]
            sub_mer_PB.columns = snames

            # Subtract the hold if necessary
            if subhold:
                sub_mer_LB = sub_mer_LB.sub(sub_mer['hold_LB_hz'], axis=0)
                sub_mer_PB = sub_mer_PB.sub(sub_mer['hold_PB_hz'], axis=0)

            # Now actually take diff
            diff = sub_mer_PB - sub_mer_LB
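The hold subtraction and block difference are plain aligned arithmetic; a synthetic sketch (sound names and rates made up):

import pandas

sub_mer_LB = pandas.DataFrame({'sname_a': [5.0, 3.0], 'sname_b': [2.0, 4.0]})
sub_mer_PB = pandas.DataFrame({'sname_a': [6.0, 2.0], 'sname_b': [3.0, 5.0]})
hold = pandas.Series([1.0, 1.0])

# Subtract each row's hold-period rate, then difference the two blocks
sub_mer_LB = sub_mer_LB.sub(hold, axis=0)
sub_mer_PB = sub_mer_PB.sub(hold, axis=0)
diff = sub_mer_PB - sub_mer_LB
print(diff)
#    sname_a  sname_b
# 0      1.0      1.0
# 1     -1.0      1.0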