Example #1
0
def pipeline(trials_info,
    spike_server=None, spike_server_kwargs=None,
    sort_spikes=True,
    time_picker=None, time_picker_kwargs=None,
    trial_picker=TrialPicker, trial_picker_kwargs=None,
    folding_kwargs=None,
    label_with_btrial_numbers=True):
    """Replacement for pipeline_overblock_oneevent
    
    In particular, this allows the use of a different time picker.
    time_picker must be something that responds to:
        time_picker.pick(trial_numbers)
    with a list of times corresponding to each trial number.
    
    The trial numbers match those obtained from trial_picker, so
    typically behavioral trial numbers.
    
    The previous version required EventTimePicker, which operated on a Folded
    of events. Also, some special logic and RS_syncing was required to handle
    the case of behavioral trials not present in neural. This version
    offloads that to time_picker.
    
    Returns: dict from category label to Folded of spike times.
    """
    # Defaults for every optional kwargs dict. time_picker_kwargs was
    # previously missed here, so leaving it as None crashed below at
    # `**time_picker_kwargs` with a TypeError.
    if folding_kwargs is None: 
        folding_kwargs = {}
    if spike_server_kwargs is None:
        spike_server_kwargs = {}
    if trial_picker_kwargs is None:
        trial_picker_kwargs = {}
    if time_picker_kwargs is None:
        time_picker_kwargs = {}
    
    # Get the spikes and optionally resort just to be sure
    spikes = np.asarray(spike_server.get(**spike_server_kwargs))
    if sort_spikes:
        spikes = np.sort(spikes)
    
    # Select trials from behavior
    # This object returns a list of tuples, one for each category
    # Each tuple is (label, trial_numbers)
    # TODO: make trial_picker load trials_info itself, if it needs it
    picked_trials_l = trial_picker.pick(trials_info, **trial_picker_kwargs)
    
    # Iterate over categories and store in dfolded
    dfolded = {}
    for label, trial_numbers in picked_trials_l:
        # Pick times for these trials
        trial_times = time_picker.pick(trial_numbers, **time_picker_kwargs)
    
        # Optionally label each entry with its behavioral trial number
        if label_with_btrial_numbers:
            trial_labels = np.asarray(trial_numbers)
        else:
            trial_labels = None
        
        # Fold and store
        dfolded[label] = Folded.from_flat(
            flat=spikes, centers=trial_times, labels=trial_labels, 
            **folding_kwargs)

    return dfolded
Example #2
0
def fold_for_tuning_curve(spikes,
                          timestamps,
                          tones,
                          attens,
                          tc_freqs=None,
                          tc_attens=None,
                          freq_min=None,
                          freq_max=None,
                          n_freq_bins=None,
                          dstart=-.05,
                          dstop=.14):
    """Fold spikes into freq/atten bins for tuning curve
    
    spikes : times in seconds, I will sort
    timestamps : time in seconds
    tones : frequency of stimulus, same shape as timestamps
    attens : attenuation of stimulus, same shape as timestamps
    
    tc_freqs, tc_attens : bin edges
        If None, will generate from freq_min, freq_max, n_freq_bins
    
    dstart, dstop: seconds of time around each timestamp
    
    Returns:
        dfolded, tc_freqs, tc_attens, tc_freq_labels, tc_atten_labels
        dfolded : dict of Folded, keyed by index (fb, ab) into freq and 
            atten labels
    """
    # Default bin edges: one more edge than there are bins
    if tc_freqs is None:
        tc_freqs = np.logspace(np.log10(freq_min), np.log10(freq_max),
                               n_freq_bins + 1)
    if tc_attens is None:
        tc_attens = np.concatenate([np.sort(np.unique(attens)), [np.inf]])

    # Bin labels: log-midpoint of each freq bin, left edge of each atten bin
    tc_freq_labels = 10**(np.log10(tc_freqs[:-1]) +
                          np.diff(np.log10(tc_freqs)) / 2)
    tc_atten_labels = tc_attens[:-1]

    # Assign each stimulus to a bin index
    # These will range from (0, len(edges) - 1)
    tone_freq_bin = np.searchsorted(tc_freqs, tones) - 1
    tone_atten_bin = np.searchsorted(tc_attens, attens)

    # from_flat requires sorted spike times
    spikes = np.sort(spikes)

    # One Folded per (freq bin, atten bin) pair
    dfolded = {}
    for freq_idx in range(len(tc_freq_labels)):
        freq_mask = (tone_freq_bin == freq_idx)
        for atten_idx in range(len(tc_atten_labels)):
            in_bin = freq_mask & (tone_atten_bin == atten_idx)
            dfolded[(freq_idx, atten_idx)] = Folded.from_flat(
                spikes,
                centers=timestamps[in_bin],
                dstart=dstart,
                dstop=dstop)

    return dfolded, tc_freqs, tc_attens, tc_freq_labels, tc_atten_labels
Example #3
0
 def fold_spikes_on_times(self, interval_names, starts_d, stops_d):
     """Fold this object's spikes over each named interval.
     
     interval_names : iterable of keys into starts_d and stops_d
     starts_d, stops_d : dicts of interval start/stop times per name
     
     Returns: dict from interval name to Folded of self.spikes
     """
     spike_times = np.asarray(self.spikes)
     result = {}
     for name in interval_names:
         result[name] = Folded.from_flat(
             spike_times, starts=starts_d[name], stops=stops_d[name])
     return result
Example #4
0
def split_events_by_state_name(events, split_state, subtract_off_center=False,
    **kwargs):
    """Divides up events based on a state name that you specify
    
    Returns Folded, split on split_state
    """
    # Use each occurrence of split_state as a fold start time
    split_times = np.asarray(events[events.event == split_state].time)
    return Folded.from_flat(flat=events, starts=split_times,
        subtract_off_center=subtract_off_center, **kwargs)
Example #5
0
def fold_for_tuning_curve(spikes, timestamps, tones, attens,
    tc_freqs=None, tc_attens=None, freq_min=None, freq_max=None,
    n_freq_bins=None, dstart=-.05, dstop=.14):
    """Fold spikes into freq/atten bins for tuning curve
    
    spikes : times in seconds, I will sort
    timestamps : time in seconds
    tones : frequency of stimulus, same shape as timestamps
    attens : attenuation of stimulus, same shape as timestamps
    
    tc_freqs, tc_attens : bin edges
        If None, will generate from freq_min, freq_max, n_freq_bins
    
    dstart, dstop: seconds of time around each timestamp
    
    Returns:
        dfolded, tc_freqs, tc_attens, tc_freq_labels, tc_atten_labels
        dfolded : dict of Folded, keyed by index (fb, ab) into freq and 
            atten labels
    """
    # Construct default bin edges (one more edge than bins)
    if tc_freqs is None:
        tc_freqs = np.logspace(
            np.log10(freq_min), np.log10(freq_max), n_freq_bins + 1)
    if tc_attens is None:
        tc_attens = np.concatenate([np.sort(np.unique(attens)), [np.inf]])

    # Bin "centers": geometric midpoint for freqs, left edge for attens
    tc_freq_labels = 10 ** (
        np.log10(tc_freqs[:-1]) + np.diff(np.log10(tc_freqs)) / 2)
    tc_atten_labels = tc_attens[:-1]

    # Bin index of each stimulus, ranging over (0, len(edges) - 1)
    tone_freq_bin = np.searchsorted(tc_freqs, tones) - 1
    tone_atten_bin = np.searchsorted(tc_attens, attens)

    # from_flat requires sorted spikes
    spikes = np.sort(spikes)

    # Build one Folded per (freq, atten) bin pair
    dfolded = {
        (fb, ab): Folded.from_flat(spikes,
            centers=timestamps[(tone_freq_bin == fb) & (tone_atten_bin == ab)],
            dstart=dstart, dstop=dstop)
        for fb in range(len(tc_freq_labels))
        for ab in range(len(tc_atten_labels))}

    return dfolded, tc_freqs, tc_attens, tc_freq_labels, tc_atten_labels
Example #6
0
def split_events_by_state_name(events,
                               split_state,
                               subtract_off_center=False,
                               **kwargs):
    """Divides up events based on a state name that you specify
    
    Returns Folded, split on split_state
    """
    # Every occurrence of split_state marks the start of one fold
    matching = events[events.event == split_state]
    fold_starts = np.asarray(matching.time)
    folded = Folded.from_flat(flat=events,
                              starts=fold_starts,
                              subtract_off_center=subtract_off_center,
                              **kwargs)
    return folded
Example #7
0
def pipeline_overblock_oneevent(kkserver, session, unit, rs,
    trial_picker=TrialPicker, trial_picker_kwargs=None,
    evname='play_stimulus_in', folding_kwargs=None, sort_spikes=True,
    final_folded_map=None, final_folded_map_dtype=int,
    label_with_btrial_numbers=True):
    """This aims to be the all-encompassing pipeline
    
    See IntervalPipeline for a different design philosophy.

    Each 'category' of trials is folded together.
    
    trial_picker_kwargs : dict
        Definition of each category. It has the following items:
        'labels' : list
            Name of each category (keys in returned dict)
        'label_kwargs' : list, of same length as 'labels'
            Definition of each category. Passed as keyword arguments to
            `trial_picker`. For the default picker: each key, value pair is applied
            to trials_info to select trials for this category. For example,
            {'outcome': 'hit', 'block': 2}
            
            This can also be a MultiIndex with suitably defined attribute
            `names`. That way actually makes more sense to me in the long 
            run. For now it is converted to the above dict-like syntax.
        Any other key, value pairs in this dict are passed to `trial_picker`
        for EVERY category. Ex: {'nonrandom': 0}

    trial_picker : object
        Object that picks the trials for each category, according to
        `trial_picker_kwargs` and using TRIALS_INFO
    
    label_with_btrial_numbers : bool
        If True, then an attribute called 'labels' is stored in each returned
        Folded. 'labels' is the behavioral trial number of each entry in
        the Folded.
    
    Example: Bin by block
    # Set up the pipeline
    # How to parse out trials
    trial_picker_kwargs = {
        'labels':['LB', 'PB'], 
        'label_kwargs': [{'block':2}, {'block':4}],
        'outcome': 'hit', 'nonrandom' : 0
        }
    
    # How to fold the window around each trial
    folding_kwargs = {'dstart': -.25, 'dstop': 0.}
    
    # Run the pipeline
    res = kkpandas.pipeline.pipeline_overblock_oneevent(
        kk_server, session, unit2unum(unit), rs, 
        trial_picker_kwargs=trial_picker_kwargs,
        folding_kwargs=folding_kwargs)    
    
    sort_spikes : whether to sort the spike times after loading.
        Certainly the spikes should be sorted before processing.
        This defaults to True because from_flat requires sorted spikes.
    """
    from ns5_process.RS_Sync import RS_Syncer # remove this dependency
    
    # Apply the default BEFORE touching trial_picker_kwargs below.
    # Previously this default was applied after the 'label_kwargs' access,
    # so passing trial_picker_kwargs=None raised TypeError.
    if trial_picker_kwargs is None:
        trial_picker_kwargs = {}
    if folding_kwargs is None:
        folding_kwargs = {}
    
    # And a trial_server object?
    trials_info = io.load_trials_info(rs.full_path)
    events = io.load_events(rs.full_path)

    # Spike selection
    spikes = np.asarray(
        kkserver.get(session=session, unit=unit).time)
    
    # Have to sort them if they aren't already
    if sort_spikes:
        spikes = np.sort(spikes)
    
    # Convert trial_picker_kwargs from MultiIndex if necessary
    if hasattr(trial_picker_kwargs.get('label_kwargs'), 'names'):
        # Copy so we don't mutate the caller's dict
        trial_picker_kwargs = trial_picker_kwargs.copy()
        mi = trial_picker_kwargs['label_kwargs']
        trial_picker_kwargs['label_kwargs'] = [
            dict(zip(mi.names, val2)) for val2 in list(mi)]
    
    # Select trials from behavior
    picked_trials_l = trial_picker.pick(trials_info, **trial_picker_kwargs)
    
    # Fold events structure on start times
    rss = RS_Syncer(rs)
    f = Folded.from_flat(flat=events, starts=rss.trialstart_nbase, 
        subtract_off_center=False)
    
    # Convert to dict Folded representation with trial numbers as labels
    tn2ev = dict(zip(rss.btrial_numbers, f))
    
    # Here is the link between behavior and neural
    # We have picked_trials_l, a list of trial numbers selected from behavior
    # And tn2ev, a dict keyed on trial numbers that actually occurred in
    # neural recording
    # We need to pick out events from each of the trials in each category
    # But first we need to drop trials that never actually occurred from
    # picked_trials_l
    for n in range(len(picked_trials_l)):
        label, trial_numbers = picked_trials_l[n]
        picked_trials_l[n] = (
            label,
            trial_numbers[np.in1d(trial_numbers, rss.btrial_numbers)])

    # Iterate over picked_trials_l and extract time from each trial
    label2timelocks, label2btrial_numbers = {}, {}    
    for label, trial_numbers in picked_trials_l:
        # Extract trials by number (put this as accessor method in Folded)
        trials = [tn2ev[tn] for tn in trial_numbers] # go in folded
    
        # Store the trial numbers that we picked (for labeling the Folded later)
        label2btrial_numbers[label] = np.asarray(trial_numbers)
    
        # Get timelock times by applying a function to each entry (put this in Folded)
        times = EventTimePicker.pick(evname, trials)
        
        # Store the timelock times
        label2timelocks[label] = times
    
    # Now fold spikes over timelocked times
    res = {}    
    for label, timelocks in label2timelocks.items():
        # Optionally label with trial labels
        if label_with_btrial_numbers:
            trial_labels = label2btrial_numbers[label]
        else:
            trial_labels = None
        res[label] = Folded.from_flat(
            flat=spikes, centers=timelocks, labels=trial_labels, 
            **folding_kwargs)
        
        # Optionally apply a map to each folded.
        # Use a list comprehension, not np.asarray(map(...)): in Python 3
        # map() returns an iterator, which asarray would wrap as a 0-d
        # object array instead of mapping elementwise.
        if final_folded_map is not None:
            res[label] = np.asarray(
                [final_folded_map(trial) for trial in res[label]],
                dtype=final_folded_map_dtype)

    return res