# Example 1
# Test with eid that does not have any probe planned/histology values in Alyx
# eid = 'da188f2c-553c-4e04-879b-c9ea2d1b9a93'

# ----- RECOMMENDED: Option 1 (default) ------
# 1. Get spikes, cluster (with brain regions assigned to clusters) and channels
# One call loads spikes, clusters and channels together; per the note above,
# the clusters returned here already have brain regions assigned.
spikes, clusters, channels = bbone.load_spike_sorting_with_channel(eid,
                                                                   one=one)
del spikes, clusters, channels  # Delete for the purpose of the example

# ---------------------------------------------
# 2. Get only spikes and clusters (without brain regions assigned to clusters)
#    data separately from channels
#    Use merger function to get channels information into clusters
#    Adding feature x, y from default
spikes, clusters = bbone.load_spike_sorting(eid, one=one)
channels = bbone.load_channel_locations(eid, one=one)
# Extra channel fields to copy onto the clusters, on top of the defaults.
keys = ['x', 'y']
clusters_brain = bbone.merge_clusters_channels(clusters,
                                               channels,
                                               keys_to_add_extra=keys)
del spikes, clusters, clusters_brain, channels  # Delete for the purpose of the example

# ---------------------------------------------
# 3. I don't want to connect to ONE and I already know my session path
session_path = one.path_from_eid(eid)  # replace by your local path
# load_spike_sorting also accepts a local session path instead of an eid.
spikes, clusters = bbone.load_spike_sorting(session_path, one=one)
# TODO offline loading of channel locations ? Probably by caching the queries.

# ---------------- WIP ---------------------
# Example 2
# Initialise one empty result list per metric name.
for _, metric_name in metric_funcs:
    metrics[metric_name] = []

# Iterate over sessions (eid) paired with their probe labels.
for i, (eid, probe) in enumerate(zip(eids, probes)):
    print(eid)
    if eid in bad_eids: continue  # skip sessions known to be bad
    print("{} from {}".format(i, len(eids)))
    print(one.list(eid, 'subjects'))
    # Load the planned probe trajectories and print the coordinates of the
    # entry matching this session's probe label.
    coords = one.load(eid, dataset_types=['probes.trajectory'])
    for c in coords[0]:
        if c['label'] == probe:
            print("{}, x: {}, y: {}, z: {}".format(c['label'], c['x'], c['y'],
                                                   c['z']))

    # NOTE(review): this unconditional `continue` makes everything below it in
    # the loop dead code — presumably a temporary switch to only print
    # trajectories while skipping the metric computation; confirm intent.
    continue
    spikes, _ = load_spike_sorting(eid, one=one)
    spikes = spikes[0]

    if spikes[probe]['times'] is None:
        print('empty times skip')
        continue

    # Firing-rate matrix computed from spike times and cluster assignments.
    fr = calc_fr(spikes[probe]['times'], spikes[probe]['clusters'])
    labs.append(one.list(eid, 'labs'))

    for j, (metric, metric_name) in enumerate(metric_funcs):

        # Metrics whose name ends in '_fr' take the firing-rate matrix;
        # the others take something else (truncated below).
        if str.endswith(metric_name, '_fr'):
            metrics[metric_name].append(metric(fr))
        else:
            metrics[metric_name].append(
                # NOTE(review): snippet is truncated here in the source —
                # the argument to append() is missing.
# Example 3
from brainbox.io.one import load_spike_sorting, load_channel_locations
from oneibl.one import ONE

# Connect to the development Alyx database.
one = ONE(base_url="https://dev.alyx.internationalbrainlab.org")
# Find the ephys sessions recorded for subject ZM_2407.
eids = one.search(subject='ZM_2407', task_protocol='ephys')

# Load channel locations and spike sorting for the first matching session.
channels = load_channel_locations(eids[0], one=one)
spikes, clusters = load_spike_sorting(eids[0], one=one)
# Example 4
# Regressing against continuous variables is similar. Note that because .add_covariate() is the
# Core function for adding regressors, it will need an explicit pd.Series to operate, and not a
# column name, for the second argument
# Negative offset shifts the regressor window before the event, i.e. an
# anti-causal regressor as the description says.
design.add_covariate('wheel',
                     trialsdf['wheel_velocity'],
                     shortbases,
                     offset=-SHORT_KL,
                     desc='Anti-causal regressor for wheel velocity')
# We can also regress while omitting basis functions:
design.add_covariate_raw('wheelraw',
                         'wheel_velocity',
                         desc='Wheel velocity, no bases')
# Build the actual design matrix from the covariates registered above.
design.compile_design_matrix()

# Now let's load in some spikes and fit them
spikes, clusters = bbone.load_spike_sorting(eid, one, probe='probe00')
spk_times = spikes.probe00.times
spk_clu = spikes.probe00.clusters

# We will build a linear model and a poisson model:
# both share the same design matrix, spikes and bin width.
lm = LinearGLM(design, spk_times, spk_clu, binwidth=BINSIZE)
pm = PoissonGLM(design, spk_times, spk_clu, binwidth=BINSIZE)

# Running the .fit() method is enough to start the fitting procedure:
lm.fit()
pm.fit()

# After which we can assess the score of each model on our data:
lm.score()
pm.score()
# Example 5
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import KFold
from one.api import ONE

from brainbox.population import get_spike_counts_in_bins, classify
import brainbox.io.one as bbone

# %% Load in data
# %% Load in data
one = ONE()
# search() returns a list of matching session eids; we use the first one.
eid = one.search(subject='ZM_2240', date_range=['2020-01-23', '2020-01-23'])
spikes, clusters = bbone.load_spike_sorting(eid[0], one=one)
trials = one.load_object(eid[0], 'trials', collection='alf')

# Use probe00
spikes = spikes['probe00']
clusters = clusters['probe00']

# %% Do decoding
print('\nDecoding whether the stimulus was on the left or the right..')

# Get population response matrix of all trials
# Each row of `times` is a [start, end] window per trial.
times = np.column_stack(
    ((trials.goCue_times), (trials.goCue_times + 0.3)))  # 0-300 ms timewindow
population_activity, cluster_ids = get_spike_counts_in_bins(
    spikes.times, spikes.clusters, times)
# Transpose so rows index trials — assumes counts come back as
# (clusters, trials); confirm against get_spike_counts_in_bins.
population_activity = population_activity.T

# Get decoding target
# 1 where contrastLeft is NaN — presumably meaning the stimulus was on the
# right side; verify against the trials table convention. (Truncated below.)
stim_sides = np.isnan(trials.contrastLeft).astype(int)