Code example #1
def run():
    import os
    import multiprocessing as mp
    from random import choices

    import numpy as np
    import pandas as pd
    from sklearn import manifold

    from oneibl.onelight import ONE

    # Get a session (set_figshare_url is a method, not an attribute)
    one = ONE()
    one.set_figshare_url("https://figshare.com/articles/steinmetz/9974357")

    sessions = one.search(['spikes'])

    sess_n = 36
    sess = sessions[sess_n]

    # ----------------------------- Get metadata/data ---------------------------- #
    df_fp = os.path.join('data', f'sess{sess_n}_alldata.h5')
    if not os.path.isfile(df_fp):
        # Download some data
        channels_loc = one.load_dataset(sess, 'channels.brainLocation')

        spikes_clust = one.load_dataset(sess, 'spikes.clusters')
        spikes_times = (one.load_dataset(sess, 'spikes.times').ravel() *
                        1000).astype(np.int32)  # convert to msecs

        clust_probe = one.load_dataset(sess, 'clusters.probes')
        clust_channel = one.load_dataset(sess, 'clusters.peakChannel')

        print(
            f"Regions in session: {sorted(set(channels_loc.allen_ontology))}")

        # Assign a spike to each cluster and each cluster to a brain region
        spikes = pd.DataFrame(
            dict(cluster=spikes_clust.ravel(), times=spikes_times.ravel()))

        spikes['channel'] = [
            clust_channel[s[0]].ravel()[0] for s in spikes_clust
        ]

        channels_loc_dic = {
            i: row.allen_ontology
            for i, row in channels_loc.iterrows()
        }
        # peakChannel values are 1-based; shift to 0-based rows of channels_loc
        spikes['region'] = [
            channels_loc_dic[int(ch - 1)] for ch in spikes['channel']
        ]

        print(spikes)
        print(set(spikes.region.values))

        spikes.to_hdf(df_fp, key='hdf')
    else:
        spikes = pd.read_hdf(df_fp)

    # Get spikes for one brain region at a time
    max_T = 10 * 60 * 1000  # 10 minutes
    time = np.zeros((max_T, 1))
    time_vals = np.arange(0, len(time))
    sigma = 100  # 100ms std

    for region in ['SCm']:
        # prep some file paths
        frates_fp = os.path.join('data', f'sess_{sess_n}_{region}_frates.npy')

        reg = spikes.loc[spikes.region == region]
        reg_clusters = list(set(reg.cluster.values))
        print(f"Found {len(reg_clusters)} clusters for {region}")

        # Create an array that is N_samples x N_cells and is 1 when a cell fires
        print('Getting cells spikes')
        cells = np.zeros((time.shape[0], len(reg_clusters)))

        for n, clust in enumerate(reg_clusters):
            clust_spikes = spikes.loc[(spikes.cluster == clust)
                                      & (spikes.times < max_T)]
            cells[clust_spikes.times, n] = 1
        cells = cells[:max_T, :]

        # Convert spikes to rates by summing with a gaussian kernel
        print('Getting firing rates')
        if not os.path.isfile(frates_fp):
            frates = np.zeros_like(cells)

            # get_cell_frate is a project helper (sketched after this example)
            # that smooths one cell's spike train into a firing rate
            pool = mp.Pool(mp.cpu_count() - 2)
            res = pool.map(get_cell_frate,
                           [(time, cells, cell, sigma)
                            for cell in np.arange(frates.shape[1])])
            pool.close()

            for n, rate in enumerate(res):
                frates[:, n] = rate

            np.save(frates_fp, frates)

        else:
            frates = np.load(frates_fp)

        # TODO Bin spike counts in 1s bins for persistent homology

        # Embed the data into a lower-dimensional space to facilitate future analyses
        print('Reducing dimensionality')
        pklfile = os.path.join('data', 'SCM_iso20.pkl')

        if os.path.isfile(pklfile):
            print(
                'Skipping isomap because pickled isomap instance already exists'
            )
        else:
            # N_samples is a module-level constant defined elsewhere in the project
            if N_samples < max_T:
                idxs = choices(np.arange(max_T), k=N_samples)
                sel_frates = frates[idxs, :]
            else:
                sel_frates = frates.copy()

            stable_frates = np.sqrt(sel_frates)  # variance-stabilising transform

            iso_instance = manifold.Isomap(
                n_neighbors=5, n_components=20, n_jobs=10
            )  # 5 nearest neighbours, 20-dimensional embedding
            iso_instance = iso_instance.fit(stable_frates)
            proj_data = iso_instance.transform(stable_frates)
            np.save(os.path.join('data', 'SCm_iso_20.npy'), proj_data)

            save_pickle(pklfile, iso_instance)  # project helper, sketched below
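
The helpers get_cell_frate and save_pickle called above are defined elsewhere in the project. A minimal sketch of what they might look like, assuming Gaussian smoothing via scipy and standard pickling (hypothetical, not the project's actual code):

import pickle
from scipy.ndimage import gaussian_filter1d

def get_cell_frate(args):
    # Unpack the tuple passed through pool.map above and smooth one cell's
    # binary spike train with a Gaussian kernel (sigma in samples, i.e. ms here)
    time, cells, cell, sigma = args
    return gaussian_filter1d(cells[:, cell].astype(float), sigma)

def save_pickle(fp, obj):
    # Serialize an object (here the fitted Isomap instance) to disk
    with open(fp, 'wb') as f:
        pickle.dump(obj, f)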
Code example #2
    # Private helper, excerpted from a class (DataLoader.steinmetz_url is
    # assumed to hold the figshare URL used throughout this page)
    def __initialise_one_session():
        one = ONE()
        one.set_figshare_url(DataLoader.steinmetz_url)
        return one
Code example #3
    n_neurons = len(cluster_ids)
    n_intervals = intervals.shape[0]
    counts = np.zeros((n_neurons, n_intervals), dtype=np.uint32)
    for j in range(n_intervals):
        t0, t1 = intervals[j, :]
        # Count the number of spikes in the window, for each neuron.
        x = np.bincount(
            spike_clusters[intervals_idx[j, 0]:intervals_idx[j, 1]],
            minlength=cluster_ids.max() + 1)
        counts[:, j] = x[cluster_ids]
    return counts, cluster_ids
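
# intervals_idx (first/last spike index per interval) is not computed in this
# excerpt; assuming sorted spike times, one way to obtain it (a hypothetical
# reconstruction, not the original code):
def intervals_to_spike_indices(spike_times, intervals):
    # For each [t0, t1] row of intervals, return the indices of the first
    # spike at/after t0 and the first spike after t1
    return np.searchsorted(spike_times, intervals)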


# %%
# Set up ONE
one = ONE()
one.set_figshare_url('https://figshare.com/articles/steinmetz/9974357')
one.set_download_dir(join(DATA_PATH, '{subject}', '{date}'))

# Query all sessions with ephys and behavior
sessions = one.search(['spikes', 'trials'])

# Loop over recording sessions
timeconstant = pd.DataFrame()
for i, ses in enumerate(sessions):
    print('Calculating time constants for session %d of %d' %
          (i + 1, len(sessions)))

    # Get subject and date
    ses_path = normpath(ses)
    ses_date = ses_path.split(os.sep)[-2]
Code example #4
        scene.add_silhouette(spheres, lw=LW + 1)

    # Interpolate cameras
    anim.segment_fact = (end - framen) / (end - prev)
    cam = anim._interpolate_cameras(cam1, cam2)
    return cam


# ------------------------------- create scene ------------------------------- #

scene = Scene(inset=INSET, screenshots_folder="paper/screenshots")
scene.root._needs_silhouette = True
scene.add_brain_region("TH", "MOs", alpha=0.6, silhouette=True)

# download and process probe data
one = ONE()
one.set_figshare_url("https://figshare.com/articles/steinmetz/9974357")

# select session
sessions = one.search(["trials"])
sess = sessions[7]  # 3

# Get spikes data
probes_locs = one.load_dataset(sess, "channels.brainLocation")
clu = one.load_dataset(sess, "clusters")
clu_probes = one.load_dataset(sess, "clusters.probes")
clu_channel = one.load_dataset(sess, "clusters.peakChannel")

spikes_clu = one.load_dataset(sess, "spikes.clusters")
spikes_times = one.load_dataset(sess, "spikes.times")
max_t = np.max(spikes_times)
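
The spheres silhouetted at the top of this example are created in code omitted from the excerpt. A minimal sketch of one way to build them, assuming brainrender's Points actor and the ccf_* coordinate columns of the Steinmetz channels.brainLocation table (a hypothetical reconstruction, not the original):

from brainrender.actors import Points

# one point per recorded channel, in CCF coordinates
coords = probes_locs[["ccf_ap", "ccf_dv", "ccf_lr"]].values
spheres = scene.add(Points(coords, radius=30, alpha=0.8))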
Code example #5
# import modules
# =============================================================================
from oneibl.onelight import ONE
import numpy as np
import matplotlib.pyplot as plt
import seaborn
from ibllib.misc import pprint
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
#%%=============================================================================

# load data
# =============================================================================

# create ONE object
one = ONE()

# search all sessions containing trial datasets
eids = one.search(['_ibl_trials.*'])

# try one session
eid = eids[10]

# see all dataset types in this session
dset_types = one.list(eid)

# load a single dataset
choice = one.load_dataset(eid, dset_types[0])
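
The sklearn imports above suggest the session is used to decode choice. A minimal hedged sketch, under the assumption that `choice` holds the _ibl_trials.choice dataset and that the contrast datasets follow the IBL ALF naming:

contrast_left = one.load_dataset(eid, '_ibl_trials.contrastLeft')
contrast_right = one.load_dataset(eid, '_ibl_trials.contrastRight')
X = np.nan_to_num(np.c_[contrast_left.ravel(), contrast_right.ravel()])  # NaN -> 0 when stimulus absent
y = choice.ravel()
scores = cross_val_score(LogisticRegression(), X, y, cv=5)
print('decoding accuracy: %.2f' % scores.mean())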
Code example #6
File: load_data.py Project: SaraMati/NMA
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%matplotlib inline

from oneibl.onelight import ONE
one = ONE()

one.set_figshare_url("https://figshare.com/articles/steinmetz/9974357")
sessions = one.search(['spikes'])
session = sessions[11]
one.list(session)

# loads all the objects from the session
trials = one.load_object(session, 'trials')
spikes = one.load_object(session, 'spikes')
clusters = one.load_object(session, 'clusters')
channels = one.load_object(session, 'channels')
probes = one.load_object(session, 'probes')
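
# Loaded objects behave like attribute-accessible bunches of the session's
# ALF files; a quick sanity check (field names assumed from the dataset
# wiki linked below):
print(spikes.times.shape, spikes.clusters.shape)
print(trials.intervals[:5])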

'''
See https://github.com/nsteinme/steinmetz-et-al-2019/wiki/data-files for a description of the dataset

Session 11 has:
channels: 1122
clusters: 1219
spikes: 11,757,530

trial timeline:
1. mouse holds wheel still for a short interval (0.2-0.5s)
2. trial initiates with stimulus onset
Code example #7
"""
#%% =============================================================================
# import modules
# =============================================================================
from oneibl.onelight import ONE
import numpy as np
import matplotlib.pyplot as plt
import seaborn
from ibllib.misc import pprint

# =============================================================================
# load data
# =============================================================================

# create ONE object
one = ONE()

# show the available search terms
one.search_terms()

# search all sessions containing trial datasets
eids = one.search(['_ibl_trials.*'])

# try one session
eid = eids[9]

# see all dataset types in this session
dset_types = one.list(eid)

# load a single dataset
choice = one.load_dataset(eid, dset_types[0])
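
A small usage note: pprint (imported above from ibllib.misc) renders structures as indented JSON, which is handy for inspecting the dataset types:

pprint(dset_types)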
Code example #8
File: playground.py Project: FedeClaudi/playground
# %%
import pandas as pd
import numpy as np
from oneibl.onelight import ONE

one = ONE()
one.set_figshare_url("https://figshare.com/articles/steinmetz/9974357")

sessions = one.search(['spikes'])
sess = sessions[2]
# print(one.list(sess))

# %%
channels_loc = one.load_dataset(sess, 'channels.brainLocation')

spikes_clust = one.load_dataset(sess, 'spikes.clusters')
spikes_times = one.load_dataset(sess, 'spikes.times')

clust_probe = one.load_dataset(sess, 'clusters.probes')
clust_channel = one.load_dataset(sess, 'clusters.peakChannel')

print(f"Regions in session: {set(sorted(channels_loc))}")

# %%
spikes = pd.DataFrame(dict(cluster=spikes_clust.ravel(), times=spikes_times.ravel()))

# %%
spikes['channel'] = [clust_channel[s].ravel()[0] for s in spikes_clust]
spikes

# %%
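A typical next step, shown in full in code example #1 above, is to map each spike's channel to a brain region via channels.brainLocation (the - 1 converts 1-based peak channels to 0-based rows):

channels_loc_dic = {i: row.allen_ontology for i, row in channels_loc.iterrows()}
spikes['region'] = [channels_loc_dic[int(ch - 1)] for ch in spikes['channel']]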
Code example #9
File: probes.py Project: deepakasaragod/BrainRender
cam = {
    "pos": (-16170, -7127, 31776),
    "viewup": (0, -1, 0),
    "clippingRange": (27548, 67414),
    "focalPoint": (7319, 2861, -3942),
    "distance": 43901,
}

# create scene and edit root
scene = Scene(inset=INSET, screenshots_folder="paper/screenshots")
scene.root._needs_silhouette = True
scene.root._silhouette_kwargs["lw"] = 1
scene.root.alpha(0.2)

# download probe data from ONE
one = ONE()
one.set_figshare_url("https://figshare.com/articles/steinmetz/9974357")

# select sessions with trials
sessions = one.search(["trials"])

# get probe locations
probes_locs = []
for sess in sessions:
    probes_locs.append(one.load_dataset(sess, "channels.brainLocation"))

# get single probe tracks (374 recorded channels per probe in this dataset)
for locs in probes_locs:
    k = int(len(locs) / 374.0)  # number of probes in this session

    for i in range(k):