Example 1
def get_children_region_names(acronyms, return_full_name=False):
    """Return the level-8 descendant regions of each acronym (acronyms by default, full
    names if return_full_name=True); acronyms that cannot be found are returned unchanged."""
    br = BrainRegions()
    children_region_names = []
    for i, acronym in enumerate(acronyms):
        try:
            regid = br.id[np.argwhere(br.acronym == acronym)]
            descendants = br.descendants(regid)
            targetlevel = 8
            if sum(descendants.level == targetlevel) == 0:
                if return_full_name:
                    children_region_names.append(descendants.name[-1])
                else:
                    children_region_names.append(descendants.acronym[-1])
            else:
                if return_full_name:
                    children_region_names.append(
                        descendants.name[(descendants.level == targetlevel)
                                         & (descendants.id > 0)])
                else:
                    children_region_names.append(
                        descendants.acronym[(descendants.level == targetlevel)
                                            & (descendants.id > 0)])
        except IndexError:
            children_region_names.append(acronym)
    if len(children_region_names) == 1:
        return children_region_names[0]
    else:
        return children_region_names
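A minimal usage sketch for the helper above (assumes numpy and ibllib are installed; the acronyms are only illustrative):
import numpy as np
from ibllib.atlas import BrainRegions

# Level-8 children of the secondary motor area, as acronyms (illustrative input)
children = get_children_region_names(['MOs'])
# Full names instead of acronyms, for several regions at once
children_full = get_children_region_names(['MOs', 'VISp'], return_full_name=True)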
Example 2
def remap(ids, source='Allen', dest='Beryl', output='acronym'):
    """Remap Allen atlas region ids onto another mapping (e.g. Beryl) and return the
    remapped ids or their acronyms."""
    br = BrainRegions()
    _, inds = ismember(ids, br.id[br.mappings[source]])
    remapped_ids = br.id[br.mappings[dest][inds]]
    if output == 'id':
        return remapped_ids
    elif output == 'acronym':
        return br.get(remapped_ids)['acronym']
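A possible call pattern (assumes the ismember helper from brainbox.numerical used inside the function; the region ids below are placeholders):
import numpy as np
from ibllib.atlas import BrainRegions
from brainbox.numerical import ismember

allen_ids = np.array([512, 997])            # placeholder Allen CCF region ids
beryl_acronyms = remap(allen_ids)           # default: Allen -> Beryl, returns acronyms
beryl_ids = remap(allen_ids, output='id')   # same remapping, returns region ids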
Example 3
def get_parent_region_name(acronyms):
    """Return the name of the level-6 ancestor of each acronym; if no level-6 ancestor
    exists the last listed ancestor is used, and unknown acronyms are returned unchanged."""
    brainregions = BrainRegions()
    parent_region_names = []
    for i, acronym in enumerate(acronyms):
        try:
            regid = brainregions.id[np.argwhere(
                brainregions.acronym == acronym)]
            ancestors = brainregions.ancestors(regid)
            targetlevel = 6
            if sum(ancestors.level == targetlevel) == 0:
                parent_region_names.append(ancestors.name[-1])
            else:
                parent_region_names.append(ancestors.name[np.argwhere(
                    ancestors.level == targetlevel)[0, 0]])
        except IndexError:
            parent_region_names.append(acronym)
    if len(parent_region_names) == 1:
        return parent_region_names[0]
    else:
        return parent_region_names
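A short usage sketch (acronyms are illustrative; a single-element input returns one name, a longer list returns a list):
import numpy as np
from ibllib.atlas import BrainRegions

parent = get_parent_region_name(['VISp'])          # name of the level-6 ancestor
parents = get_parent_region_name(['VISp', 'CA1'])  # list in, list of names out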
Example 4
def get_full_region_name(acronyms):
    """Return the full Allen region name for each acronym; unknown acronyms are returned
    unchanged."""
    brainregions = BrainRegions()
    full_region_names = []
    for i, acronym in enumerate(acronyms):
        try:
            regname = brainregions.name[np.argwhere(
                brainregions.acronym == acronym).flatten()][0]
            full_region_names.append(regname)
        except IndexError:
            full_region_names.append(acronym)
    if len(full_region_names) == 1:
        return full_region_names[0]
    else:
        return full_region_names
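And a usage sketch for the name lookup (again with illustrative acronyms):
from ibllib.atlas import BrainRegions
import numpy as np

full_name = get_full_region_name(['CA1'])           # full Allen name of a single region
full_names = get_full_region_name(['CA1', 'MOs'])   # list in, list out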
Example 5
def process_df(controller,
               file_path=None,
               aggregator='median',
               grouper='acronym'):
    """
    Process priors data and get color map and scalar values
    """
    if file_path is None:
        # No file given: generate a random value per region for demonstration,
        # with the claustrum (CLA) set above the rest.
        br = BrainRegions()
        data = pd.DataFrame(data={'acronym': br.acronym})
        data['value'] = np.random.uniform(size=len(data))
        data.loc[data['acronym'] == 'CLA', 'value'] = 1.5
    # The branch that loads priors from file_path is not shown in this snippet.

    filtered_df = data.groupby(grouper).agg({'value': aggregator})
    filtered_df.dropna(inplace=True)
    min_value = float(np.amin(filtered_df, axis=0).to_numpy()[0])
    max_value = float(np.amax(filtered_df, axis=0).to_numpy()[0])
    print('Min prior value ' + str(min_value))
    print('Max prior value ' + str(max_value))

    scalars_map = {}

    # Modify this code if you have separate data for the left and right hemispheres.
    # scalars_map maps atlas row ids to scalar values. With the lateralized brain mapping,
    # the standard Allen CCF region id is negated on the right hemisphere.
    # This code uses standard acronym lookup, which matches the region on both hemispheres,
    # so the value assigned to an acronym is mirrored left/right.

    # Alternative: iterate by index (for i in range(len(filtered_df))), which preserves dtypes
    for acronym, row in filtered_df.iterrows():
        value = row.iloc[0]
        if value is None:
            continue
        region_ids, row_ids = controller.model.get_region_and_row_id(acronym)
        if region_ids is None:
            print('Acronym', acronym, 'was not found in Atlas')
            continue
        for r_id in range(len(region_ids)):
            region_id = region_ids[r_id]
            row_id = row_ids[r_id]
            if region_id is None:
                print('Error, could not find acronym (ignoring it)', acronym)
                continue
            if row_id == 0:  #or value.isnull().values.any():
                # We ignore void acronym and nan values
                continue
            scalars_map[int(row_id)] = value
    return scalars_map
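process_df expects a controller whose model exposes get_region_and_row_id(acronym). A hedged sketch of that interface with a throwaway stub (the classes and return values below are hypothetical, and the pandas/numpy/BrainRegions imports used by process_df are assumed to be in place):
class _FakeModel:
    def get_region_and_row_id(self, acronym):
        # Hypothetical stub: pretend every acronym maps to region id 1, row id 1
        return [1], [1]

class _FakeController:
    model = _FakeModel()

# With file_path=None, process_df generates random per-region values itself
scalars_map = process_df(_FakeController())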
Example 6
def _load_spikes(eid, probe_idx=0, fs=3e4):
    """Load spike times, clusters, amplitudes and depths for one probe of a session."""
    one = ONE()
    br = BrainRegions()

    probe_name = 'probe%02d' % probe_idx
    spikes, clusters, channels = load_spike_sorting_fast(
        eid=eid,
        one=one,
        probe=probe_name,
        # spike_sorter='pykilosort',
        dataset_types=['spikes.samples', 'spikes.amps', 'spikes.depths'],
        brain_regions=br)

    st = spikes[probe_name]['samples'] / fs
    sc = spikes[probe_name]['clusters']
    sa = spikes[probe_name]['amps']
    sd = spikes[probe_name]['depths']
    n = len(st)

    # Replace NaN depths with the smallest valid depth
    sd[np.isnan(sd)] = sd[~np.isnan(sd)].min()

    # Colored or gray spikes?
    # color = colorpal(sc.astype(np.int32), cpal='glasbey')
    color = np.tile(np.array([127, 127, 127, 32]), (n, 1))

    # assert 100 < len(cr) < 1000
    # # Brain region colors
    # atlas = AllenAtlas(25)
    # n = len(atlas.regions.rgb)
    # alpha = 255 * np.ones((n, 1))
    # rgb = np.hstack((atlas.regions.rgb, alpha)).astype(np.uint8)
    # spike_regions = cr[sc]
    # # HACK: spurious values
    # spike_regions[spike_regions > 2000] = 0
    # color = rgb[spike_regions]

    return SpikeData(st, sc, sd, color)
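A usage sketch for the loader (assumes ONE, BrainRegions, load_spike_sorting_fast and the SpikeData container are imported/defined as in the surrounding source; the session id is a placeholder):
# Placeholder experiment id; replace with a real ONE session eid
eid = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
spike_data = _load_spikes(eid, probe_idx=0)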
import numpy as np
from brainbox.task.closed_loop import generate_pseudo_session
from brainbox.population.decode import get_spike_counts_in_bins, regress
import pandas as pd
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
from my_functions import paths, query_sessions, check_trials, combine_layers_cortex, load_trials
from models.expSmoothing_prevAction import expSmoothing_prevAction as exp_prev_action
from models.expSmoothing_stimside import expSmoothing_stimside as exp_stimside
import brainbox.io.one as bbone
from oneibl.one import ONE
from ibllib.atlas import BrainRegions
from sklearn.metrics import mean_squared_error
from brainbox.numerical import ismember
one = ONE()
br = BrainRegions()

# Settings
REMOVE_OLD_FIT = False
OVERWRITE = False
TARGET = 'prior-prevaction'
MIN_NEURONS = 5  # min neurons per region
REGULARIZATION = 'L2'
DECODER = 'linear-regression-%s' % REGULARIZATION
VALIDATION = 'kfold'
INCL_NEURONS = 'all'  # all or pass-QC
INCL_SESSIONS = 'aligned-behavior'  # all, aligned, resolved, aligned-behavior or resolved-behavior
ATLAS = 'beryl-atlas'
NUM_SPLITS = 5
CHANCE_LEVEL = 'other-trials'
ITERATIONS = 20  # for null distribution estimation
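
# Hedged sketch (not from the original script): one way these settings could map onto an
# sklearn estimator and cross-validation splitter, with Ridge standing in for the
# 'linear-regression-L2' decoder.
from sklearn.linear_model import Ridge

if REGULARIZATION == 'L2':
    estimator = Ridge()
if VALIDATION == 'kfold':
    cross_validator = KFold(n_splits=NUM_SPLITS, shuffle=True)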
            'atlas_id': np.array([ch['brain_region'] for ch in channels]),
            'x': np.array([ch['x'] for ch in channels]) / 1e6,
            'y': np.array([ch['y'] for ch in channels]) / 1e6,
            'z': np.array([ch['z'] for ch in channels]) / 1e6,
            'axial_um': np.array([ch['axial'] for ch in channels]),
            'lateral_um': np.array([ch['lateral'] for ch in channels])
        }

    else:
        print(
            f'No histology or ephys aligned trajectory for session: {eid} and '
            f'probe: {probe_label}, no channels available')
        chans = None

if chans is not None:
    r = BrainRegions()
    chans['acronym'] = r.get(ids=chans['atlas_id']).acronym
    chans['rgb'] = r.get(ids=chans['atlas_id']).rgb
    cluster_brain_region = chans['acronym'][cluster_chans]
    cluster_colour = chans['rgb'][cluster_chans]
    cluster_xyz = np.c_[chans['x'], chans['y'], chans['z']][cluster_chans]
    regions, idx, n_clust = np.unique(cluster_brain_region,
                                      return_index=True,
                                      return_counts=True)

    region_cols = cluster_colour[idx, :]
    fig, ax = plt.subplots()
    ax.bar(x=np.arange(len(regions)),
           height=n_clust,
           tick_label=regions,
           color=region_cols / 255)
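    # A possible continuation to finish the figure (not in the original snippet; assumes
    # matplotlib.pyplot is imported as plt, as the plt.subplots() call implies)
    ax.set_ylabel('Number of clusters')
    ax.set_xlabel('Brain region')
    ax.tick_params(axis='x', labelrotation=90)  # region acronyms overlap without rotation
    fig.tight_layout()
    plt.show()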