Example #1
 def _navigate_tree(self, ids, direction='down', return_indices=False):
     """
     Private method to navigate the tree and get all related objects either up, down or along the branch.
     By convention the provided id is returned in the list of regions
     :param ids: array or single allen id (int32)
     :param direction: 'up' returns ancestors, 'down' descendants
     :param return_indices: bool (False), if True also returns a second argument with the indices
     mapping into the current brain regions object
     :return: Bunch
     """
     indices = ismember(self.id, ids)[0]
     count = np.sum(indices)
     while True:
         if direction == 'down':
             indices |= ismember(self.parent, self.id[indices])[0]
         elif direction == 'up':
             indices |= ismember(self.id, self.parent[indices])[0]
         else:
             raise ValueError("direction should be either 'up' or 'down'")
         if count == np.sum(indices):  # last iteration didn't find any match
             break
         else:
             count = np.sum(indices)
     if return_indices:
         return self.get(self.id[indices]), np.where(indices)[0]
     else:
         return self.get(self.id[indices])
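The loop above grows a boolean index over the region table until an iteration adds nothing new. Below is a minimal, self-contained sketch of the 'down' direction on a toy id/parent table, with a numpy stand-in for `iblutil.numerical.ismember` (first output: membership mask, second output: indices into the second argument):

import numpy as np

def ismember(a, b):
    # minimal stand-in for iblutil.numerical.ismember (assumes unique values in b)
    a, b = np.asarray(a), np.asarray(b)
    lia = np.isin(a, b)
    sorter = np.argsort(b)
    locb = sorter[np.searchsorted(b, a[lia], sorter=sorter)]
    return lia, locb

ids = np.array([1, 2, 3, 4, 5])        # toy region ids
parent = np.array([0, 1, 1, 2, 2])     # 1 is the root; 4 and 5 are children of 2

indices = ismember(ids, np.array([2]))[0]   # start from region id 2
count = np.sum(indices)
while True:
    indices |= ismember(parent, ids[indices])[0]   # direction='down': add direct children
    if count == np.sum(indices):                   # converged: no new descendants found
        break
    count = np.sum(indices)
print(ids[indices])   # -> [2 4 5], the region itself plus all of its descendants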
Example #2
 def _mapping_from_regions_list(self, new_map, lateralize=False):
     """
     From a vector of region ids, creates a mapping such that
     newids = self.mapping
     :param new_map: np.array: vector of region ids
     """
     I_ROOT = 1
     I_VOID = 0
     # to lateralize we make sure all regions are represented in + and -
     new_map = np.unique(np.r_[-new_map, new_map])
     assert np.all(np.isin(new_map, self.id)), \
         "All mapping ids should be represented in the Allen ids"
     # with the lateralization, self.id may have duplicate values so ismember is necessary
     iid, inm = ismember(self.id, new_map)
     iid = np.where(iid)[0]
     mapind = np.zeros_like(self.id) + I_ROOT  # non assigned regions are root
     # TO DO should root be lateralised?
     mapind[iid] = iid  # regions present in the list have the same index
     # Starting by the higher up levels in the hierarchy, assign all descendants to the mapping
     for i in np.argsort(self.level[iid]):
         descendants = self.descendants(self.id[iid[i]]).id
         _, idesc, _ = np.intersect1d(self.id, descendants, return_indices=True)
         mapind[idesc] = iid[i]
     mapind[0] = I_VOID  # void stays void
     # to delateralize the regions, assign the positive index to all mapind elements
     if lateralize is False:
         _, iregion = ismember(np.abs(self.id), self.id)
         mapind = mapind[iregion]
     return mapind
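The returned `mapind` is an index vector into `self.id`: row `i` of the atlas table is assigned to the region `self.id[mapind[i]]`. A toy illustration of how such a mapping vector is consumed downstream (the arrays are illustrative, not real Allen data):

import numpy as np

ids    = np.array([997, 8, 567, 688])   # toy atlas ids, 997 being root
mapind = np.array([0, 0, 2, 2])         # rows 0-1 collapse onto root, rows 2-3 onto id 567
# a toy annotation volume stores row indices; the mapping turns them into aggregated ids
voxel_rows = np.array([3, 1, 2, 0])
print(ids[mapind[voxel_rows]])          # -> [567 997 567 997]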
Example #3
def multiple_spike_trains(firing_rates=None, rec_len_secs=1000, cluster_ids=None,
                          amplitude_noise=20 * 1e-6):
    """
    :param firing_rates: list or np.array of firing rates (spikes per second)
    :param rec_len_secs: recording length in seconds
    :param cluster_ids: (optional) cluster ids to assign to each spike train
    :param amplitude_noise: (optional) standard deviation of the amplitude noise, in volts
    :return: spike_times, spike_amps, spike_clusters
    """
    if firing_rates is None:
        firing_rates = np.random.randint(150, 600, 10)
    if cluster_ids is None:
        cluster_ids = np.arange(firing_rates.size)
    ca = np.exp(np.random.normal(5.5, 0.5, firing_rates.size)) / 1e6  # output is in V
    st = np.empty(0)
    sc = np.empty(0)
    for i, firing_rate in enumerate(firing_rates):
        t = generate_spike_train(firing_rate=firing_rate, rec_len_secs=rec_len_secs)
        st = np.r_[st, t]
        sc = np.r_[sc, np.zeros(t.size, dtype=np.int32) + cluster_ids[i]]

    ordre = st.argsort()
    st = st[ordre]
    sc = np.int32(sc[ordre])
    _, isc = ismember(sc, cluster_ids)  # clusters ids may be arbitrary: re-index
    sa = np.maximum(ca[isc] + np.random.randn(st.size) * amplitude_noise, 25 * 1e-6)
    return st, sa, sc
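The `ismember` call above re-indexes the (possibly arbitrary and non-contiguous) cluster ids of each spike onto the rows of the per-cluster amplitude array `ca`. A toy version of that re-indexing, assuming `ismember` is importable from `iblutil.numerical` (the import path may differ between versions):

import numpy as np
from iblutil.numerical import ismember  # assumed import path

cluster_ids = np.array([12, 40, 7])      # arbitrary, non-contiguous cluster ids
ca = np.array([1e-4, 2e-4, 3e-4])        # one mean amplitude per cluster, same order as cluster_ids
sc = np.array([7, 12, 12, 40, 7])        # per-spike cluster id
_, isc = ismember(sc, cluster_ids)       # isc indexes into cluster_ids (and therefore into ca)
print(ca[isc])                           # -> [0.0003 0.0001 0.0001 0.0002 0.0003]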
Example #4
def check_up_to_date(subj_path, df):
    """
    Check which sessions on the local file system are missing from the computed training table
    :param subj_path: path to the subject folder containing the session date folders
    :param df: dataframe of sessions already in the training table (or None)
    :return: dataframe of local sessions missing from the training table (all local sessions if df is None)
    """
    session_dates = subj_path.glob('*')
    df_session = pd.DataFrame()

    for sess_date in session_dates:
        sess_paths = list(sess_date.glob('00*'))
        date = sess_date.stem
        if len(sess_paths) > 0:
            for sess in sess_paths:
                if is_session_path(sess):
                    df_session = pd.concat(
                        [df_session,
                         pd.DataFrame({'date': date, 'session_path': str(sess)}, index=[0])],
                        ignore_index=True)

    if df is None:
        return df_session
    else:
        # recorded_session_paths = df['session_path'].values
        isin, _ = ismember(df_session.date.unique(), df.date.unique())
        missing_dates = df_session.date.unique()[~isin]
        return df_session[df_session['date'].isin(missing_dates)].sort_values('date')
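Here `ismember` compares the dates found on disk against the dates already present in the training table; only the sessions whose dates are not yet recorded are returned. The core of that comparison, with toy date strings (import path assumed):

import numpy as np
from iblutil.numerical import ismember  # assumed import path

local_dates    = np.array(['2022-01-01', '2022-01-02', '2022-01-03'])   # dates found on disk
recorded_dates = np.array(['2022-01-01', '2022-01-03'])                 # dates already in the table
isin, _ = ismember(local_dates, recorded_dates)
print(local_dates[~isin])   # -> ['2022-01-02'], the sessions still missing from the table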
Example #5
    def _run(self):
        """runs for initiated PID, streams data, destripe and check bad channels"""
        assert self.pid
        self.eqcs = []
        T0 = 60 * 30
        SNAPSHOT_LABEL = "raw_ephys_bad_channels"
        output_files = list(self.output_directory.glob(f'{SNAPSHOT_LABEL}*'))
        if len(output_files) == 4:
            return output_files

        self.output_directory.mkdir(exist_ok=True, parents=True)

        if self.location != 'server':
            self.histology_status = self.get_histology_status()
            electrodes = self.get_channels('electrodeSites', f'alf/{self.pname}')

            if 'atlas_id' in electrodes.keys():
                electrodes['ibr'] = ismember(electrodes['atlas_id'], self.brain_regions.id)[1]
                electrodes['acronym'] = self.brain_regions.acronym[electrodes['ibr']]
                electrodes['name'] = self.brain_regions.name[electrodes['ibr']]
                electrodes['title'] = self.histology_status
            else:
                electrodes = None

            sr, t0 = stream(self.pid, T0, nsecs=1, one=self.one)
            raw = sr[:, :-sr.nsync].T
        else:
            electrodes = None
            ap_file = next(
                self.session_path.joinpath('raw_ephys_data', self.pname).glob('*ap.*bin'), None)
            if ap_file is not None:
                sr = spikeglx.Reader(ap_file)
                raw = sr[int(sr.fs * T0):int(sr.fs * (T0 + 1)), :-sr.nsync].T
            else:
                return []

        channel_labels, channel_features = voltage.detect_bad_channels(raw, sr.fs)
        _, eqcs, output_files = ephys_bad_channels(
            raw=raw,
            fs=sr.fs,
            channel_labels=channel_labels,
            channel_features=channel_features,
            channels=electrodes,
            title=SNAPSHOT_LABEL,
            destripe=True,
            save_dir=self.output_directory,
            br=self.brain_regions,
            pid_info=self.pid_label)
        self.eqcs = eqcs
        return output_files
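The `ismember(...)[1]` call above returns, for every channel, the row of the brain regions table matching its `atlas_id`, which is then used to look up acronyms and names. A toy, numpy-only version of that lookup (the regions table here is illustrative, import path assumed):

import numpy as np
from iblutil.numerical import ismember  # assumed import path

region_ids     = np.array([997, 8, 567])           # toy stand-in for brain_regions.id
region_acronym = np.array(['root', 'grey', 'CH'])  # toy stand-in for brain_regions.acronym
channel_atlas_id = np.array([8, 567, 567, 997])    # per-channel atlas id
ibr = ismember(channel_atlas_id, region_ids)[1]    # row in the regions table for each channel
print(region_acronym[ibr])                         # -> ['grey' 'CH' 'CH' 'root']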
Example #6
 def remap(self, region_ids, source_map='Allen', target_map='Beryl'):
     """
     Remap atlas region ids from a source map to a target map
     :param region_ids: atlas ids to map
     :param source_map: name of the map the original region_ids are in
     :param target_map: name of the map onto which to remap
     :return: remapped atlas ids
     """
     _, inds = ismember(region_ids, self.id[self.mappings[source_map]])
     return self.id[self.mappings[target_map][inds]]
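The remap is two lookups: `ismember` finds where `region_ids` sit in the source mapping, and the target mapping is then read at those positions. A toy numeric illustration with made-up ids and mappings (import path assumed):

import numpy as np
from iblutil.numerical import ismember  # assumed import path

ids = np.array([997, 8, 567, 688])     # toy atlas ids
mappings = {
    'Allen': np.array([0, 1, 2, 3]),   # identity mapping: every region maps onto itself
    'Beryl': np.array([0, 1, 2, 2]),   # toy coarser mapping: id 688 collapses onto 567
}
region_ids = np.array([688, 8])
_, inds = ismember(region_ids, ids[mappings['Allen']])
print(ids[mappings['Beryl'][inds]])    # -> [567   8]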
Example #7
def prepare_lr_data(acronyms_lh, values_lh, acronyms_rh, values_rh):
    """
    Prepare data in format needed for plotting when providing different region values per hemisphere

    :param acronyms_lh: array of acronyms on left hemisphere
    :param values_lh: values for each acronym on left hemisphere
    :param acronyms_rh: array of acronyms on right hemisphere
    :param values_rh: values for each acronym on right hemisphere
    :return: combined acronyms and two column array of values
    """

    acronyms = np.unique(np.r_[acronyms_lh, acronyms_rh])
    values = np.nan * np.ones((acronyms.shape[0], 2))
    _, l_idx = ismember(acronyms_lh, acronyms)
    _, r_idx = ismember(acronyms_rh, acronyms)
    values[l_idx, 0] = values_lh
    values[r_idx, 1] = values_rh

    return acronyms, values
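With the function above in scope (it only needs numpy and `ismember`), a minimal call with made-up acronyms shows the output layout: one row per unique acronym and one column per hemisphere, with NaN where a hemisphere has no value:

import numpy as np

# minimal usage of the prepare_lr_data function defined above (toy acronyms and values)
acr, vals = prepare_lr_data(np.array(['MOp', 'SSp']), np.array([1.0, 2.0]),
                            np.array(['SSp', 'VISp']), np.array([3.0, 4.0]))
print(acr)    # -> ['MOp' 'SSp' 'VISp']
print(vals)   # -> [[ 1. nan]
              #     [ 2.  3.]
              #     [nan  4.]]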
Example #8
 def test_reorder_data(self):
     acronyms = np.array(['AUDp1', 'AUDpo1', 'AUDv1', 'SSp-m1', 'SSp-n1'])
     values = np.array([0, 1, 2, 3, 4])
     _, idx = ismember(acronyms, self.brs.acronym)
     expected_acronyms = acronyms[np.argsort(self.brs.order[idx])]
     expected_values = values[np.argsort(self.brs.order[idx])]
     values = np.array([0, 1, 2, 3, 4])
     acronyms_ordered, values_ordered = reorder_data(acronyms, values)
     assert np.array_equal(acronyms_ordered, expected_acronyms)
     assert np.array_equal(values_ordered, expected_values)
Example #9
def test_clusters_metrics():
    np.random.seed(54)
    rec_length = 1000
    frs = np.array([3, 100, 80, 40])  # firing rates
    cid = [0, 1, 3, 4]  # here we make sure one of the clusters has no spike
    t, a, c = multiple_spike_trains(firing_rates=frs, rec_len_secs=rec_length, cluster_ids=cid)
    d = np.sin(2 * np.pi * c / rec_length * t) * 100  # sinusoidal depth drift, frequency driven by cluster id

    def _assertions(dfm, idf, target_cid):
        # dfm: qc dataframe, idf: indices of existing clusters in dfm, target_cid: expected cluster ids
        assert np.allclose(dfm['amp_median'][idf] / np.exp(5.5) * 1e6, 1, rtol=1.1)
        assert np.allclose(dfm['amp_std_dB'][idf] / 20 * np.log10(np.exp(0.5)), 1, rtol=1.1)
        assert np.allclose(dfm['drift'][idf], np.array(cid) * 100 * 4 * 3.6, rtol=1.1)
        assert np.allclose(dfm['firing_rate'][idf], frs, rtol=1.1)
        assert np.allclose(dfm['cluster_id'], target_cid)

    # check with missing clusters
    dfm = quick_unit_metrics(c, t, a, d, cluster_ids=np.arange(5), tbounds=[100, 900])
    idf, _ = ismember(np.arange(5), cid)
    _assertions(dfm, idf, np.arange(5))
Example #10
def plot_scalar_on_barplot(acronyms,
                           values,
                           errors=None,
                           order=True,
                           ylim=None,
                           ax=None,
                           brain_regions=None):
    br = brain_regions or BrainRegions()

    if order:
        acronyms, values = reorder_data(acronyms, values, brain_regions)

    _, idx = ismember(acronyms, br.acronym)
    colours = br.rgb[idx]

    if ax:
        fig = ax.get_figure()
    else:
        fig, ax = plt.subplots()

    ax.bar(np.arange(acronyms.size), values, color=colours)

    return fig, ax
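The `ismember` call matches the plotted acronyms against the full acronym table so each bar can be coloured with its region's RGB value. The lookup in isolation, with a toy acronym/colour table (the colour values are illustrative, not the real Allen colours; import path assumed):

import numpy as np
from iblutil.numerical import ismember  # assumed import path

acronym_table = np.array(['root', 'MOp', 'VISp'])                      # toy stand-in for br.acronym
rgb_table = np.array([[255, 255, 255], [31, 157, 90], [8, 133, 140]])  # toy stand-in for br.rgb
acronyms = np.array(['VISp', 'MOp'])
_, idx = ismember(acronyms, acronym_table)
print(rgb_table[idx])   # -> one RGB triplet per bar, in the order of `acronyms`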
Example #11
def reorder_data(acronyms, values, brain_regions=None):
    """
    Reorder list of acronyms and values to match the Allen ordering
    :param acronyms: array of acronyms
    :param values: array of values
    :param brain_regions: BrainRegions object
    :return: ordered array of acronyms and values
    """

    br = brain_regions or BrainRegions()
    atlas_id = br.acronym2id(acronyms, hemisphere='right')
    all_ids = br.id[br.order][:br.n_lr + 1]
    ordered_ids = np.zeros_like(all_ids) * np.nan
    ordered_values = np.zeros_like(all_ids) * np.nan
    _, idx = ismember(atlas_id, all_ids)
    ordered_ids[idx] = atlas_id
    ordered_values[idx] = values

    ordered_ids = ordered_ids[~np.isnan(ordered_ids)]
    ordered_values = ordered_values[~np.isnan(ordered_values)]
    ordered_acronyms = br.id2acronym(ordered_ids)

    return ordered_acronyms, ordered_values
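The idiom is scatter-then-compress: the user values are scattered into an array that follows the Allen ordering, and the untouched (NaN) slots are dropped. The same idiom on toy arrays (import path assumed):

import numpy as np
from iblutil.numerical import ismember  # assumed import path

all_ids  = np.array([10, 20, 30, 40])   # toy ids already in the Allen order
atlas_id = np.array([40, 20])           # user ids, arbitrary order
values   = np.array([4.0, 2.0])
ordered_values = np.zeros_like(all_ids) * np.nan
_, idx = ismember(atlas_id, all_ids)
ordered_values[idx] = values
print(ordered_values)                   # -> [nan  2. nan  4.]; dropping the NaNs keeps the Allen order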
Example #12
def quick_unit_metrics(spike_clusters,
                       spike_times,
                       spike_amps,
                       spike_depths,
                       params=METRICS_PARAMS,
                       cluster_ids=None,
                       tbounds=None):
    """
    Computes single unit metrics from only the spike times, amplitudes, and
    depths for a set of units.

    Metrics computed:
        'amp_max',
        'amp_min',
        'amp_median',
        'amp_std_dB',
        'contamination',
        'contamination_alt',
        'drift',
        'missed_spikes_est',
        'noise_cutoff',
        'presence_ratio',
        'presence_ratio_std',
        'slidingRP_viol',
        'spike_count'

    Parameters (see the METRICS_PARAMS constant)
    ----------
    spike_clusters : ndarray_like
        A vector of the unit ids for a set of spikes.
    spike_times : ndarray_like
        A vector of the timestamps for a set of spikes.
    spike_amps : ndarray_like
        A vector of the amplitudes for a set of spikes.
    spike_depths : ndarray_like
        A vector of the depths for a set of spikes.
    cluster_ids : ndarray_like (optional)
        A list of cluster ids. If not all clusters are represented in spike_clusters (i.e. a
        cluster has no spikes), this ensures the output size stays consistent with the inputs.
    tbounds : list or 2-element array (optional)
        A time selection [t0, t1] restricting the metrics computation to that window.
    params : dict (optional)
        Parameters used for computing some of the metrics in the function:
            'presence_window': float
                The time window (in s) used to look for spikes when computing the presence ratio.
            'refractory_period': float
                The refractory period used when computing isi violations and the contamination
                estimate.
            'min_isi': float
                The minimum interspike-interval (in s) for counting duplicate spikes when computing
                the contamination estimate.
            'spks_per_bin_for_missed_spks_est': int
                The number of spikes per bin used to compute the spike amplitude pdf for a unit,
                when computing the missed spikes estimate.
            'std_smoothing_kernel_for_missed_spks_est': float
                The standard deviation for the gaussian kernel used to compute the spike amplitude
                pdf for a unit, when computing the missed spikes estimate.
            'min_num_bins_for_missed_spks_est': int
                The minimum number of bins used to compute the spike amplitude pdf for a unit,
                when computing the missed spikes estimate.

    Returns
    -------
    r : bunch
        A bunch whose keys are the computed spike metrics.

    Notes
    -----
    This function is called by `ephysqc.unit_metrics_ks2` which is called by `spikes.ks2_to_alf`
    during alf extraction of an ephys dataset in the ibl ephys extraction pipeline.

    Examples
    --------
    1) Compute quick metrics from a ks2 output directory:
        >>> from ibllib.ephys.ephysqc import phy_model_from_ks2_path
        >>> m = phy_model_from_ks2_path(path_to_ks2_out)
        >>> cluster_ids = m.spike_clusters
        >>> ts = m.spike_times
        >>> amps = m.amplitudes
        >>> depths = m.depths
        >>> r = bb.metrics.quick_unit_metrics(cluster_ids, ts, amps, depths)
    """
    metrics_list = [
        'cluster_id', 'amp_max', 'amp_min', 'amp_median', 'amp_std_dB',
        'contamination', 'contamination_alt', 'drift', 'missed_spikes_est',
        'noise_cutoff', 'presence_ratio', 'presence_ratio_std',
        'slidingRP_viol', 'spike_count'
    ]
    if tbounds:
        ispi = between_sorted(spike_times, tbounds)
        spike_times = spike_times[ispi]
        spike_clusters = spike_clusters[ispi]
        spike_amps = spike_amps[ispi]
        spike_depths = spike_depths[ispi]

    if cluster_ids is None:
        cluster_ids = np.unique(spike_clusters)
    nclust = cluster_ids.size

    r = Bunch({k: np.full((nclust, ), np.nan) for k in metrics_list})
    r['cluster_id'] = cluster_ids

    # vectorized computation of basic metrics such as presence ratio and firing rate
    tmin = spike_times[0]
    tmax = spike_times[-1]
    presence_ratio = bincount2D(spike_times,
                                spike_clusters,
                                xbin=params['presence_window'],
                                ybin=cluster_ids,
                                xlim=[tmin, tmax])[0]
    r.presence_ratio = np.sum(presence_ratio > 0, axis=1) / presence_ratio.shape[1]
    r.presence_ratio_std = np.std(presence_ratio, axis=1)
    r.spike_count = np.sum(presence_ratio, axis=1)
    r.firing_rate = r.spike_count / (tmax - tmin)

    # computing amplitude statistical indicators by aggregating over cluster id
    camp = pd.DataFrame(np.c_[spike_amps, 20 * np.log10(spike_amps),
                              spike_clusters],
                        columns=['amps', 'log_amps', 'clusters'])
    camp = camp.groupby('clusters')
    ir, ib = ismember(r.cluster_id, camp.clusters.unique())
    r.amp_min[ir] = np.array(camp['amps'].min())
    r.amp_max[ir] = np.array(camp['amps'].max())
    # this is the geometric median
    r.amp_median[ir] = np.array(10**(camp['log_amps'].median() / 20))
    r.amp_std_dB[ir] = np.array(camp['log_amps'].std())

    # loop over each cluster to compute the rest of the metrics
    for ic in np.arange(nclust):
        # slice the spike_times array
        ispikes = spike_clusters == cluster_ids[ic]
        if np.all(~ispikes):  # if this cluster has no spikes, continue
            continue
        ts = spike_times[ispikes]
        amps = spike_amps[ispikes]
        depths = spike_depths[ispikes]

        # compute metrics
        r.contamination_alt[ic] = contamination_alt(
            ts, rp=params['refractory_period'])
        r.contamination[ic], _ = contamination(ts,
                                               tmin,
                                               tmax,
                                               rp=params['refractory_period'],
                                               min_isi=params['min_isi'])
        r.slidingRP_viol[ic] = slidingRP_viol(
            ts,
            bin_size=params['bin_size'],
            thresh=params['RPslide_thresh'],
            acceptThresh=params['acceptable_contamination'])
        r.noise_cutoff[ic] = noise_cutoff(
            amps,
            quartile_length=params['nc_quartile_length'],
            n_bins=params['nc_bins'],
            n_low_bins=params['nc_n_low_bins'])
        r.missed_spikes_est[ic], _, _ = missed_spikes_est(
            amps,
            spks_per_bin=params['spks_per_bin_for_missed_spks_est'],
            sigma=params['std_smoothing_kernel_for_missed_spks_est'],
            min_num_bins=params['min_num_bins_for_missed_spks_est'])

        # wonder if there is a need to low-cut this
        r.drift[ic] = np.sum(np.abs(np.diff(depths))) / (tmax - tmin) * 3600

    r.label = compute_labels(r)
    return r
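A hypothetical usage sketch on synthetic spikes; the import path is an assumption and may differ between versions, and the numbers are arbitrary:

# hypothetical usage sketch; the import path is an assumption and may differ between versions
import numpy as np
from brainbox.metrics.single_units import quick_unit_metrics

rng = np.random.default_rng(0)
n = 10_000
spike_times = np.sort(rng.uniform(0, 600, n))     # 10 minutes of toy spikes
spike_clusters = rng.integers(0, 4, n)            # four toy units
spike_amps = rng.normal(200e-6, 20e-6, n)         # amplitudes in volts
spike_depths = rng.uniform(0, 3840, n)            # depth along the probe in um
r = quick_unit_metrics(spike_clusters, spike_times, spike_amps, spike_depths,
                       cluster_ids=np.arange(5))  # cluster 4 has no spikes but still gets a row
print(r['cluster_id'], r['spike_count'])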
Example #13
def plot_swanson(acronyms=None,
                 values=None,
                 ax=None,
                 hemisphere=None,
                 br=None,
                 orientation='landscape',
                 annotate=False,
                 **kwargs):
    """
    Displays the 2D image corresponding to the Swanson flatmap.
    This case differs from the other flatmaps in that regions map only onto other regions; there is
    no correspondence with the spatial 3D coordinates.
    :param acronyms:
    :param values:
    :param hemisphere: hemisphere to display, options are 'left', 'right', 'both' or 'mirror'
    :param br: ibllib.atlas.BrainRegions object
    :param ax: matplotlib axis object to plot onto
    :param orientation: 'landscape' (default) or 'portrait'
    :param annotate: (False) if True, labels regions with acronyms
    :param kwargs: arguments for imshow
    :return:
    """
    mapping = 'Swanson'
    br = BrainRegions() if br is None else br
    s2a = swanson()
    # both hemispheres
    if hemisphere == 'both':
        _s2a = s2a + np.sum(br.id > 0)
        _s2a[s2a == 0] = 0
        _s2a[s2a == 1] = 1
        s2a = np.r_[s2a, np.flipud(_s2a)]
        mapping = 'Swanson-lr'
    elif hemisphere == 'mirror':
        s2a = np.r_[s2a, np.flipud(s2a)]
    if orientation == 'portrait':
        s2a = np.transpose(s2a)
    if acronyms is None:
        regions = br.mappings[mapping][s2a]
        im = br.rgba[regions]
    else:
        user_aids = br.parse_acronyms_argument(acronyms)
        # if the user provided inputs are higher level than swanson propagate down
        swaids = br.id[np.unique(s2a)]
        maids = np.setdiff1d(user_aids, swaids)  # those are the ids not in Swanson
        for i, maid in enumerate(maids):
            if maid <= 1:
                continue
            childs_in_sw = np.intersect1d(br.descendants(maid)['id'][1:], swaids)
            if childs_in_sw.size > 0:
                user_aids = np.r_[user_aids, childs_in_sw]
                values = np.r_[values, values[i] + childs_in_sw * 0]
        # the user may have input non-unique regions
        df = pd.DataFrame(dict(aid=user_aids,
                               value=values)).groupby('aid').mean()
        aids, vals = (df.index.values, df['value'].values)
        # apply mapping and perform another round of aggregation
        _, _, ibr = np.intersect1d(aids, br.id, return_indices=True)
        ibr = br.mappings['Swanson-lr'][ibr]
        df = pd.DataFrame(dict(ibr=ibr, value=vals)).groupby('ibr').mean()
        ibr, vals = (df.index.values, df['value'].values)
        # we now have the mapped regions and aggregated values, map values onto swanson map
        iswan, iv = ismember(s2a, ibr)
        im = np.zeros_like(s2a, dtype=np.float32)
        im[iswan] = vals[iv]
        im[~iswan] = np.nan
    if not ax:
        ax = plt.gca()
        ax.set_axis_off()  # unless provided we don't need scales here
    ax.imshow(im, **kwargs)
    # overlay the boundaries if value plot
    imb = np.zeros((*s2a.shape[:2], 4), dtype=np.uint8)
    imb[s2a == 0] = 255
    # imb[s2a == 1] = np.array([167, 169, 172, 255])
    imb[s2a == 1] = np.array([0, 0, 0, 255])
    ax.imshow(imb)
    if annotate:
        annotate_swanson(ax=ax, orientation=orientation, br=br)

    # provides the means to see the region acronym under the cursor on the axes
    def format_coord(x, y):
        acronym = br.acronym[s2a[int(y), int(x)]]
        return f'x={x:1.4f}, y={y:1.4f}, {acronym}'

    ax.format_coord = format_coord
    return ax
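A hypothetical usage sketch: assuming the function above is in scope and the IBL atlas/Swanson data is cached locally, a handful of region acronyms and values are painted onto the mirrored flatmap (the acronyms and values are arbitrary):

import numpy as np
import matplotlib.pyplot as plt

# hypothetical usage sketch; requires the cached Allen structure tree and Swanson flatmap
acronyms = np.array(['MOp', 'SSp-n', 'VISp'])
values = np.array([1.0, 2.0, 3.0])
ax = plot_swanson(acronyms=acronyms, values=values, hemisphere='mirror',
                  cmap='viridis', annotate=True)
plt.show()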
Example #14
    def __init__(self,
                 res_um=25,
                 scaling=np.array([1, 1, 1]),
                 mock=False,
                 hist_path=None):
        """
        :param res_um: 10, 25 or 50 um
        :param scaling: scale factor along ml, ap, dv for squeeze and stretch ([1, 1, 1])
        :param mock: for testing purpose
        :param hist_path: optional path to an image volume to use instead of the Allen average template
        :return: atlas.BrainAtlas
        """

        par = one.params.get(silent=True)
        FLAT_IRON_ATLAS_REL_PATH = PurePosixPath('histology', 'ATLAS',
                                                 'Needles', 'Allen')
        LUT_VERSION = "v01"  # version 01 is the lateralized version
        regions = BrainRegions()
        xyz2dims = np.array([1, 0, 2])  # this is the c-contiguous ordering
        dims2xyz = np.array([1, 0, 2])
        # we use Bregma as the origin
        self.res_um = res_um
        ibregma = (ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / self.res_um)
        dxyz = self.res_um * 1e-6 * np.array([1, -1, -1]) * scaling
        if mock:
            image, label = [
                np.zeros((528, 456, 320), dtype=np.int16) for _ in range(2)
            ]
            label[:, :, 100:105] = 1327  # lookup index for retina, id 304325711 (index 1327, not an id)
        else:
            path_atlas = Path(par.CACHE_DIR).joinpath(FLAT_IRON_ATLAS_REL_PATH)
            file_image = hist_path or path_atlas.joinpath(
                f'average_template_{res_um}.nrrd')
            # get the image volume
            if not file_image.exists():
                _download_atlas_allen(file_image, FLAT_IRON_ATLAS_REL_PATH, par)
            # get the remapped label volume
            file_label = path_atlas.joinpath(f'annotation_{res_um}.nrrd')
            if not file_label.exists():
                _download_atlas_allen(file_label, FLAT_IRON_ATLAS_REL_PATH, par)
            file_label_remap = path_atlas.joinpath(
                f'annotation_{res_um}_lut_{LUT_VERSION}.npz')
            if not file_label_remap.exists():
                label = self._read_volume(file_label).astype(dtype=np.int32)
                _logger.info("computing brain atlas annotations lookup table")
                # lateralize atlas: for this the regions of the left hemisphere have primary
                # keys opposite to the normal ones
                lateral = np.zeros(label.shape[xyz2dims[0]])
                lateral[int(np.floor(ibregma[0]))] = 1
                lateral = np.sign(
                    np.cumsum(lateral)[np.newaxis, :, np.newaxis] - 0.5)
                label = label * lateral.astype(np.int32)
                # the 10 um atlas is too big to fit in memory so work by chunks instead
                if res_um == 10:
                    first, ncols = (0, 10)
                    while True:
                        last = np.minimum(first + ncols, label.shape[-1])
                        _logger.info(
                            f"Computing... {last} on {label.shape[-1]}")
                        _, im = ismember(label[:, :, first:last], regions.id)
                        label[:, :, first:last] = np.reshape(
                            im, label[:, :, first:last].shape)
                        if last == label.shape[-1]:
                            break
                        first += ncols
                    label = label.astype(dtype=np.uint16)
                    _logger.info("Saving npz, this can take a long time")
                else:
                    _, im = ismember(label, regions.id)
                    label = np.reshape(im.astype(np.uint16), label.shape)
                np.savez_compressed(file_label_remap, label)
                _logger.info(f"Cached remapping file {file_label_remap} ...")
            # loads the files
            label = self._read_volume(file_label_remap)
            image = self._read_volume(file_image)

        super().__init__(image,
                         label,
                         dxyz,
                         regions,
                         ibregma,
                         dims2xyz=dims2xyz,
                         xyz2dims=xyz2dims)
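The `ismember` calls near the end turn a volume of signed Allen ids into a volume of row indices into the lateralized regions table, which is what gets cached to disk. The core of that remapping on a tiny toy volume (import path assumed):

import numpy as np
from iblutil.numerical import ismember  # assumed import path

region_ids = np.array([0, 997, 8, -8])   # toy lateralized regions table (negative ids = left hemisphere)
label = np.array([[997, 8], [-8, 0]])    # toy annotation volume holding signed Allen ids
_, im = ismember(label, region_ids)
label_lut = np.reshape(im.astype(np.uint16), label.shape)
print(label_lut)                         # -> [[1 2]
                                         #     [3 0]]  the volume now stores row indices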
Example #15
    def _find_inds(self, values, all_values):
        if not isinstance(values, (list, np.ndarray)):
            values = np.array([values])
        _, inds = ismember(np.array(values), all_values)

        return inds
Example #16
def find_trial_ids(trials,
                   side='all',
                   choice='all',
                   order='trial num',
                   sort='idx',
                   contrast=(1, 0.5, 0.25, 0.125, 0.0625, 0),
                   event=None):
    """
    Finds trials that match the criteria
    :param trials: trials object. Must contain attributes contrastLeft, contrastRight and
    feedbackType
    :param side: stimulus side, options are 'all', 'left' or 'right'
    :param choice: trial choice, options are 'all', 'correct' or 'incorrect'
    :param contrast: contrast of stimulus, pass in a list/tuple of all contrasts to be considered,
    e.g. [1, 0.5] would only look for trials with 100% and 50% contrast
    :param order: how to order the trials, options are 'trial num' or 'reaction time'
    :param sort: how to sort the trials, options are 'side' (split left right trials), 'choice'
    (split correct incorrect trials), 'choice and side' (split left right and correct incorrect)
    :param event: trial event to align to (in order to remove nan trials for this event)
    :return: np.array of trial ids, list of dividers to indicate how trials are sorted
    """
    if event:
        idx = ~np.isnan(trials[event])
        nan_idx = np.where(idx)[0]
    else:
        idx = np.ones_like(trials['feedbackType'], dtype=bool)

    # Find trials that have specified contrasts
    cont = np.bitwise_or(
        ismember(trials['contrastLeft'][idx], np.array(contrast))[0],
        ismember(trials['contrastRight'][idx], np.array(contrast))[0])

    # Find different permutations of trials
    # correct right
    cor_r = np.where(
        np.bitwise_and(
            cont,
            np.bitwise_and(trials['feedbackType'][idx] == 1,
                           np.isfinite(trials['contrastRight'][idx]))))[0]
    # correct left
    cor_l = np.where(
        np.bitwise_and(
            cont,
            np.bitwise_and(trials['feedbackType'][idx] == 1,
                           np.isfinite(trials['contrastLeft'][idx]))))[0]
    # incorrect right
    incor_r = np.where(
        np.bitwise_and(
            cont,
            np.bitwise_and(trials['feedbackType'][idx] == -1,
                           np.isfinite(trials['contrastRight'][idx]))))[0]
    # incorrect left
    incor_l = np.where(
        np.bitwise_and(
            cont,
            np.bitwise_and(trials['feedbackType'][idx] == -1,
                           np.isfinite(trials['contrastLeft'][idx]))))[0]

    reaction_time = trials['response_times'][idx] - trials['goCue_times'][idx]

    def _order_by(_trials, order):
        # Returns subset of trials either ordered by trial number or by reaction time
        sorted_trials = np.sort(_trials)
        if order == 'trial num':
            return sorted_trials
        elif order == 'reaction time':
            sorted_reaction = np.argsort(reaction_time[sorted_trials])
            return sorted_trials[sorted_reaction]

    dividers = []

    # Find the trial id for all possible combinations
    if side == 'all' and choice == 'all':
        if sort == 'idx':
            trial_id = _order_by(np.r_[cor_r, cor_l, incor_r, incor_l], order)
        elif sort == 'choice':
            trial_id = np.r_[_order_by(np.r_[cor_l, cor_r], order),
                             _order_by(np.r_[incor_l, incor_r], order)]
            dividers.append(np.r_[cor_l, cor_r].shape[0])
        elif sort == 'side':
            trial_id = np.r_[_order_by(np.r_[cor_l, incor_l], order),
                             _order_by(np.r_[cor_r, incor_r], order)]
            dividers.append(np.r_[cor_l, incor_l].shape[0])
        elif sort == 'choice and side':
            trial_id = np.r_[_order_by(cor_l, order),
                             _order_by(incor_l, order),
                             _order_by(cor_r, order),
                             _order_by(incor_r, order)]
            dividers.append(cor_l.shape[0])
            dividers.append(np.r_[cor_l, incor_l].shape[0])
            dividers.append(np.r_[cor_l, incor_l, cor_r].shape[0])

    if side == 'left' and choice == 'all':
        if sort in ['idx', 'side']:
            trial_id = _order_by(np.r_[cor_l, incor_l], order)
        elif sort in ['choice', 'choice and side']:
            trial_id = np.r_[_order_by(cor_l, order),
                             _order_by(incor_l, order)]
            dividers.append(cor_l.shape[0])

    if side == 'right' and choice == 'all':
        if sort in ['idx', 'side']:
            trial_id = _order_by(np.r_[cor_r, incor_r], order)
        elif sort in ['choice', 'choice and side']:
            trial_id = np.r_[_order_by(cor_r, order),
                             _order_by(incor_r, order)]
            dividers.append(cor_r.shape[0])

    if side == 'all' and choice == 'correct':
        if sort in ['idx', 'choice']:
            trial_id = _order_by(np.r_[cor_l, cor_r], order)
        elif sort in ['side', 'choice and side']:
            trial_id = np.r_[_order_by(cor_l, order), _order_by(cor_r, order)]
            dividers.append(cor_l.shape[0])

    if side == 'all' and choice == 'incorrect':
        if sort in ['idx', 'choice']:
            trial_id = _order_by(np.r_[incor_l, incor_r], order)
        elif sort in ['side', 'choice and side']:
            trial_id = np.r_[_order_by(incor_l, order),
                             _order_by(incor_r, order)]
            dividers.append(incor_l.shape[0])

    if side == 'left' and choice == 'correct':
        trial_id = _order_by(cor_l, order)

    if side == 'left' and choice == 'incorrect':
        trial_id = _order_by(incor_l, order)

    if side == 'right' and choice == 'correct':
        trial_id = _order_by(cor_r, order)

    if side == 'right' and choice == 'incorrect':
        trial_id = _order_by(incor_r, order)

    if event:
        trial_id = nan_idx[trial_id]

    return trial_id, dividers
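The `ismember` calls at the top build the contrast filter: a trial passes if either its left or its right contrast is in the requested set (trials where the stimulus was on the other side have NaN contrast and never match). The filter in isolation, on toy contrasts (import path assumed):

import numpy as np
from iblutil.numerical import ismember  # assumed import path

contrast = np.array([1, 0.5])                     # contrasts of interest
c_left  = np.array([1.0, np.nan, 0.25, np.nan])   # NaN when the stimulus was on the right
c_right = np.array([np.nan, 0.5, np.nan, 0.0625])
cont = np.bitwise_or(ismember(c_left, contrast)[0], ismember(c_right, contrast)[0])
print(cont)   # -> [ True  True False False]: only the 100% and 50% contrast trials pass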