Example #1
def _assign_update_info(spike_ids, old_spike_clusters, new_spike_clusters):
    old_clusters = _unique(old_spike_clusters)
    new_clusters = _unique(new_spike_clusters)
    largest_old_cluster = np.bincount(old_spike_clusters).argmax()
    descendants = list(set(zip(old_spike_clusters, new_spike_clusters)))
    update_info = UpdateInfo(
        description='assign',
        spike_ids=list(spike_ids),
        spike_clusters=list(new_spike_clusters),
        added=list(new_clusters),
        deleted=list(old_clusters),
        descendants=descendants,
        largest_old_cluster=int(largest_old_cluster),
    )
    return update_info
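Every snippet on this page relies on the private helper `_unique` from phy/phylib. For reference, here is a minimal sketch of what such a helper might look like, assuming 1D arrays of non-negative integer cluster ids (an illustration, not the library's exact code; `np.unique` would work just as well):

import numpy as np

def _unique_sketch(x):
    """Sorted unique values of a 1D array of non-negative integers
    (hypothetical stand-in for phylib's `_unique`)."""
    if x is None or len(x) == 0:
        return np.array([], dtype=np.int64)
    x = np.asarray(x)
    # bincount + nonzero is fast when the labels are small non-negative ints.
    return np.nonzero(np.bincount(x.ravel()))[0]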
Example #2
File: clustering.py Project: zsong30/phy
    def _do_assign(self, spike_ids, new_spike_clusters):
        """Make spike-cluster assignments after the spike selection has
        been extended to full clusters."""

        # Ensure spike_clusters has the right shape.
        spike_ids = _as_array(spike_ids)
        if len(new_spike_clusters) == 1 and len(spike_ids) > 1:
            new_spike_clusters = np.ones(
                len(spike_ids), dtype=np.int64) * new_spike_clusters[0]
        old_spike_clusters = self._spike_clusters[spike_ids]

        assert len(spike_ids) == len(old_spike_clusters)
        assert len(new_spike_clusters) == len(spike_ids)

        # Update the spikes per cluster structure.
        old_clusters = _unique(old_spike_clusters)

        # NOTE: shortcut to a merge if this assignment is effectively a merge
        # i.e. if all spikes are assigned to a single cluster.
        # The fact that spike selection has been previously extended to
        # whole clusters is critical here.
        new_clusters = _unique(new_spike_clusters)
        if len(new_clusters) == 1:
            return self._do_merge(spike_ids, old_clusters, new_clusters[0])

        # We return the UpdateInfo structure.
        up = _assign_update_info(spike_ids, old_spike_clusters,
                                 new_spike_clusters)

        # We update the new cluster id (strictly increasing during a session).
        self._new_cluster_id = max(self._new_cluster_id, max(up.added) + 1)

        # We make the assignments.
        self._spike_clusters[spike_ids] = new_spike_clusters
        # OPTIM: we update spikes_per_cluster manually.
        new_spc = _spikes_per_cluster(new_spike_clusters, spike_ids)
        self._update_cluster_ids(to_remove=old_clusters, to_add=new_spc)
        up.all_cluster_ids = list(self.cluster_ids)
        return up
Example #3
def _extend_spikes(spike_ids, spike_clusters):
    """Return all spikes belonging to the clusters containing the specified
    spikes."""
    # We find the spikes belonging to modified clusters.
    # What are the old clusters that are modified by the assignment?
    old_spike_clusters = spike_clusters[spike_ids]
    unique_clusters = _unique(old_spike_clusters)
    # Now we take all spikes from these clusters.
    changed_spike_ids = _spikes_in_clusters(spike_clusters, unique_clusters)
    # These are the new spikes that need to be reassigned.
    extended_spike_ids = np.setdiff1d(changed_spike_ids,
                                      spike_ids,
                                      assume_unique=True)
    return extended_spike_ids
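The same idea in a self-contained toy run, with `np.unique` and `np.in1d` standing in for the private helpers (made-up data):

import numpy as np

spike_clusters = np.array([2, 3, 2, 2, 5, 3, 5])   # cluster of every spike
spike_ids = np.array([1, 4])                        # spikes selected by the user

touched = np.unique(spike_clusters[spike_ids])             # clusters hit by the selection: [3 5]
changed = np.nonzero(np.in1d(spike_clusters, touched))[0]  # all spikes in those clusters: [1 4 5 6]
extended = np.setdiff1d(changed, spike_ids)                # extra spikes to reassign: [5 6]
print(extended)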
Example #4
File: ccg.py Project: CINPLA/phylib
def firing_rate(spike_clusters,
                cluster_ids=None,
                bin_size=None,
                duration=None):
    """Compute the average number of spikes per cluster per bin."""

    # Take the cluster order into account.
    if cluster_ids is None:
        clusters = _unique(spike_clusters)
    else:
        clusters = _as_array(cluster_ids)

    # Like spike_clusters, but with 0..n_clusters-1 indices.
    spike_clusters_i = _index_of(spike_clusters, clusters)

    assert duration > 0
    assert bin_size > 0
    bc = np.bincount(spike_clusters_i)
    return bc * np.c_[bc] * (bin_size / duration)
Example #5
    def _update_cluster_ids(self, to_remove=None, to_add=None):
        # Update the list of non-empty cluster ids.
        self._cluster_ids = _unique(self._spike_clusters)
        # Clusters to remove.
        if to_remove is not None:
            for clu in to_remove:
                self._spikes_per_cluster.pop(clu, None)
        # Clusters to add.
        if to_add:
            for clu, spk in to_add.items():
                self._spikes_per_cluster[clu] = spk
        # If spikes_per_cluster is invalid, recompute the entire
        # spikes_per_cluster array.
        coherent = np.all(
            np.in1d(self._cluster_ids, sorted(self._spikes_per_cluster)))
        if not coherent:
            logger.debug(
                "Recompute spikes_per_cluster manually: this might take a while."
            )
            sc = self._spike_clusters
            self._spikes_per_cluster = _spikes_per_cluster(sc)
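`_spikes_per_cluster`, used above and in Example #2, is expected to return a mapping from cluster id to the spike ids belonging to it. A rough, illustrative sketch under that assumption (not the library's implementation):

import numpy as np

def _spikes_per_cluster_sketch(spike_clusters, spike_ids=None):
    """Return a {cluster_id: spike_ids array} dict (hypothetical stand-in)."""
    spike_clusters = np.asarray(spike_clusters)
    if spike_ids is None:
        spike_ids = np.arange(len(spike_clusters))
    spike_ids = np.asarray(spike_ids)
    return {int(c): spike_ids[spike_clusters == c]
            for c in np.unique(spike_clusters)}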
Example #6
File: ccg.py Project: cortex-lab/phylib
def firing_rate(spike_clusters,
                cluster_ids=None,
                bin_size=None,
                duration=None):
    """Compute the average number of spikes per cluster per bin."""

    # Take the cluster order into account.
    if cluster_ids is None:
        cluster_ids = _unique(spike_clusters)
    else:
        cluster_ids = _as_array(cluster_ids)

    # Like spike_clusters, but with 0..n_clusters-1 indices.
    spike_clusters_i = _index_of(spike_clusters, cluster_ids)

    assert bin_size > 0
    bc = np.bincount(spike_clusters_i)
    # Handle the case where the last cluster(s) are empty.
    if len(bc) < len(cluster_ids):
        n = len(cluster_ids) - len(bc)
        bc = np.concatenate((bc, np.zeros(n, dtype=bc.dtype)))
    assert bc.shape == (len(cluster_ids), )
    return bc * np.c_[bc] * (bin_size / (duration or 1.))
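A self-contained toy run of the same computation with plain NumPy and made-up numbers, showing that the result is an (n_clusters, n_clusters) matrix built from the per-cluster spike counts (np.searchsorted stands in for `_index_of`, which only works here because the toy cluster ids are sorted):

import numpy as np

spike_clusters = np.array([0, 1, 1, 2, 0, 1])   # toy spike-cluster assignments
cluster_ids = np.array([0, 1, 2, 3])            # cluster 3 has no spikes
bin_size, duration = 0.1, 10.0                  # seconds

idx = np.searchsorted(cluster_ids, spike_clusters)    # relative 0..n-1 indices
bc = np.bincount(idx, minlength=len(cluster_ids))     # spikes per cluster, padded: [2 3 1 0]
rate = bc * np.c_[bc] * (bin_size / duration)         # outer product, shape (4, 4)
print(rate.shape)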
Example #7
    def make_depths(self):
        """Make spikes.depths.npy and clusters.depths.npy."""
        channel_positions = self.model.channel_positions
        assert channel_positions.ndim == 2

        spike_clusters = self.model.spike_clusters
        assert spike_clusters.ndim == 1
        n_spikes = spike_clusters.shape[0]
        self.cluster_ids = _unique(self.model.spike_clusters)

        cluster_channels = np.load(self.out_path / 'clusters.peakChannel.npy')
        assert cluster_channels.ndim == 1
        n_clusters = cluster_channels.shape[0]

        clusters_depths = channel_positions[cluster_channels, 1]
        assert clusters_depths.shape == (n_clusters, )

        spike_clusters_rel = _index_of(spike_clusters, self.cluster_ids)
        assert spike_clusters_rel.max() < clusters_depths.shape[0]
        spikes_depths = clusters_depths[spike_clusters_rel]
        assert spikes_depths.shape == (n_spikes, )

        self._save_npy('spikes.depths.npy', spikes_depths)
        self._save_npy('clusters.depths.npy', clusters_depths)
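`_index_of(arr, lookup)` appears in most of these examples; it maps absolute cluster ids to their relative 0..n_clusters-1 positions in `lookup`. A minimal illustrative sketch, assuming non-negative integer ids that all appear in `lookup` (not the library's exact implementation):

import numpy as np

def _index_of_sketch(arr, lookup):
    """Position of each element of `arr` inside `lookup` (hypothetical stand-in)."""
    arr = np.asarray(arr)
    lookup = np.asarray(lookup)
    # Dense lookup table indexed by value.
    table = np.zeros(lookup.max() + 1, dtype=np.int64)
    table[lookup] = np.arange(len(lookup))
    return table[arr]

print(_index_of_sketch([20, 10, 10, 30], [10, 20, 30]))  # [1 0 0 2]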
Example #8
File: alf.py Project: balefebvre/phylib
    def __init__(self, model):
        self.model = model
        self.dir_path = Path(model.dir_path)
        self.spc = _spikes_per_cluster(model.spike_clusters)
        self.cluster_ids = _unique(self.model.spike_clusters)
Example #9
File: spikes.py Project: eejd/ibllib
def merge_probes(ses_path):
    """
    Merge the spike-sorting output from 2 probes and write the combined output, in IBL format,
    to the session ALF folder
    :param ses_path: session containing probes to be merged
    :return: None
    """
    def _sr(ap_file):
        md = spikeglx.read_meta_data(ap_file.with_suffix('.meta'))
        return spikeglx._get_fs_from_meta(md)

    ses_path = Path(ses_path)
    out_dir = ses_path.joinpath('alf').joinpath('tmp_merge')
    ephys_files = glob_ephys_files(ses_path)
    subdirs, labels, efiles_sorted, srates = zip(
        *sorted([(ep.ap.parent, ep.label, ep, _sr(ep.ap)) for ep in ephys_files if ep.get('ap')]))

    # if there is only one file, just convert the output to IBL format and stop there
    if len(subdirs) == 1:
        ks2_to_alf(subdirs[0], ses_path / 'alf')
        return
    else:
        _logger.info('converting individual spike-sorting outputs to ALF')
        for subdir, label, ef, sr in zip(subdirs, labels, efiles_sorted, srates):
            ks2alf_path = subdir / 'ks2_alf'
            if ks2alf_path.exists():
                shutil.rmtree(ks2alf_path, ignore_errors=True)
            ks2_to_alf(subdir, ks2alf_path, label=label, sr=sr, force=True)

    probe_info = [{'label': lab} for lab in labels]
    mt = merge.Merger(subdirs=subdirs, out_dir=out_dir, probe_info=probe_info).merge()
    # Create the cluster channels file; this should go into the model template as 2 methods
    tmp = mt.sparse_templates.data
    n_templates, n_samples, n_channels = tmp.shape
    template_peak_channels = np.argmax(tmp.max(axis=1) - tmp.min(axis=1), axis=1)
    cluster_probes = mt.channel_probes[template_peak_channels]
    spike_clusters_rel = _index_of(mt.spike_clusters, _unique(mt.spike_clusters))
    spike_probes = cluster_probes[spike_clusters_rel]

    # sync spikes according to the probes
    # (how do we make sure they match the files? see the assertion below)
    for ind, probe in enumerate(efiles_sorted):
        assert(labels[ind] == probe.label)  # paranoid, make sure they are sorted
        if not probe.get('ap'):
            continue
        sync_file = probe.ap.parent.joinpath(probe.ap.name.replace('.ap.', '.sync.')
                                             ).with_suffix('.npy')
        if not sync_file.exists():
            error_msg = f'No synchronisation file for {sync_file}'
            _logger.error(error_msg)
            raise FileNotFoundError(error_msg)
        sync_points = np.load(sync_file)
        fcn = interp1d(sync_points[:, 0] * srates[ind],
                       sync_points[:, 1], fill_value='extrapolate')
        mt.spike_times[spike_probes == ind] = fcn(mt.spike_samples[spike_probes == ind])

    # And convert to ALF
    ac = alf.EphysAlfCreator(mt)
    ac.convert(ses_path / 'alf', force=True)
    # remove the temporary directory
    shutil.rmtree(out_dir)
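The per-probe synchronisation above boils down to a linear interpolation from sample counts to synchronised times. A toy sketch of just that step, with made-up sync points (column 0: probe time in seconds, column 1: synchronised time in seconds), mirroring the interp1d call used above:

import numpy as np
from scipy.interpolate import interp1d

sample_rate = 30000.
sync_points = np.array([[0., 0.], [10., 10.002], [20., 20.001]])
fcn = interp1d(sync_points[:, 0] * sample_rate,   # sample index of each sync point
               sync_points[:, 1],                 # synchronised time (s)
               fill_value='extrapolate')
spike_samples = np.array([15000, 450000])         # toy spike samples
print(fcn(spike_samples))                         # synchronised spike times (s)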
Example #10
File: ccg.py Project: CINPLA/phylib
def correlograms(
    spike_times,
    spike_clusters,
    cluster_ids=None,
    sample_rate=1.,
    bin_size=None,
    window_size=None,
    symmetrize=True,
):
    """Compute all pairwise cross-correlograms among the clusters appearing
    in `spike_clusters`.

    Parameters
    ----------

    spike_times : array-like
        Spike times in seconds.
    spike_clusters : array-like
        Spike-cluster mapping.
    cluster_ids : array-like
        The list of *all* unique clusters, in any order. That order will be used
        in the output array.
    bin_size : float
        Size of the bin, in seconds.
    window_size : float
        Size of the window, in seconds.

    Returns
    -------

    correlograms : array
        A `(n_clusters, n_clusters, winsize_samples)` array with all pairwise
        CCGs.

    """
    assert sample_rate > 0.
    assert np.all(np.diff(spike_times) >= 0), ("The spike times must be "
                                               "increasing.")

    # Get the spike samples.
    spike_times = np.asarray(spike_times, dtype=np.float64)
    spike_samples = (spike_times * sample_rate).astype(np.int64)

    spike_clusters = _as_array(spike_clusters)

    assert spike_samples.ndim == 1
    assert spike_samples.shape == spike_clusters.shape

    # Find `binsize`.
    bin_size = np.clip(bin_size, 1e-5, 1e5)  # in seconds
    binsize = int(sample_rate * bin_size)  # in samples
    assert binsize >= 1

    # Find `winsize_bins`.
    window_size = np.clip(window_size, 1e-5, 1e5)  # in seconds
    winsize_bins = 2 * int(.5 * window_size / bin_size) + 1

    assert winsize_bins >= 1
    assert winsize_bins % 2 == 1

    # Take the cluster order into account.
    if cluster_ids is None:
        clusters = _unique(spike_clusters)
    else:
        clusters = _as_array(cluster_ids)
    n_clusters = len(clusters)

    # Like spike_clusters, but with 0..n_clusters-1 indices.
    spike_clusters_i = _index_of(spike_clusters, clusters)

    # Shift between the two copies of the spike trains.
    shift = 1

    # At a given shift, the mask indicates which spikes have a matching spike
    # within the correlogram time window.
    mask = np.ones_like(spike_samples, dtype=bool)

    correlograms = _create_correlograms_array(n_clusters, winsize_bins)

    # The loop continues as long as there is at least one spike with
    # a matching spike.
    while mask[:-shift].any():
        # Number of time samples between spike i and spike i+shift.
        spike_diff = _diff_shifted(spike_samples, shift)

        # Binarize the delays between spike i and spike i+shift.
        spike_diff_b = spike_diff // binsize

        # Spikes with no matching spikes are masked.
        mask[:-shift][spike_diff_b > (winsize_bins // 2)] = False

        # Cache the masked spike delays.
        m = mask[:-shift].copy()
        d = spike_diff_b[m]

        # Find the indices in the raveled correlograms array that need
        # to be incremented, taking into account the spike clusters.
        indices = np.ravel_multi_index(
            (spike_clusters_i[:-shift][m], spike_clusters_i[+shift:][m], d),
            correlograms.shape)

        # Increment the matching spikes in the correlograms array.
        _increment(correlograms.ravel(), indices)

        shift += 1

    # Remove ACG peaks.
    correlograms[np.arange(n_clusters), np.arange(n_clusters), 0] = 0

    if symmetrize:
        return _symmetrize_correlograms(correlograms)
    else:
        return correlograms
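The core of the loop above is the shift-and-bin trick: for sorted spike samples, take the delay between each spike and the spike `shift` positions later, bin it, mask out spikes whose delay already exceeds the window, and increase `shift` until nothing is left. A self-contained toy sketch of that loop for a single merged train (illustrative only, not the library API):

import numpy as np

samples = np.array([0, 3, 5, 9, 14, 15, 20])   # sorted toy spike samples
binsize, half_bins = 2, 4                      # bin width (samples) and half window (bins)

counts = np.zeros(half_bins + 1, dtype=np.int64)
mask = np.ones(len(samples), dtype=bool)
shift = 1
while mask[:-shift].any():
    # Delay (in bins) between spike i and spike i + shift.
    diff_bins = (samples[shift:] - samples[:-shift]) // binsize
    # Spikes whose delay falls outside the window are masked out for good.
    mask[:-shift][diff_bins > half_bins] = False
    m = mask[:-shift].copy()
    # Accumulate the in-window delays into the histogram.
    np.add.at(counts, diff_bins[m], 1)
    shift += 1

print(counts)  # one-sided auto-correlogram of the toy train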