Example #1
0
def select_spikes(cluster_ids=None,
                  max_n_spikes_per_cluster=None,
                  spikes_per_cluster=None,
                  batch_size=None,
                  ):
    """Return a selection of spikes belonging to the specified clusters.

    Parameters
    ----------
    cluster_ids : array-like
        The clusters to select spikes from.
    max_n_spikes_per_cluster : int or None
        Soft cap on the number of spikes selected per cluster. If None or 0,
        every spike of every cluster is returned.
    spikes_per_cluster : callable
        Function mapping a cluster id to the array of its spike ids.
    batch_size : int or None
        If set, clusters larger than both `batch_size` and the per-cluster
        cap are sampled as contiguous excerpts of this size instead of a
        regular subselection.

    Returns
    -------
    spike_ids
        The flattened selection across all requested clusters
        (via `_flatten_per_cluster`).
    """
    assert _is_array_like(cluster_ids)
    if not len(cluster_ids):
        return np.array([], dtype=np.int64)
    if max_n_spikes_per_cluster in (None, 0):
        # No cap: take all spikes of every cluster.
        selection = {c: spikes_per_cluster(c) for c in cluster_ids}
    else:
        assert max_n_spikes_per_cluster > 0
        selection = {}
        n_clusters = len(cluster_ids)
        # Decrease the number of spikes per cluster when there are more
        # clusters. This value is loop-invariant, so compute it once
        # instead of once per cluster.
        n = max(1, int(max_n_spikes_per_cluster *
                       exp(-.1 * (n_clusters - 1))))
        for cluster in cluster_ids:
            spikes = spikes_per_cluster(cluster)
            if batch_size is None or len(spikes) <= max(batch_size, n):
                # Regular subselection.
                spikes = regular_subset(spikes, n_spikes_max=n)
            else:
                # Batch selections of spikes. Guard against n < batch_size:
                # n // batch_size would be 0 and request zero excerpts,
                # silently selecting no spikes from a large cluster.
                spikes = get_excerpts(spikes,
                                      max(1, n // batch_size),
                                      batch_size)
            selection[cluster] = spikes
    return _flatten_per_cluster(selection)
Example #2
0
    def merge(self, cluster_ids, to=None):
        """Merge a set of existing clusters into a single new cluster.

        Parameters
        ----------

        cluster_ids : array-like
            The clusters to merge together.
        to : integer or None
            Id of the merged cluster. Defaults to `new_cluster_id()`.

        Returns
        -------

        up : UpdateInfo instance

        """
        if not _is_array_like(cluster_ids):
            raise ValueError("The first argument should be a list or "
                             "an array.")

        # Process the clusters in a deterministic (sorted) order, and make
        # sure every one of them actually exists.
        cluster_ids = sorted(cluster_ids)
        missing = set(cluster_ids) - set(self.cluster_ids)
        if missing:
            raise ValueError("Some clusters do not exist.")

        # Choose the id of the merged cluster; it must not collide with an
        # id below the next available one.
        if to is None:
            to = self.new_cluster_id()
        if to < self.new_cluster_id():
            raise ValueError("The new cluster numbers should be higher than "
                             "{0}.".format(self.new_cluster_id()))

        # We deliberately bypass self.assign() here: assign() is relatively
        # costly, whereas a merge is a much cheaper operation.

        # Collect every spike belonging to the merged clusters.
        merged_spikes = _spikes_in_clusters(self.spike_clusters, cluster_ids)

        info = self._do_merge(merged_spikes, cluster_ids, to)
        state = self.emit('request_undo_state', info)

        # Record the action on the undo stack before notifying listeners.
        self._undo_stack.add((merged_spikes, [to], state))

        self.emit('cluster', info)
        return info
Example #3
0
    def merge(self, cluster_ids, to=None):
        """Merge several clusters to a new cluster.

        Parameters
        ----------

        cluster_ids : array-like
            List of clusters to merge.
        to : integer or None
            The id of the new cluster. By default, this is `new_cluster_id()`.

        Returns
        -------

        up : UpdateInfo instance

        """

        if not _is_array_like(cluster_ids):
            raise ValueError("The first argument should be a list or "
                             "an array.")

        # Sort for a deterministic processing order, and check that every
        # requested cluster actually exists.
        cluster_ids = sorted(cluster_ids)
        if not set(cluster_ids) <= set(self.cluster_ids):
            raise ValueError("Some clusters do not exist.")

        # Find the new cluster number. It must be at least the next
        # available id, to avoid colliding with existing clusters.
        if to is None:
            to = self.new_cluster_id()
        if to < self.new_cluster_id():
            raise ValueError("The new cluster numbers should be higher than "
                             "{0}.".format(self.new_cluster_id()))

        # NOTE: we could have called self.assign() here, but we don't.
        # We circumvent self.assign() for performance reasons.
        # assign() is a relatively costly operation, whereas merging is a much
        # cheaper operation.

        # Find all spikes in the specified clusters.
        spike_ids = _spikes_in_clusters(self.spike_clusters, cluster_ids)

        # Perform the merge, then ask listeners for the state needed to
        # undo it later.
        up = self._do_merge(spike_ids, cluster_ids, to)
        undo_state = self.emit('request_undo_state', up)

        # Add to stack.
        self._undo_stack.add((spike_ids, [to], undo_state))

        # Notify listeners of the clustering change.
        self.emit('cluster', up)
        return up
Example #4
0
def select_spikes(cluster_ids=None,
                  max_n_spikes_per_cluster=None,
                  spikes_per_cluster=None):
    """Return a selection of spikes belonging to the specified clusters.

    Parameters
    ----------
    cluster_ids : array-like
        The clusters to select spikes from.
    max_n_spikes_per_cluster : int or None
        Soft cap on the number of spikes selected per cluster. If None or 0,
        every spike of every cluster is returned.
    spikes_per_cluster : callable
        Function mapping a cluster id to the array of its spike ids.

    Returns
    -------
    spike_ids
        The flattened selection across all requested clusters
        (via `_flatten_per_cluster`).
    """
    assert _is_array_like(cluster_ids)
    if not len(cluster_ids):
        return np.array([], dtype=np.int64)
    if max_n_spikes_per_cluster in (None, 0):
        # No cap: take all spikes of every cluster.
        selection = {c: spikes_per_cluster(c) for c in cluster_ids}
    else:
        assert max_n_spikes_per_cluster > 0
        n_clusters = len(cluster_ids)
        # Decrease the number of spikes per cluster when there are more
        # clusters. This value is loop-invariant, so compute it once
        # instead of once per cluster.
        n = max(1, int(max_n_spikes_per_cluster *
                       exp(-.1 * (n_clusters - 1))))
        selection = {c: regular_subset(spikes_per_cluster(c), n_spikes_max=n)
                     for c in cluster_ids}
    return _flatten_per_cluster(selection)
Example #5
0
def select_spikes(cluster_ids=None,
                  max_n_spikes_per_cluster=None,
                  spikes_per_cluster=None):
    """Return the selected spike ids for the requested clusters."""
    assert _is_array_like(cluster_ids)
    if not len(cluster_ids):
        # Nothing requested: empty selection.
        return np.array([], dtype=np.int64)
    if max_n_spikes_per_cluster in (None, 0):
        # No cap on the selection: keep every spike of every cluster.
        per_cluster = {c: spikes_per_cluster(c) for c in cluster_ids}
        return _flatten_per_cluster(per_cluster)
    assert max_n_spikes_per_cluster > 0
    # Shrink the per-cluster budget exponentially as more clusters are
    # selected at once, but always keep at least one spike per cluster.
    budget = int(max_n_spikes_per_cluster *
                 exp(-.1 * (len(cluster_ids) - 1)))
    budget = max(1, budget)
    per_cluster = {}
    for c in cluster_ids:
        per_cluster[c] = regular_subset(spikes_per_cluster(c),
                                        n_spikes_max=budget)
    return _flatten_per_cluster(per_cluster)
Example #6
0
def select_spikes(cluster_ids=None,
                  max_n_spikes_per_cluster=None,
                  spikes_per_cluster=None,
                  batch_size=None,
                  subset=None,
                  ):
    """Return a selection of spikes belonging to the specified clusters.

    Parameters
    ----------
    cluster_ids : array-like
        The clusters to select spikes from.
    max_n_spikes_per_cluster : int or None
        Soft cap on the number of spikes selected per cluster. If None or 0,
        every spike of every cluster is returned.
    spikes_per_cluster : callable
        Function mapping a cluster id to the array of its spike ids.
    batch_size : int or None
        With `subset='regular'`, clusters larger than both `batch_size` and
        the per-cluster cap are sampled as contiguous excerpts of this size.
    subset : str or None
        Subselection strategy, `'regular'` (default) or `'random'`.

    Returns
    -------
    spike_ids
        The flattened selection across all requested clusters
        (via `_flatten_per_cluster`).
    """
    subset = subset or 'regular'
    assert _is_array_like(cluster_ids)
    if not len(cluster_ids):
        return np.array([], dtype=np.int64)
    if max_n_spikes_per_cluster in (None, 0):
        # No cap: take all spikes of every cluster.
        selection = {c: spikes_per_cluster(c) for c in cluster_ids}
    else:
        assert max_n_spikes_per_cluster > 0
        selection = {}
        n_clusters = len(cluster_ids)
        # Decrease the number of spikes per cluster when there are more
        # clusters. This value is loop-invariant, so compute it once
        # instead of once per cluster.
        n = max(1, int(max_n_spikes_per_cluster *
                       exp(-.1 * (n_clusters - 1))))
        for cluster in cluster_ids:
            spike_ids = spikes_per_cluster(cluster)
            if subset == 'regular':
                # Regular subselection.
                if batch_size is None or len(spike_ids) <= max(batch_size, n):
                    spike_ids = regular_subset(spike_ids, n_spikes_max=n)
                else:
                    # Batch selections of spikes. Guard against
                    # n < batch_size: n // batch_size would be 0 and
                    # request zero excerpts, silently selecting no spikes.
                    spike_ids = get_excerpts(spike_ids,
                                             max(1, n // batch_size),
                                             batch_size)
            elif subset == 'random' and len(spike_ids) > n:
                # Random subselection (np.unique also sorts the result).
                spike_ids = np.random.choice(spike_ids, n, replace=False)
                spike_ids = np.unique(spike_ids)
            # NOTE: any other `subset` value silently keeps all spikes of
            # the cluster.
            selection[cluster] = spike_ids
    return _flatten_per_cluster(selection)