Example #1
    def redo(self):
        """Redo the last cluster assignment operation.

        Returns
        -------

        up : UpdateInfo
            Instance describing the changes performed by this operation.

        """
        # Go forward in the stack, and retrieve the new assignment.
        item = self._undo_stack.forward()
        if item is None:
            # No redo has been performed: abort.
            return

        # NOTE: the undo_state object is only returned when undoing.
        # It represents data associated with the state *before* the
        # action. What might be more useful is the undo_state object
        # of the next item in the list (if it exists).
        spike_ids, cluster_ids, undo_state = item
        assert spike_ids is not None

        # We apply the new assignment.
        up = self._do_assign(spike_ids, cluster_ids)
        up.history = 'redo'

        emit('cluster', self, up)
        return up
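
The `forward()` and `back()` calls assume an undo stack that keeps a cursor into its history: `add()` discards any undone tail, `back()` returns the current item and steps backward, `forward()` steps forward. As a rough sketch of the semantics this code relies on (a hypothetical `UndoStack`, not the library's actual class):

class UndoStack:
    """Minimal sketch of a history stack with a cursor (hypothetical)."""
    def __init__(self):
        self._items = []
        self._cursor = -1  # index of the last applied item

    def add(self, item):
        # Adding a new item discards any items that had been undone.
        self._items = self._items[:self._cursor + 1]
        self._items.append(item)
        self._cursor += 1

    def back(self):
        # Return the current item and move the cursor back, or None.
        if self._cursor < 0:
            return None
        item = self._items[self._cursor]
        self._cursor -= 1
        return item

    def forward(self):
        # Move the cursor forward and return the next item, or None.
        if self._cursor >= len(self._items) - 1:
            return None
        self._cursor += 1
        return self._items[self._cursor]

    def __iter__(self):
        # Iterate over the applied part of the history only.
        return iter(self._items[:self._cursor + 1])

With this structure, iterating over the stack right after a `back()` yields the history minus the undone item, which is exactly what the `undo()` method below replays.
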
Example #2
    def undo(self):
        """Undo the last cluster assignment operation.

        Returns
        -------

        up : UpdateInfo
            Instance describing the changes performed by this operation.

        """
        item = self._undo_stack.back()
        if item is None:
            # No undo has been performed: abort.
            return
        _, _, undo_state = item

        # Retrieve the initial spike-cluster assignment.
        spike_clusters_new = self._spike_clusters_base.copy()

        # Loop over the history (except the last item, which is being undone).
        for spike_ids, cluster_ids, _ in self._undo_stack:
            # We update the spike clusters accordingly.
            if spike_ids is not None:
                spike_clusters_new[spike_ids] = cluster_ids

        # What are the spikes affected by the last changes?
        changed = np.nonzero(self._spike_clusters != spike_clusters_new)[0]
        clusters_changed = spike_clusters_new[changed]

        up = self._do_assign(changed, clusters_changed)
        up.history = 'undo'
        # Attach the undo_state object from the undone item.
        up.undo_state = undo_state

        emit('cluster', self, up)
        return up
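
Concretely, the undo works by replaying the whole history except the undone item on top of the base assignment, then diffing against the current state to find the affected spikes. A self-contained sketch of that replay-and-diff logic, with made-up data:

import numpy as np

# Base assignment: 6 spikes, all in cluster 0 (made-up data).
spike_clusters_base = np.zeros(6, dtype=np.int64)

# Applied history of (spike_ids, cluster_ids) pairs, oldest first.
history = [
    (np.array([0, 1]), np.array([1, 1])),  # spikes 0-1 -> cluster 1
    (np.array([2, 3]), np.array([2, 2])),  # spikes 2-3 -> cluster 2
]

# Undo the last operation: replay everything except the final item.
spike_clusters_new = spike_clusters_base.copy()
for spike_ids, cluster_ids in history[:-1]:
    spike_clusters_new[spike_ids] = cluster_ids

# Diff the current state (after both operations) against the replayed state.
spike_clusters = np.array([1, 1, 2, 2, 0, 0])
changed = np.nonzero(spike_clusters != spike_clusters_new)[0]
print(changed, spike_clusters_new[changed])  # [2 3] [0 0]
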
Example #3
    def on_mouse_click(self, e):
        """Select a time from the amplitude view to display in the trace view."""
        if 'Shift' in e.modifiers:
            mouse_pos = self.canvas.panzoom.window_to_ndc(e.pos)
            time = Range(NDC, self.data_bounds).apply(mouse_pos)[0][0]
            emit('select_time', self, time)
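
`Range(NDC, self.data_bounds).apply(...)` is a linear transform from normalized device coordinates (the [-1, 1] square) to the view's data bounds, and the trailing `[0][0]` extracts the x coordinate, i.e. the time, of the single transformed point. A minimal standalone equivalent of that mapping, under the assumption that `data_bounds` is `(xmin, ymin, xmax, ymax)`:

import numpy as np

NDC = (-1.0, -1.0, 1.0, 1.0)  # normalized device coordinates: [-1, 1] square

def ndc_to_data(pos, data_bounds):
    """Linearly map an (x, y) position from NDC to data coordinates (sketch)."""
    x0, y0, x1, y1 = data_bounds
    nx0, ny0, nx1, ny1 = NDC
    x = x0 + (pos[0] - nx0) / (nx1 - nx0) * (x1 - x0)
    y = y0 + (pos[1] - ny0) / (ny1 - ny0) * (y1 - y0)
    return np.array([x, y])

# A click in the middle of the view maps to the middle of the time range.
print(ndc_to_data((0.0, 0.0), data_bounds=(0.0, 0.0, 120.0, 1.0))[0])  # 60.0
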
Example #4
    def merge(self, cluster_ids, to=None):
        """Merge several clusters to a new cluster.

        Parameters
        ----------

        cluster_ids : array-like
            List of clusters to merge.
        to : integer
            The id of the new cluster. By default, this is `new_cluster_id()`.

        Returns
        -------

        up : UpdateInfo
            Instance describing the changes performed by this operation.

        """

        if not _is_array_like(cluster_ids):
            raise ValueError(
                "The first argument should be a list or an array.")

        cluster_ids = sorted(cluster_ids)
        if not set(cluster_ids) <= set(self.cluster_ids):
            raise ValueError("Some clusters do not exist.")

        # Find the new cluster number.
        if to is None:
            to = self.new_cluster_id()
        if to < self.new_cluster_id():
            raise ValueError(
                "The new cluster id should be at least {0}.".format(
                    self.new_cluster_id()))

        # NOTE: we could have called self.assign() here, but we don't,
        # for performance reasons: assign() is a relatively costly
        # operation, whereas merging is much cheaper.

        # Find all spikes in the specified clusters.
        spike_ids = _spikes_in_clusters(self.spike_clusters, cluster_ids)

        up = self._do_merge(spike_ids, cluster_ids, to)
        undo_state = emit('request_undo_state', self, up)

        # Add to stack.
        self._undo_stack.add((spike_ids, [to], undo_state))

        emit('cluster', self, up)
        return up
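
The helper `_spikes_in_clusters` only has to select the spikes whose current cluster belongs to the merged set, which is a single vectorized membership test in NumPy. A plausible sketch (not necessarily the library's exact implementation):

import numpy as np

def spikes_in_clusters(spike_clusters, cluster_ids):
    """Return the ids of all spikes belonging to the given clusters (sketch)."""
    return np.nonzero(np.isin(spike_clusters, cluster_ids))[0]

spike_clusters = np.array([2, 3, 2, 5, 3])
print(spikes_in_clusters(spike_clusters, [2, 3]))  # [0 1 2 4]
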
Example #5
    def assign(self, spike_ids, spike_clusters_rel=0):
        """Make new spike cluster assignments.

        Parameters
        ----------

        spike_ids : array-like
            List of spike ids.
        spike_clusters_rel : array-like
            Relative cluster ids of the spikes in `spike_ids`. This
            must have the same size as `spike_ids`.

        Returns
        -------

        up : UpdateInfo
            Instance describing the changes performed by this operation.

        Note
        ----

        `spike_clusters_rel` contains *relative* cluster indices. Their values
        don't matter: what matters is whether two given spikes
        should end up in the same cluster or not. Adding a constant number
        to all elements in `spike_clusters_rel` results in exactly the same
        operation.

        The final cluster ids are automatically generated by the `Clustering`
        class. This is because we must ensure that all modified clusters
        get brand new ids. The whole library is based on the assumption that
        cluster ids are unique and "disposable". Changing a cluster always
        results in a new cluster id being assigned.

        If a spike is assigned to a new cluster, then all other spikes
        belonging to the same cluster are assigned to a brand new cluster,
        even if they were not changed explicitly by the `assign()` method.

        In other words, the list of spikes affected by an `assign()` is almost
        always a strict superset of the `spike_ids` parameter. The only case
        where this is not true is when whole clusters change: this is called
        a merge. It is implemented in a separate `merge()` method because it
        is logically much simpler, and faster to execute.

        """

        assert not isinstance(spike_ids, slice)

        # Ensure `spike_clusters_rel` is an array-like.
        if not hasattr(spike_clusters_rel, '__len__'):
            spike_clusters_rel = spike_clusters_rel * np.ones(len(spike_ids),
                                                              dtype=np.int64)

        spike_ids = _as_array(spike_ids)
        if len(spike_ids) == 0:
            return UpdateInfo()
        assert len(spike_ids) == len(spike_clusters_rel)
        assert spike_ids.min() >= 0
        assert spike_ids.max() < self._n_spikes, "Some spikes don't exist."

        # Normalize the spike-cluster assignment such that
        # there are only new or dead clusters, not modified clusters.
        # This implies that spikes not explicitly selected, but that
        # belong to clusters affected by the operation, will be assigned
        # to brand new clusters.
        spike_ids, cluster_ids = _extend_assignment(spike_ids,
                                                    self._spike_clusters,
                                                    spike_clusters_rel,
                                                    self.new_cluster_id())

        up = self._do_assign(spike_ids, cluster_ids)
        undo_state = emit('request_undo_state', self, up)

        # Add the assignment to the undo stack.
        self._undo_stack.add((spike_ids, cluster_ids, undo_state))

        emit('cluster', self, up)
        return up
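
To illustrate the normalization done by `_extend_assignment`: every cluster touched by the requested assignment is dissolved, explicitly reassigned spikes are grouped by their relative label, the leftover spikes of touched clusters are grouped by their old cluster, and each group receives a brand-new id. The following is a simplified sketch of that behavior (a hypothetical reimplementation, not the library's actual helper):

import numpy as np

def extend_assignment(spike_ids, spike_clusters, spike_clusters_rel, next_id):
    """Simplified sketch: dissolve touched clusters into brand-new ids."""
    spike_ids = np.asarray(spike_ids)
    spike_clusters_rel = np.asarray(spike_clusters_rel)
    # Explicit assignments: one new id per distinct relative label.
    rel_labels, rel_inv = np.unique(spike_clusters_rel, return_inverse=True)
    new_clusters = next_id + rel_inv
    next_id += len(rel_labels)
    # Spikes of touched clusters that were not explicitly selected.
    touched = np.unique(spike_clusters[spike_ids])
    extended = np.setdiff1d(np.nonzero(np.isin(spike_clusters, touched))[0],
                            spike_ids)
    # One brand-new id per old cluster among the leftover spikes.
    old_labels, old_inv = np.unique(spike_clusters[extended], return_inverse=True)
    return (np.concatenate([spike_ids, extended]),
            np.concatenate([new_clusters, next_id + old_inv]))

# Splitting spike 0 out of cluster 7 also relabels the rest of cluster 7:
spike_clusters = np.array([7, 7, 7, 9, 9])
ids, clusters = extend_assignment([0], spike_clusters, [0], next_id=10)
print(ids, clusters)  # [0 1 2] [10 11 11]

This matches the docstring above: the affected spikes ([0, 1, 2]) are a strict superset of the requested `spike_ids` ([0]), and every touched cluster ends up with a brand-new id.
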