Exemplo n.º 1
0
def test_recompute_correlation():
    """Merge three clusters, undo the merge, and check that partially
    recomputed similarity values agree with a full recomputation."""
    loader, controller = load()

    unique_before = loader.get_clusters_unique()

    # Work with three clusters.
    selected = [2, 4, 6]
    spikes = loader.get_spikes(clusters=selected)
    # cluster_spikes = loader.get_clusters(clusters=selected)
    # Keep one spike out of two from these clusters.
    half_spikes = spikes[::2]

    # Gather the inputs of the correlation computation.
    features = get_array(loader.get_features('all'))
    masks = get_array(loader.get_masks('all', full=True))
    clusters0 = get_array(loader.get_clusters('all'))
    all_clusters = loader.get_clusters_unique()

    # Build the initial similarity matrix over every cluster.
    similarity_matrix = CacheMatrix()
    correlations0 = compute_correlations(features, clusters0, masks)
    similarity_matrix.update(unique_before, correlations0)
    matrix0 = normalize(similarity_matrix.to_array().copy())

    # Merge the selected clusters.
    action, output = controller.merge_clusters(selected)
    cluster_new = output['cluster_merged']

    # Recompute only the entries affected by the merge.
    similarity_matrix.invalidate([2, 4, 6, cluster_new])
    clusters1 = get_array(loader.get_clusters('all'))
    correlations1 = compute_correlations(features, clusters1, masks,
                                         [cluster_new])
    similarity_matrix.update([cluster_new], correlations1)
    matrix1 = normalize(similarity_matrix.to_array().copy())

    # Undo the merge.
    assert controller.can_undo()
    action, output = controller.undo()

    # Recompute the restored clusters' entries, both fully and partially.
    similarity_matrix.invalidate([2, 4, 6, cluster_new])
    clusters2 = get_array(loader.get_clusters('all'))
    correlations2 = compute_correlations(features, clusters2, masks)
    correlations2b = compute_correlations(features, clusters2, masks,
                                          selected)

    # The partial recomputation must agree with the full one.
    for clu0, clu1 in correlations2b.keys():
        assert np.allclose(correlations2[clu0, clu1],
                           correlations2b[clu0, clu1]), \
            (clu0, clu1, correlations2[clu0, clu1], correlations2b[clu0, clu1])

    similarity_matrix.update(selected, correlations2b)
    matrix2 = normalize(similarity_matrix.to_array().copy())

    # The cluster assignment is back to its pre-merge state.
    assert np.array_equal(clusters0, clusters2)
    # NOTE(review): matrix comparison deliberately disabled in this variant.
    # assert np.allclose(matrix0, matrix2)

    loader.close()
Exemplo n.º 2
0
 def _similarity_matrix_computed(self, clusters_selected, matrix, clusters,
                                 cluster_groups, target_next=None):
     """Store freshly computed similarity values and refresh dependent views.

     Returns [] for an empty matrix, otherwise a list of follow-up tasks
     (wizard update, then similarity-matrix view update).
     """
     self.mainwindow.set_busy(computing_matrix=False)
     # NOTE(review): the staleness check is disabled in this variant.
     # spikes_slice = _get_similarity_matrix_slice(
     #     self.loader.nspikes,
     #     len(self.loader.get_clusters_unique()))
     # clusters_now = self.loader.get_clusters(
     #     spikes=self.loader.background_spikes)
     # if not np.array_equal(clusters, clusters_now):
     #     return False
     if not len(matrix):
         return []
     cache = self.statscache
     cache.similarity_matrix.update(clusters_selected, matrix)
     cache.similarity_matrix_normalized = normalize(
         cache.similarity_matrix.to_array(copy=True))
     # The diagonal of the normalized matrix serves as per-cluster quality.
     quality = np.diag(cache.similarity_matrix_normalized).copy()
     cache.cluster_quality = pd.Series(
         quality,
         index=cache.similarity_matrix.indices,
     )
     self.get_view('ClusterView').set_quality(cache.cluster_quality)
     return [('_wizard_update', (target_next,)),
             ('_update_similarity_matrix_view',)]
Exemplo n.º 3
0
 def _similarity_matrix_computed(self, clusters_selected, matrix, clusters,
                                 cluster_groups, target_next=None):
     """Store freshly computed similarity values and refresh dependent views.

     Returns False when the clustering changed while the computation ran
     (the result is stale), otherwise a list of follow-up tasks.
     """
     self.mainwindow.set_busy(computing_matrix=False)
     # spikes_slice = _get_similarity_matrix_slice(
     # self.loader.nspikes,
     # len(self.loader.get_clusters_unique()))
     # Discard the result if the clustering was modified in the meantime.
     clusters_now = self.loader.get_clusters(
         spikes=self.loader.background_spikes)
     if not np.array_equal(clusters, clusters_now):
         return False
     cache = self.statscache
     cache.similarity_matrix.update(clusters_selected, matrix)
     cache.similarity_matrix_normalized = normalize(
         cache.similarity_matrix.to_array(copy=True))
     # The diagonal of the normalized matrix serves as per-cluster quality.
     quality = np.diag(cache.similarity_matrix_normalized)
     cache.cluster_quality = pd.Series(
         quality,
         index=cache.similarity_matrix.indices,
     )
     self.get_view('ClusterView').set_quality(cache.cluster_quality)
     return [
         ('_wizard_update', (target_next, )),
         ('_update_similarity_matrix_view', ),
     ]
Exemplo n.º 4
0
def test_recompute_correlation():
    """Merge three clusters, undo the merge, and check that partially
    recomputed similarity values agree with a full recomputation, and
    that the resulting matrix matches the pre-merge one."""
    loader, controller = load()

    unique_before = loader.get_clusters_unique()

    # Work with three clusters.
    selected = [2, 4, 6]
    spikes = loader.get_spikes(clusters=selected)
    # cluster_spikes = loader.get_clusters(clusters=selected)
    # Keep one spike out of two from these clusters.
    half_spikes = spikes[::2]

    # Gather the inputs of the correlation computation.
    features = get_array(loader.get_features('all'))
    masks = get_array(loader.get_masks('all', full=True))
    clusters0 = get_array(loader.get_clusters('all'))
    all_clusters = loader.get_clusters_unique()

    # Build the initial similarity matrix over every cluster.
    similarity_matrix = CacheMatrix()
    correlations0 = compute_correlations(features, clusters0, masks)
    similarity_matrix.update(unique_before, correlations0)
    matrix0 = normalize(similarity_matrix.to_array().copy())

    # Merge the selected clusters.
    action, output = controller.merge_clusters(selected)
    cluster_new = output['cluster_merged']

    # Recompute only the entries affected by the merge.
    similarity_matrix.invalidate([2, 4, 6, cluster_new])
    clusters1 = get_array(loader.get_clusters('all'))
    correlations1 = compute_correlations(features, clusters1, masks,
                                         [cluster_new])
    similarity_matrix.update([cluster_new], correlations1)
    matrix1 = normalize(similarity_matrix.to_array().copy())

    # Undo the merge.
    assert controller.can_undo()
    action, output = controller.undo()

    # Recompute the restored clusters' entries, both fully and partially.
    similarity_matrix.invalidate([2, 4, 6, cluster_new])
    clusters2 = get_array(loader.get_clusters('all'))
    correlations2 = compute_correlations(features, clusters2, masks)
    correlations2b = compute_correlations(features, clusters2, masks,
                                          selected)

    # The partial recomputation must agree with the full one.
    for clu0, clu1 in correlations2b.keys():
        assert np.allclose(correlations2[clu0, clu1],
                           correlations2b[clu0, clu1]), \
            (clu0, clu1, correlations2[clu0, clu1], correlations2b[clu0, clu1])

    similarity_matrix.update(selected, correlations2b)
    matrix2 = normalize(similarity_matrix.to_array().copy())

    # Clustering and similarity matrix are back to their pre-merge state.
    assert np.array_equal(clusters0, clusters2)
    assert np.allclose(matrix0, matrix2)

    loader.close()