def get_correlogramsview_data(loader, statscache):
    """Build the data dictionary consumed by the correlograms view.

    Caps the selection at ``correlograms_max_nclusters`` clusters, pulls the
    corresponding correlograms out of the stats cache, and computes the
    per-cluster baselines from the cluster sizes.
    """
    selected = loader.get_clusters_selected()
    # Subset of selected clusters if there are too many clusters.
    limit = USERPREF['correlograms_max_nclusters']
    clusters_selected = selected if len(selected) < limit else selected[:limit]
    correlograms = statscache.correlograms.submatrix(clusters_selected)
    # Compute the baselines from the selected clusters' spike counts.
    sizes = get_array(select(loader.get_cluster_sizes(), clusters_selected))
    colors = select(loader.get_cluster_colors(), clusters_selected)
    corrbin = SETTINGS.get('correlograms.corrbin', CORRBIN_DEFAULT)
    ncorrbins = SETTINGS.get('correlograms.ncorrbins', NCORRBINS_DEFAULT)
    duration = corrbin * ncorrbins
    baselines = get_baselines(sizes, duration, corrbin)
    return dict(
        correlograms=correlograms,
        baselines=baselines,
        clusters_selected=clusters_selected,
        cluster_colors=colors,
        ncorrbins=ncorrbins,
        corrbin=corrbin,
    )
def get_correlogramsview_data(loader, statscache):
    """Assemble the inputs for the correlograms view.

    Returns a dict with the cached correlograms of (at most
    ``correlograms_max_nclusters``) selected clusters, their colors, and the
    baseline firing levels derived from the cluster sizes.
    """
    all_selected = loader.get_clusters_selected()
    # Keep only a subset when too many clusters are selected at once.
    max_nclusters = USERPREF['correlograms_max_nclusters']
    if len(all_selected) >= max_nclusters:
        clusters_selected = all_selected[:max_nclusters]
    else:
        clusters_selected = all_selected
    correlograms = statscache.correlograms.submatrix(clusters_selected)
    # Baselines are computed from each cluster's spike count over the
    # correlogram window.
    sizes = get_array(select(loader.get_cluster_sizes(), clusters_selected))
    cluster_colors = select(loader.get_cluster_colors(), clusters_selected)
    corrbin = SETTINGS.get('correlograms.corrbin', CORRBIN_DEFAULT)
    ncorrbins = SETTINGS.get('correlograms.ncorrbins', NCORRBINS_DEFAULT)
    window_duration = corrbin * ncorrbins
    baselines = get_baselines(sizes, window_duration, corrbin)
    return dict(
        correlograms=correlograms,
        baselines=baselines,
        clusters_selected=clusters_selected,
        cluster_colors=cluster_colors,
        ncorrbins=ncorrbins,
        corrbin=corrbin,
    )
def test_klusters_loader_2():
    """Check single-cluster selection in KlustersLoader.

    Selects the cluster of the middle spike and verifies that the selected
    features/masks/waveforms/clusters/spiketimes have the expected shapes,
    and that sub-selecting a waveform matches the unselected one.
    """
    # Open the mock data.
    xmlfile = os.path.join(TEST_FOLDER, 'test.xml')
    loader = KlustersLoader(filename=xmlfile)

    # Get full data sets.
    features = loader.get_features()
    masks = loader.get_masks()
    waveforms = loader.get_waveforms()
    clusters = loader.get_clusters()
    spiketimes = loader.get_spiketimes()
    nclusters = len(Counter(clusters))
    probe = loader.get_probe()
    cluster_colors = loader.get_cluster_colors()
    cluster_groups = loader.get_cluster_groups()
    group_colors = loader.get_group_colors()
    group_names = loader.get_group_names()
    cluster_sizes = loader.get_cluster_sizes()

    # Check selection.
    # ----------------
    # BUG FIX: `nspikes / 2` is true division under Python 3 and yields a
    # float, which is not a valid index; use floor division instead.
    index = nspikes // 2
    waveform = select(waveforms, index)
    cluster = clusters[index]
    spikes_in_cluster = np.nonzero(clusters == cluster)[0]
    nspikes_in_cluster = len(spikes_in_cluster)
    loader.select(clusters=[cluster])

    # Check the size of the selected data.
    # ------------------------------------
    assert check_shape(loader.get_features(),
                       (nspikes_in_cluster, nchannels * fetdim + 1))
    assert check_shape(loader.get_masks(full=True),
                       (nspikes_in_cluster, nchannels * fetdim + 1))
    assert check_shape(loader.get_waveforms(),
                       (nspikes_in_cluster, nsamples, nchannels))
    assert check_shape(loader.get_clusters(), (nspikes_in_cluster,))
    assert check_shape(loader.get_spiketimes(), (nspikes_in_cluster,))

    # Check waveform sub selection.
    # -----------------------------
    waveforms_selected = loader.get_waveforms()
    assert np.array_equal(get_array(select(waveforms_selected, index)),
                          get_array(waveform))

    loader.close()
def test_klusters_loader_2():
    """Check KlustersLoader cluster selection against the full data sets.

    Picks the cluster containing the middle spike, selects it, and asserts
    the shapes of all selected arrays plus round-trip waveform equality.
    """
    # Open the mock data.
    xmlfile = os.path.join(TEST_FOLDER, 'test.xml')
    loader = KlustersLoader(filename=xmlfile)

    # Get full data sets.
    features = loader.get_features()
    masks = loader.get_masks()
    waveforms = loader.get_waveforms()
    clusters = loader.get_clusters()
    spiketimes = loader.get_spiketimes()
    nclusters = len(Counter(clusters))
    probe = loader.get_probe()
    cluster_colors = loader.get_cluster_colors()
    cluster_groups = loader.get_cluster_groups()
    group_colors = loader.get_group_colors()
    group_names = loader.get_group_names()
    cluster_sizes = loader.get_cluster_sizes()

    # Check selection.
    # ----------------
    # BUG FIX: floor division — `nspikes / 2` is a float on Python 3 and
    # cannot be used as an array index.
    index = nspikes // 2
    waveform = select(waveforms, index)
    cluster = clusters[index]
    spikes_in_cluster = np.nonzero(clusters == cluster)[0]
    nspikes_in_cluster = len(spikes_in_cluster)
    loader.select(clusters=[cluster])

    # Check the size of the selected data.
    # ------------------------------------
    assert check_shape(loader.get_features(),
                       (nspikes_in_cluster, nchannels * fetdim + 1))
    assert check_shape(loader.get_masks(full=True),
                       (nspikes_in_cluster, nchannels * fetdim + 1))
    assert check_shape(loader.get_waveforms(),
                       (nspikes_in_cluster, nsamples, nchannels))
    assert check_shape(loader.get_clusters(), (nspikes_in_cluster,))
    assert check_shape(loader.get_spiketimes(), (nspikes_in_cluster,))

    # Check waveform sub selection.
    # -----------------------------
    waveforms_selected = loader.get_waveforms()
    assert np.array_equal(get_array(select(waveforms_selected, index)),
                          get_array(waveform))

    loader.close()
def test_renumber_clusters():
    """Check that renumber_clusters reorders clusters by group, then index.

    Builds random non-contiguous cluster labels with three groups, renumbers
    them, and verifies both the spike-wise relabeling and the renumbered
    cluster-info table.
    """
    # Create clusters with random, non-contiguous labels.
    # NOTE(review): no RNG seed — the test relies only on structural
    # properties, but a fixed seed would make failures reproducible.
    clusters = np.random.randint(size=20, low=10, high=100)
    clusters_unique = np.unique(clusters)
    n = len(clusters_unique)

    # Create cluster info: column 0 = cluster index, 1 = color, 2 = group.
    cluster_info = np.zeros((n, 3), dtype=np.int32)
    cluster_info[:, 0] = clusters_unique
    cluster_info[:, 1] = np.mod(np.arange(n, dtype=np.int32), 35) + 1
    # Set groups in three consecutive chunks: [0,k)=1, [k,2k)=0, [2k,n)=2.
    # FIX: the original upper bound was `2 * n // 3`, but the next statement
    # overwrites everything from `2 * k` onward (and 2*(n//3) <= (2*n)//3),
    # so the net effect is exactly [k, 2k) = 0 — write the bound that way.
    k = n // 3
    cluster_info[:k, 2] = 1
    cluster_info[k:2 * k, 2] = 0
    cluster_info[2 * k:, 2] = 2
    # Move the middle cluster into group 1.
    cluster_info[n // 2, 2] = 1
    cluster_info = pd.DataFrame(
        {'color': cluster_info[:, 1], 'group': cluster_info[:, 2]},
        dtype=np.int32, index=cluster_info[:, 0])

    # Renumber.
    clusters_renumbered, cluster_info_renumbered = renumber_clusters(
        clusters, cluster_info)

    # Representative clusters in each group.
    c0 = clusters_unique[k]           # group 0
    c1 = clusters_unique[0]           # group 1
    c2 = clusters_unique[2 * k]       # group 2
    cm = clusters_unique[n // 2]      # group 1 (reassigned above)
    c0next = clusters_unique[k + 1]
    c1next = clusters_unique[0 + 1]
    c2next = clusters_unique[2 * k + 1]

    # New order (new indices start at 2):
    # c0 ... cm-1, cm+1, ..., c2-1, c1, ..., c0-1, cm, c2, ...
    assert np.array_equal(clusters == c0, clusters_renumbered == 0 + 2)
    assert np.array_equal(clusters == c0next, clusters_renumbered == 1 + 2)
    assert np.array_equal(clusters == c1, clusters_renumbered == k - 1 + 2)
    assert np.array_equal(clusters == c1next, clusters_renumbered == k + 2)
    assert np.array_equal(clusters == c2, clusters_renumbered == 2 * k + 2)
    assert np.array_equal(clusters == c2next,
                          clusters_renumbered == 2 * k + 1 + 2)
    assert np.array_equal(get_indices(cluster_info_renumbered),
                          np.arange(n) + 2)

    # Groups must be non-decreasing with the new numbering.
    assert np.all(np.diff(get_array(cluster_info_renumbered)[:, 1]) >= 0)
    assert np.all(select(cluster_info_renumbered, 0 + 2) ==
                  select(cluster_info, c0))
    assert np.all(select(cluster_info_renumbered, 1 + 2) ==
                  select(cluster_info, c0next))
    assert np.all(select(cluster_info_renumbered, k - 1 + 2) ==
                  select(cluster_info, c1))
    assert np.all(select(cluster_info_renumbered, k + 2) ==
                  select(cluster_info, c1next))
    assert np.all(select(cluster_info_renumbered, 2 * k + 2) ==
                  select(cluster_info, c2))
    assert np.all(select(cluster_info_renumbered, 2 * k + 1 + 2) ==
                  select(cluster_info, c2next))
def te_SKIP_st_renumber_clusters():
    """Disabled copy of the renumbering test (name mangled so pytest skips it).

    Same scenario: random non-contiguous cluster labels split into three
    groups, renumbered, then checked spike-wise and against the renumbered
    cluster-info table.
    """
    # Random, non-contiguous cluster labels.
    clusters = np.random.randint(size=20, low=10, high=100)
    clusters_unique = np.unique(clusters)
    n = len(clusters_unique)

    # Cluster info table: col 0 = cluster index, col 1 = color, col 2 = group.
    info = np.zeros((n, 3), dtype=np.int32)
    info[:, 0] = clusters_unique
    info[:, 1] = np.mod(np.arange(n, dtype=np.int32), 35) + 1

    # Group assignment in three chunks; the last write wins on overlaps.
    k = n // 3
    info[:k, 2] = 1
    info[k:2 * n // 3, 2] = 0
    info[2 * k:, 2] = 2
    info[n // 2, 2] = 1
    cluster_info = pd.DataFrame(
        {'color': info[:, 1], 'group': info[:, 2]},
        dtype=np.int32,
        index=info[:, 0])

    # Renumber.
    clusters_renumbered, cluster_info_renumbered = renumber_clusters(
        clusters, cluster_info)

    # Representative clusters per group.
    c0 = clusters_unique[k]           # group 0
    c1 = clusters_unique[0]           # group 1
    c2 = clusters_unique[2 * k]       # group 2
    cm = clusters_unique[n // 2]      # group 1
    c0next = clusters_unique[k + 1]
    c1next = clusters_unique[0 + 1]
    c2next = clusters_unique[2 * k + 1]

    # New order:
    # c0 ... cm-1, cm+1, ..., c2-1, c1, ..., c0-1, cm, c2, ...
    expected = [
        (c0, 0 + 2),
        (c0next, 1 + 2),
        (c1, k - 1 + 2),
        (c1next, k + 2),
        (c2, 2 * k + 2),
        (c2next, 2 * k + 1 + 2),
    ]
    for old, new in expected:
        assert np.array_equal(clusters == old, clusters_renumbered == new)
    assert np.array_equal(get_indices(cluster_info_renumbered),
                          np.arange(n) + 2)

    # Increasing groups with the new numbering.
    assert np.all(np.diff(get_array(cluster_info_renumbered)[:, 1]) >= 0)
    for old, new in expected:
        assert np.all(select(cluster_info_renumbered, new) ==
                      select(cluster_info, old))