def test_klusters_loader_2():
    # Open the mock data.
    dir = TEST_FOLDER
    xmlfile = os.path.join(dir, 'test.xml')
    l = KlustersLoader(filename=xmlfile)

    # Get full data sets.
    features = l.get_features()
    masks = l.get_masks()
    waveforms = l.get_waveforms()
    clusters = l.get_clusters()
    spiketimes = l.get_spiketimes()
    nclusters = len(Counter(clusters))

    probe = l.get_probe()

    cluster_colors = l.get_cluster_colors()
    cluster_groups = l.get_cluster_groups()
    group_colors = l.get_group_colors()
    group_names = l.get_group_names()
    cluster_sizes = l.get_cluster_sizes()

    # Check selection.
    # ----------------
    index = nspikes / 2
    waveform = select(waveforms, index)
    cluster = clusters[index]
    spikes_in_cluster = np.nonzero(clusters == cluster)[0]
    nspikes_in_cluster = len(spikes_in_cluster)
    l.select(clusters=[cluster])

    # Check the size of the selected data.
    # ------------------------------------
    assert check_shape(l.get_features(),
                       (nspikes_in_cluster, nchannels * fetdim + 1))
    assert check_shape(l.get_masks(full=True),
                       (nspikes_in_cluster, nchannels * fetdim + 1))
    assert check_shape(l.get_waveforms(),
                       (nspikes_in_cluster, nsamples, nchannels))
    assert check_shape(l.get_clusters(), (nspikes_in_cluster,))
    assert check_shape(l.get_spiketimes(), (nspikes_in_cluster,))

    # Check waveform sub selection.
    # -----------------------------
    waveforms_selected = l.get_waveforms()
    assert np.array_equal(get_array(select(waveforms_selected, index)),
                          get_array(waveform))

    l.close()
def _compute_correlograms(self, clusters_selected, wizard=None):
    # Get the correlograms parameters.
    spiketimes = get_array(self.loader.get_spiketimes("all"))
    sample_rate = self.loader.freq
    # print spiketimes.dtype

    # Make a copy of the array so that it does not change before the
    # computation of the correlograms begins.
    clusters = np.array(get_array(self.loader.get_clusters("all")))

    # Get excerpts.
    nexcerpts = USERPREF.get("correlograms_nexcerpts", 50)
    excerpt_size = USERPREF.get("correlograms_excerpt_size", 10000)
    spiketimes_excerpts = get_excerpts(spiketimes,
                                       nexcerpts=nexcerpts,
                                       excerpt_size=excerpt_size)
    clusters_excerpts = get_excerpts(clusters,
                                     nexcerpts=nexcerpts,
                                     excerpt_size=excerpt_size)

    # corrbin = self.loader.corrbin
    # ncorrbins = self.loader.ncorrbins
    corrbin = SETTINGS.get("correlograms.corrbin", 0.001)
    ncorrbins = SETTINGS.get("correlograms.ncorrbins", 101)

    # Ensure ncorrbins is odd.
    if ncorrbins % 2 == 0:
        ncorrbins += 1

    # Get cluster indices that need to be updated.
    # clusters_to_update = (self.statscache.correlograms.
    #                       not_in_key_indices(clusters_selected))
    clusters_to_update = clusters_selected

    # If there are pairs that need to be updated, launch the task.
    if len(clusters_to_update) > 0:
        # Set wait cursor.
        self.mainwindow.set_busy(computing_correlograms=True)
        # Launch the task.
        self.tasks.correlograms_task.compute(
            spiketimes_excerpts,
            clusters_excerpts,
            clusters_to_update=clusters_to_update,
            clusters_selected=clusters_selected,
            ncorrbins=ncorrbins,
            corrbin=corrbin,
            sample_rate=sample_rate,
            wizard=wizard,
        )
    # Otherwise, update directly the correlograms view without launching
    # the task in the external process.
    else:
        # self.update_correlograms_view()
        return ("_update_correlograms_view", (wizard,), {})
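# The `_compute_correlograms` method above subsamples the spike train with
# `get_excerpts` before computing correlograms. Below is a minimal,
# hypothetical sketch of that idea, assuming "excerpts" means evenly spaced
# chunks that are concatenated; it is an illustration, not the library's
# actual `get_excerpts` implementation.
import numpy as np


def take_excerpts(data, nexcerpts, excerpt_size):
    """Concatenate `nexcerpts` evenly spaced chunks of `excerpt_size` items."""
    n = len(data)
    if n <= nexcerpts * excerpt_size:
        # Not enough data to subsample: keep everything.
        return np.asarray(data)
    # Space the excerpt start indices evenly over the whole array.
    step = (n - excerpt_size) // max(nexcerpts - 1, 1)
    starts = [i * step for i in range(nexcerpts)]
    return np.concatenate([data[s:s + excerpt_size] for s in starts])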
def _wizard_show_pair(self, target=None, candidate=None):
    if target is None:
        target = (self.wizard.current_target(),
                  get_array(self.loader.get_cluster_color(
                      self.wizard.current_target()))[0])
    if candidate is None:
        try:
            candidate = (self.wizard.current_candidate(),
                         get_array(self.loader.get_cluster_color(
                             self.wizard.current_candidate()))[0])
        # HACK: this can fail because when merging clusters, the merged
        # cluster (candidate) is deleted, and its color does not exist
        # anymore.
        except:
            candidate = (self.wizard.current_candidate(), 0)
    [view.set_wizard_pair(target, candidate)
     for view in self.get_views('FeatureView')]
def get_correlogramsview_data(loader, statscache):
    clusters_selected0 = loader.get_clusters_selected()

    # Subset of selected clusters if there are too many clusters.
    max_nclusters = USERPREF['correlograms_max_nclusters']
    if len(clusters_selected0) < max_nclusters:
        clusters_selected = clusters_selected0
    else:
        clusters_selected = clusters_selected0[:max_nclusters]

    correlograms = statscache.correlograms.submatrix(clusters_selected)

    # Compute the baselines.
    sizes = get_array(select(loader.get_cluster_sizes(), clusters_selected))
    colors = select(loader.get_cluster_colors(), clusters_selected)
    corrbin = SETTINGS.get('correlograms.corrbin', CORRBIN_DEFAULT)
    ncorrbins = SETTINGS.get('correlograms.ncorrbins', NCORRBINS_DEFAULT)
    duration = corrbin * ncorrbins
    baselines = get_baselines(sizes, duration, corrbin)

    data = dict(
        correlograms=correlograms,
        baselines=baselines,
        clusters_selected=clusters_selected,
        cluster_colors=colors,
        ncorrbins=ncorrbins,
        corrbin=corrbin,
    )
    return data
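# `get_correlogramsview_data` derives a flat baseline per cluster pair from
# the cluster sizes. The sketch below shows one standard convention, assuming
# independent Poisson spike trains and that `duration` is the time span over
# which the spike counts were collected: the expected count in a bin of width
# `corrbin` for clusters i and j is n_i * n_j * corrbin / duration. The
# library's actual `get_baselines` may normalize differently.
import numpy as np


def poisson_baselines(sizes, duration, corrbin):
    """Expected correlogram count per bin for each cluster pair, under independence."""
    sizes = np.asarray(sizes, dtype=np.float64)
    return np.outer(sizes, sizes) * corrbin / duration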
def _compute_correlograms(self, clusters_selected, wizard=None):
    # Get the correlograms parameters.
    spiketimes = get_array(self.loader.get_spiketimes('all'))
    # print spiketimes.dtype

    # Make a copy of the array so that it does not change before the
    # computation of the correlograms begins.
    clusters = np.array(get_array(self.loader.get_clusters('all')))

    # Get excerpts.
    nexcerpts = USERPREF.get('correlograms_nexcerpts', 100)
    excerpt_size = USERPREF.get('correlograms_excerpt_size', 20000)
    spiketimes_excerpts = get_excerpts(spiketimes,
                                       nexcerpts=nexcerpts,
                                       excerpt_size=excerpt_size)
    clusters_excerpts = get_excerpts(clusters,
                                     nexcerpts=nexcerpts,
                                     excerpt_size=excerpt_size)

    # corrbin = self.loader.corrbin
    # ncorrbins = self.loader.ncorrbins
    corrbin = SETTINGS.get('correlograms.corrbin', .001)
    ncorrbins = SETTINGS.get('correlograms.ncorrbins', 100)

    # Get cluster indices that need to be updated.
    clusters_to_update = (
        self.statscache.correlograms.not_in_key_indices(clusters_selected))

    # If there are pairs that need to be updated, launch the task.
    if len(clusters_to_update) > 0:
        # Set wait cursor.
        self.mainwindow.set_busy(computing_correlograms=True)
        # Launch the task.
        self.tasks.correlograms_task.compute(
            spiketimes_excerpts,
            clusters_excerpts,
            clusters_to_update=clusters_to_update,
            clusters_selected=clusters_selected,
            ncorrbins=ncorrbins,
            corrbin=corrbin,
            wizard=wizard)
    # Otherwise, update directly the correlograms view without launching
    # the task in the external process.
    else:
        # self.update_correlograms_view()
        return ('_update_correlograms_view', (wizard,), {})
def slice_loaded(self, samples, bounds, size):
    self.samples = samples
    self.bounds = bounds
    self.size = size
    self.channel_index = np.repeat(self.channels,
                                   self.samples.shape[0] / self.nchannels)
    self.color_index = np.repeat(get_array(self.channel_colors),
                                 self.samples.shape[0] / self.nchannels)
    self.position = self.samples
    self.paint_manager.update_slice()
    self.paint_manager.updateGL()
def get_ticks_text(self, x0, y0, x1, y1):
    ticksx, nfracx = self.get_ticks(x0, x1)
    ticksy = np.linspace(-0.9, 0.9, self.parent.data_manager.nchannels)
    n = len(ticksx)
    text = [self.format_number(x, nfracx) for x in ticksx]
    text += [str(get_array(self.parent.data_manager.channel_names)[y])
             for y in reversed(range(self.parent.data_manager.nchannels))]
    # Position of the ticks.
    coordinates = np.zeros((len(text), 2))
    coordinates[:n, 0] = ticksx
    coordinates[n:, 1] = ticksy
    return text, coordinates, n
def load_new_slice(self, trace, slice, xlim, totalduration, duration_initial,
                   spiketimes, channel_colors, spikes_visible, cluster_colors,
                   spikemasks, spikeclusters, s_before, s_after):
    total_size = trace.shape[0]
    samples = trace[slice, :]

    # Convert the data into floating points.
    samples = np.array(samples, dtype=np.float32)

    # Normalize the data.
    samples *= (1. / 65535)

    # Size of the slice.
    nsamples, nchannels = samples.shape

    # Create the data array for the plot visual.
    M = np.empty((nsamples * nchannels, 2))
    samples = samples.T
    M[:, 1] = samples.ravel()

    # Generate the x coordinates.
    x = np.arange(slice.start, slice.stop, slice.step) / float(total_size - 1)
    x = x * 2 * totalduration / duration_initial - 1
    M[:, 0] = np.tile(x, nchannels)

    bounds = np.arange(nchannels + 1) * nsamples
    size = bounds[-1]
    color_index = np.repeat(get_array(channel_colors), M.shape[0] / nchannels)

    color_index_spikes = np.full((nchannels, M.shape[0] / nchannels),
                                 COLORS_COUNT + 1)
    spikestart = bisect.bisect_left(spiketimes, slice.start)
    spikestop = bisect.bisect_right(spiketimes, slice.stop, lo=spikestart) + 1
    spikeclusters = spikeclusters[spikestart:spikestop]
    spikemasks = spikemasks[spikestart:spikestop]
    spiketimes = spiketimes[spikestart:spikestop]
    # Nearest displayed sample, rounded to integer.
    nds = ((spiketimes - slice.start) / slice.step).astype(int)
    s_before = max(int(s_before / slice.step), 2)
    s_after = max(int(s_after / slice.step), 2)
    for x in range(0, nds.shape[0] - 1):
        color_index_spikes[spikemasks[x],
                           max(nds[x] - s_before, 0):
                           min(nds[x] + s_after,
                               color_index_spikes.shape[1])] = \
            cluster_colors[spikeclusters[x]]
    color_index_spikes = np.ravel(color_index_spikes)

    self.sliceLoaded.emit(M, bounds, size, slice, color_index,
                          color_index_spikes)
def load(self, cluster_colors=None, cluster_groups=None, group_colors=None,
         group_names=None, cluster_sizes=None, cluster_quality=None,
         background={}):
    if group_names is None or cluster_colors is None:
        return

    # Create the tree.
    # Go through all groups.
    for groupidx, groupname in group_names.iteritems():
        spkcount = np.sum(cluster_sizes[cluster_groups == groupidx])
        groupitem = self.add_group_node(
            groupidx=groupidx, name=groupname,
            # color=group_colors[groupidx], spkcount=spkcount)
            color=select(group_colors, groupidx), spkcount=spkcount)

    # Go through all clusters.
    for clusteridx, color in cluster_colors.iteritems():
        if cluster_quality is not None:
            try:
                quality = get_array(select(cluster_quality, clusteridx))[0]
            except IndexError:
                quality = 0.
        else:
            quality = 0.
        # Add cluster.
        bgcolor = background.get(clusteridx, None)
        clusteritem = self.add_cluster(
            clusteridx=clusteridx,
            # name=info.names[clusteridx],
            color=color,
            bgcolor=bgcolor,
            quality=quality,
            # spkcount=cluster_sizes[clusteridx],
            spkcount=select(cluster_sizes, clusteridx),
            # Assign the group as a parent of this cluster.
            parent=self.get_group(select(cluster_groups, clusteridx)))
def te_SKIP_st_renumber_clusters():
    # Create clusters.
    clusters = np.random.randint(size=20, low=10, high=100)
    clusters_unique = np.unique(clusters)
    n = len(clusters_unique)

    # Create cluster info.
    cluster_info = np.zeros((n, 3), dtype=np.int32)
    cluster_info[:, 0] = clusters_unique
    cluster_info[:, 1] = np.mod(np.arange(n, dtype=np.int32), 35) + 1

    # Set groups.
    k = n // 3
    cluster_info[:k, 2] = 1
    cluster_info[k:2 * n // 3, 2] = 0
    cluster_info[2 * k:, 2] = 2
    cluster_info[n // 2, 2] = 1

    cluster_info = pd.DataFrame({
        'color': cluster_info[:, 1],
        'group': cluster_info[:, 2]},
        dtype=np.int32, index=cluster_info[:, 0])

    # Renumber.
    clusters_renumbered, cluster_info_renumbered = renumber_clusters(
        clusters, cluster_info)

    # Test.
    c0 = clusters_unique[k]  # group 0
    c1 = clusters_unique[0]  # group 1
    c2 = clusters_unique[2 * k]  # group 2
    cm = clusters_unique[n // 2]  # group 1
    c0next = clusters_unique[k + 1]
    c1next = clusters_unique[0 + 1]
    c2next = clusters_unique[2 * k + 1]

    # New order:
    # c0 ... cm-1, cm+1, ..., c2-1, c1, ..., c0-1, cm, c2, ...
    assert np.array_equal(clusters == c0, clusters_renumbered == 0 + 2)
    assert np.array_equal(clusters == c0next, clusters_renumbered == 1 + 2)
    assert np.array_equal(clusters == c1, clusters_renumbered == k - 1 + 2)
    assert np.array_equal(clusters == c1next, clusters_renumbered == k + 2)
    assert np.array_equal(clusters == c2, clusters_renumbered == 2 * k + 2)
    assert np.array_equal(clusters == c2next,
                          clusters_renumbered == 2 * k + 1 + 2)
    assert np.array_equal(get_indices(cluster_info_renumbered),
                          np.arange(n) + 2)

    # Increasing groups with the new numbering.
    assert np.all(np.diff(get_array(cluster_info_renumbered)[:, 1]) >= 0)

    assert np.all(select(cluster_info_renumbered, 0 + 2) ==
                  select(cluster_info, c0))
    assert np.all(select(cluster_info_renumbered, 1 + 2) ==
                  select(cluster_info, c0next))
    assert np.all(select(cluster_info_renumbered, k - 1 + 2) ==
                  select(cluster_info, c1))
    assert np.all(select(cluster_info_renumbered, k + 2) ==
                  select(cluster_info, c1next))
    assert np.all(select(cluster_info_renumbered, 2 * k + 2) ==
                  select(cluster_info, c2))
    assert np.all(select(cluster_info_renumbered, 2 * k + 1 + 2) ==
                  select(cluster_info, c2next))
def test_renumber_clusters():
    # Create clusters.
    clusters = np.random.randint(size=20, low=10, high=100)
    clusters_unique = np.unique(clusters)
    n = len(clusters_unique)

    # Create cluster info.
    cluster_info = np.zeros((n, 3), dtype=np.int32)
    cluster_info[:, 0] = clusters_unique
    cluster_info[:, 1] = np.mod(np.arange(n, dtype=np.int32), 35) + 1

    # Set groups.
    k = n // 3
    cluster_info[:k, 2] = 1
    cluster_info[k:2 * n // 3, 2] = 0
    cluster_info[2 * k:, 2] = 2
    cluster_info[n // 2, 2] = 1

    cluster_info = pd.DataFrame({
        'color': cluster_info[:, 1],
        'group': cluster_info[:, 2]},
        dtype=np.int32, index=cluster_info[:, 0])

    # Renumber.
    clusters_renumbered, cluster_info_renumbered = renumber_clusters(
        clusters, cluster_info)

    # Test.
    c0 = clusters_unique[k]  # group 0
    c1 = clusters_unique[0]  # group 1
    c2 = clusters_unique[2 * k]  # group 2
    cm = clusters_unique[n // 2]  # group 1
    c0next = clusters_unique[k + 1]
    c1next = clusters_unique[0 + 1]
    c2next = clusters_unique[2 * k + 1]

    # New order:
    # c0 ... cm-1, cm+1, ..., c2-1, c1, ..., c0-1, cm, c2, ...
    assert np.array_equal(clusters == c0, clusters_renumbered == 0 + 2)
    assert np.array_equal(clusters == c0next, clusters_renumbered == 1 + 2)
    assert np.array_equal(clusters == c1, clusters_renumbered == k - 1 + 2)
    assert np.array_equal(clusters == c1next, clusters_renumbered == k + 2)
    assert np.array_equal(clusters == c2, clusters_renumbered == 2 * k + 2)
    assert np.array_equal(clusters == c2next,
                          clusters_renumbered == 2 * k + 1 + 2)
    assert np.array_equal(get_indices(cluster_info_renumbered),
                          np.arange(n) + 2)

    # Increasing groups with the new numbering.
    assert np.all(np.diff(get_array(cluster_info_renumbered)[:, 1]) >= 0)

    assert np.all(select(cluster_info_renumbered, 0 + 2) ==
                  select(cluster_info, c0))
    assert np.all(select(cluster_info_renumbered, 1 + 2) ==
                  select(cluster_info, c0next))
    assert np.all(select(cluster_info_renumbered, k - 1 + 2) ==
                  select(cluster_info, c1))
    assert np.all(select(cluster_info_renumbered, k + 2) ==
                  select(cluster_info, c1next))
    assert np.all(select(cluster_info_renumbered, 2 * k + 2) ==
                  select(cluster_info, c2))
    assert np.all(select(cluster_info_renumbered, 2 * k + 1 + 2) ==
                  select(cluster_info, c2next))
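# The assertions in the two renumbering tests above are consistent with a
# scheme that sorts cluster ids by (group, original id) and relabels them
# consecutively starting at 2. Below is a minimal sketch of that scheme,
# assuming a plain dict {cluster_id: group} instead of the DataFrame used by
# the real `renumber_clusters`; it is an illustration, not the actual code.
import numpy as np


def renumber_sketch(clusters, groups):
    """Relabel cluster ids as 2, 3, ... ordered by (group, original id)."""
    order = sorted(set(clusters), key=lambda c: (groups[c], c))
    mapping = {c: i + 2 for i, c in enumerate(order)}
    return np.array([mapping[c] for c in clusters]), mapping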
def test_hdf5_loader1():
    # Open the mock data.
    dir = TEST_FOLDER
    filename = os.path.join(dir, 'test.xml')

    global nspikes
    nspikes_total = nspikes

    # Convert to HDF5.
    klusters_to_hdf5(filename)

    # Open the file.
    filename_h5 = os.path.join(dir, 'test.kwik')
    l = HDF5Loader(filename=filename_h5)
    lk = KlustersLoader(filename=filename)

    # Open probe.
    probe = l.get_probe()
    assert np.array_equal(probe[1]['channels'], np.arange(nchannels))

    # Select cluster.
    cluster = 3
    l.select(clusters=[cluster])
    lk.select(clusters=[cluster])

    # Get clusters.
    clusters = l.get_clusters('all')
    clusters_k = lk.get_clusters('all')
    nspikes = np.sum(clusters == cluster)
    # Check the clusters are correct.
    assert np.array_equal(get_array(clusters), get_array(clusters_k))

    # Get the spike times.
    spiketimes = l.get_spiketimes()
    spiketimes_k = lk.get_spiketimes()
    assert np.all(spiketimes <= 60)
    # Check the spiketimes are correct.
    assert np.allclose(get_array(spiketimes), get_array(spiketimes_k))

    # Get features.
    features = l.get_features()
    spikes = l.get_spikes()
    # Assert the indices in the features Pandas object correspond to the
    # spikes in the selected cluster.
    assert np.array_equal(features.index, spikes)
    # Assert the array has the right number of spikes.
    assert features.shape[0] == nspikes
    assert l.fetdim == fetdim
    assert l.nextrafet == 1

    # Get all features.
    features = l.get_features('all')
    features_k = lk.get_features('all')
    assert type(features) == pd.DataFrame
    assert features.shape[0] == nspikes_total

    # Check the features are correct.
    f = get_array(features)[:, :-1]
    f_k = get_array(features_k)[:, :-1]
    normalize_inplace(f)
    normalize_inplace(f_k)
    assert np.allclose(f, f_k, atol=1e-5)

    # Get masks.
    masks = l.get_masks('all')
    masks_k = lk.get_masks('all')
    assert masks.shape[0] == nspikes_total
    # Check the masks.
    assert np.allclose(masks.values, masks_k.values, atol=1e-2)

    # Get waveforms.
    waveforms = l.get_waveforms().values
    waveforms_k = lk.get_waveforms().values
    assert np.array_equal(waveforms.shape, (nspikes, nsamples, nchannels))
    # Check waveforms.
    normalize_inplace(waveforms)
    normalize_inplace(waveforms_k)
    assert np.allclose(waveforms, waveforms_k, atol=1e-4)

    l.close()
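# `test_hdf5_loader1` compares features and waveforms loaded through the
# Klusters and HDF5 backends only after passing them through
# `normalize_inplace`, so the comparison is insensitive to the scaling each
# file format uses. A plausible sketch of such a helper is shown below; the
# real `normalize_inplace` used by the test suite may differ.
import numpy as np


def normalize_inplace_sketch(x):
    """Rescale a float array in place to span [-1, 1] (no-op if all zeros)."""
    m = np.abs(x).max()
    if m > 0:
        x /= m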