def get_correlogramsview_data(exp, correlograms, clusters=(), channel_group=0,
                              clustering='main', wizard=None,
                              nclusters_max=None, ncorrbins=50, corrbin=.001):
    """Prepare the data dictionary consumed by the CorrelogramsView.

    Parameters
    ----------
    exp : experiment object (kwik file wrapper) exposing `channel_groups`
        and `application_data.spikedetekt.sample_rate`.
    correlograms : object supporting `.submatrix(clusters)`.
    clusters : sequence of selected cluster indices (converted to int32).
    channel_group : index of the channel group to read from.
    clustering : name of the clustering attribute to use.
    wizard : forwarded as `keep_order` — presumably keeps the wizard's
        cluster ordering in the view (TODO confirm against caller).
    nclusters_max : cap on the number of displayed clusters; defaults to
        `USERPREF['correlograms_max_nclusters']` when falsy.
    ncorrbins : number of correlogram bins (forwarded to the view).
    corrbin : correlogram bin size, in seconds.

    Returns
    -------
    dict with the correlogram submatrix, baselines, selected clusters,
    cluster colors, and binning parameters.
    """
    clusters = np.array(clusters, dtype=np.int32)
    clusters_data = getattr(exp.channel_groups[channel_group].clusters,
                            clustering)
    freq = exp.application_data.spikedetekt.sample_rate
    # Get the cluster colors from the application data.
    cluster_colors = pd.Series(
        [_get_color(clusters_data, cl) for cl in clusters], index=clusters)
    # TODO: cache and optimize this
    spike_clusters = getattr(
        exp.channel_groups[channel_group].spikes.clusters, clustering)[:]
    sizes = np.bincount(spike_clusters)
    cluster_sizes = sizes[clusters]
    clusters_selected0 = clusters
    nclusters_max = nclusters_max or USERPREF['correlograms_max_nclusters']
    # Subset of selected clusters if there are too many clusters.
    if len(clusters_selected0) < nclusters_max:
        clusters_selected = clusters_selected0
    else:
        clusters_selected = clusters_selected0[:nclusters_max]
    correlograms = correlograms.submatrix(clusters_selected)
    cluster_colors = select(cluster_colors, clusters_selected)
    # Compute the baselines from the recording duration, in seconds.
    # Load the time samples once instead of twice.
    time_samples = \
        exp.channel_groups[channel_group].spikes.concatenated_time_samples[:]
    duration = (time_samples[-1] - time_samples[0]) / freq
    if duration == 0:
        # Degenerate recording: avoid a division by zero downstream.
        duration = 1.
    baselines = get_baselines(cluster_sizes, duration, corrbin)
    baselines = baselines[:nclusters_max, :nclusters_max]
    data = dict(
        correlograms=correlograms,
        baselines=baselines,
        clusters_selected=clusters_selected,
        cluster_colors=cluster_colors,
        ncorrbins=ncorrbins,
        corrbin=corrbin,
        keep_order=wizard,
    )
    return data
def get_correlogramsview_data(loader, statscache):
    """Return the data dictionary feeding the CorrelogramsView."""
    selected = loader.get_clusters_selected()
    # Keep at most the user-configured number of clusters.
    limit = USERPREF['correlograms_max_nclusters']
    if len(selected) >= limit:
        selected = selected[:limit]
    correlograms = statscache.correlograms.submatrix(selected)
    # Baselines come from the cluster sizes over the correlogram window.
    sizes = get_array(select(loader.get_cluster_sizes(), selected))
    colors = select(loader.get_cluster_colors(), selected)
    corrbin = SETTINGS.get('correlograms.corrbin', CORRBIN_DEFAULT)
    ncorrbins = SETTINGS.get('correlograms.ncorrbins', NCORRBINS_DEFAULT)
    baselines = get_baselines(sizes, corrbin * ncorrbins, corrbin)
    return dict(
        correlograms=correlograms,
        baselines=baselines,
        clusters_selected=selected,
        cluster_colors=colors,
        ncorrbins=ncorrbins,
        corrbin=corrbin,
    )
def get_correlogramsview_data(loader, statscache):
    """Build the data dict consumed by the CorrelogramsView."""
    clusters_selected0 = loader.get_clusters_selected()
    max_nclusters = USERPREF['correlograms_max_nclusters']
    # Truncate the selection when it exceeds the configured maximum.
    clusters_selected = (clusters_selected0
                         if len(clusters_selected0) < max_nclusters
                         else clusters_selected0[:max_nclusters])
    correlograms = statscache.correlograms.submatrix(clusters_selected)
    # Cluster sizes and colors restricted to the displayed clusters.
    sizes = get_array(select(loader.get_cluster_sizes(), clusters_selected))
    colors = select(loader.get_cluster_colors(), clusters_selected)
    # Binning parameters, with user-settings overrides.
    corrbin = SETTINGS.get('correlograms.corrbin', CORRBIN_DEFAULT)
    ncorrbins = SETTINGS.get('correlograms.ncorrbins', NCORRBINS_DEFAULT)
    duration = corrbin * ncorrbins
    baselines = get_baselines(sizes, duration, corrbin)
    data = dict(
        correlograms=correlograms,
        baselines=baselines,
        clusters_selected=clusters_selected,
        cluster_colors=colors,
        ncorrbins=ncorrbins,
        corrbin=corrbin,
    )
    return data
def get_correlogramsview_data(exp, correlograms, clusters=(), channel_group=0,
                              clustering='main', wizard=None,
                              nclusters_max=None, ncorrbins=50, corrbin=.001):
    """Prepare the data dictionary consumed by the CorrelogramsView.

    Parameters
    ----------
    exp : experiment object (kwik file wrapper) exposing `channel_groups`
        and `application_data.spikedetekt.sample_rate`.
    correlograms : object supporting `.submatrix(clusters)`.
    clusters : sequence of selected cluster indices (converted to int32).
    channel_group : index of the channel group to read from.
    clustering : name of the clustering attribute to use.
    wizard : forwarded as `keep_order` — presumably keeps the wizard's
        cluster ordering in the view (TODO confirm against caller).
    nclusters_max : cap on the number of displayed clusters; defaults to
        `USERPREF['correlograms_max_nclusters']` when falsy.
    ncorrbins : number of correlogram bins (forwarded to the view).
    corrbin : correlogram bin size, in seconds.

    Returns
    -------
    dict with the correlogram submatrix, baselines, selected clusters,
    cluster colors, and binning parameters.
    """
    clusters = np.array(clusters, dtype=np.int32)
    clusters_data = getattr(exp.channel_groups[channel_group].clusters,
                            clustering)
    freq = exp.application_data.spikedetekt.sample_rate
    # Cluster colors, indexed by cluster.
    cluster_colors = clusters_data.color[clusters]
    cluster_colors = pandaize(cluster_colors, clusters)
    # TODO: cache and optimize this
    spike_clusters = getattr(
        exp.channel_groups[channel_group].spikes.clusters, clustering)[:]
    sizes = np.bincount(spike_clusters)
    cluster_sizes = sizes[clusters]
    clusters_selected0 = clusters
    nclusters_max = nclusters_max or USERPREF['correlograms_max_nclusters']
    # Subset of selected clusters if there are too many clusters.
    if len(clusters_selected0) < nclusters_max:
        clusters_selected = clusters_selected0
    else:
        clusters_selected = clusters_selected0[:nclusters_max]
    correlograms = correlograms.submatrix(clusters_selected)
    cluster_colors = select(cluster_colors, clusters_selected)
    # Compute the baselines from the recording duration, in seconds.
    # Load the time samples once instead of twice.
    time_samples = \
        exp.channel_groups[channel_group].spikes.concatenated_time_samples[:]
    duration = (time_samples[-1] - time_samples[0]) / freq
    if duration == 0:
        # Degenerate recording: avoid a division by zero downstream.
        duration = 1.
    baselines = get_baselines(cluster_sizes, duration, corrbin)
    baselines = baselines[:nclusters_max, :nclusters_max]
    data = dict(
        correlograms=correlograms,
        baselines=baselines,
        clusters_selected=clusters_selected,
        cluster_colors=cluster_colors,
        ncorrbins=ncorrbins,
        corrbin=corrbin,
        keep_order=wizard,
    )
    return data