Exemple #1
0
def _get_color(clusters_data, cl):
    """Return the stored KlustaViewa color for cluster *cl*.

    Falls back to ``next_color(cl)`` when the cluster is unknown or its
    application data has no color attribute; a stored falsy color (e.g. 0
    or None) maps to 1.
    """
    if cl in clusters_data:
        try:
            return clusters_data[cl].application_data.klustaviewa.color or 1
        except AttributeError:
            # Cluster exists but carries no color metadata.
            pass
    return next_color(cl)
Exemple #2
0
def _get_color(clusters_data, cl):
    """Return the color recorded for cluster *cl* in its application data.

    Unknown clusters, or clusters whose application data lacks the color
    attribute, get a generated color via ``next_color``; a falsy stored
    color is normalized to 1.
    """
    if cl not in clusters_data:
        return next_color(cl)
    entry = clusters_data[cl]
    try:
        color = entry.application_data.klustaviewa.color
    except AttributeError:
        # No color metadata stored for this cluster.
        return next_color(cl)
    return color or 1
Exemple #3
0
def get_clusterview_data(
    exp,
    statscache=None,
    channel_group=0,
    clustering='main',
):
    """Gather per-cluster and per-group data needed by the cluster view.

    Parameters
    ----------
    exp : experiment object exposing ``channel_groups`` and
        ``application_data`` (kwik-style) — assumed, confirm with callers.
    statscache : optional stats cache; when given, its ``cluster_quality``
        is included in the result.
    channel_group : index of the channel group to read (default 0).
    clustering : name of the clustering to use (default 'main').

    Returns
    -------
    dict with pandas Series ``cluster_colors``, ``cluster_groups``,
    ``group_colors``, ``group_names``, ``cluster_sizes``, plus
    ``cluster_quality`` when *statscache* is provided.
    """
    clusters_data = getattr(exp.channel_groups[channel_group].clusters,
                            clustering)
    cluster_groups_data = getattr(
        exp.channel_groups[channel_group].cluster_groups, clustering)

    # Derive the list of existing clusters from the spikes themselves, so
    # this does not crash when called before the clusters were added to
    # the HDF5 file.
    spike_clusters = getattr(exp.channel_groups[channel_group].spikes.clusters,
                             clustering)[:]
    clusters = np.unique(spike_clusters)
    groups = cluster_groups_data.keys()

    # Colors come from the application data when available.
    cluster_colors = pd.Series(
        [_get_color(clusters_data, cl) for cl in clusters], index=clusters)

    # Clusters absent from clusters_data, or with no stored group, default
    # to group 3.
    cluster_groups = pd.Series(
        [(clusters_data[cl].cluster_group if clusters_data[cl].cluster_group
          is not None else 3) if cl in clusters_data else 3
         for cl in clusters],
        index=clusters)

    # BUG FIX: color each group by its own id `g` — the original called
    # next_color(cl), reusing a variable leaked from a previous loop.
    group_colors = pd.Series([next_color(g) for g in groups], index=groups)
    group_names = pd.Series(
        [cluster_groups_data[g].name or 'Group' for g in groups], index=groups)

    # TODO: cache the cluster size instead of recomputing every time here
    # (in experiment class?)
    # Reuse spike_clusters fetched above instead of reading it again.
    sizes = np.bincount(spike_clusters)
    cluster_sizes = pd.Series(sizes[clusters], index=clusters)

    data = dict(
        cluster_colors=cluster_colors,
        cluster_groups=cluster_groups,
        group_colors=group_colors,
        group_names=group_names,
        cluster_sizes=cluster_sizes,
    )

    if statscache is not None:
        data['cluster_quality'] = statscache.cluster_quality
    return data
Exemple #4
0
def get_clusterview_data(exp, statscache=None, channel_group=0,
                         clustering='main',):
    """Collect the data shown in the cluster view.

    Parameters
    ----------
    exp : experiment object with ``channel_groups`` (kwik-style) —
        assumed, confirm with callers.
    statscache : optional; when not None, ``statscache.cluster_quality``
        is added to the returned dict.
    channel_group : channel group index (default 0).
    clustering : clustering name (default 'main').

    Returns
    -------
    dict of pandas Series keyed by ``cluster_colors``, ``cluster_groups``,
    ``group_colors``, ``group_names``, ``cluster_sizes`` (and optionally
    ``cluster_quality``).
    """
    clusters_data = getattr(exp.channel_groups[channel_group].clusters, clustering)
    cluster_groups_data = getattr(exp.channel_groups[channel_group].cluster_groups, clustering)

    # List existing clusters from the spike assignments so nothing crashes
    # if this runs before clusters were added to the HDF5 file.
    spike_clusters = getattr(exp.channel_groups[channel_group].spikes.clusters,
                             clustering)[:]
    clusters = np.unique(spike_clusters)
    groups = cluster_groups_data.keys()

    # Per-cluster colors, read from application data when present.
    cluster_colors = pd.Series([_get_color(clusters_data, cl)
                                for cl in clusters], index=clusters)

    # Missing clusters or missing stored group default to group 3.
    cluster_groups = pd.Series([
        (clusters_data[cl].cluster_group
                                if clusters_data[cl].cluster_group is not None else 3)
            if cl in clusters_data else 3
                               for cl in clusters], index=clusters)

    # BUG FIX: use the group id `g` here — the original used `cl`, a
    # variable leaked from an earlier comprehension.
    group_colors = pd.Series([next_color(g)
                             for g in groups], index=groups)
    group_names = pd.Series([cluster_groups_data[g].name or 'Group'
                            for g in groups], index=groups)
    # TODO: cache the cluster size instead of recomputing every time here
    # (in experiment class?)
    # spike_clusters was already fetched above; no need to re-read it.
    sizes = np.bincount(spike_clusters)
    cluster_sizes = pd.Series(sizes[clusters], index=clusters)

    data = dict(
        cluster_colors=cluster_colors,
        cluster_groups=cluster_groups,
        group_colors=group_colors,
        group_names=group_names,
        cluster_sizes=cluster_sizes,
    )

    if statscache is not None:
        data['cluster_quality'] = statscache.cluster_quality
    return data
Exemple #5
0
def get_traceview_data(exp,
        channel_group=0, clustering='main'):
    """Gather raw-trace data for the trace view.

    Returns ``dict(trace=None)`` when there is no recording or no raw
    data; otherwise a dict with the raw trace, sample rate, spike times,
    masks (or None when absent), spike clusters, colors, and the
    extraction window (``s_before``/``s_after``).
    """
    # Nothing to display without a recording carrying raw data.
    # (Use `is None`, not `== None`.)
    if (len(exp.recordings) == 0) or exp.recordings[0].raw is None:
        data = dict(
            trace=None,
            )
        return data

    rawdata = exp.recordings[0].raw
    freq = exp.application_data.spikedetekt.sample_rate
    clusters_data = getattr(exp.channel_groups[channel_group].clusters, clustering)
    clusters = sorted(clusters_data.keys())
    spikes_data = exp.channel_groups[channel_group].spikes
    channels = exp.channel_groups[channel_group].channel_order
    spiketimes = spikes_data.time_samples
    spikeclusters = getattr(spikes_data.clusters, clustering)[:]

    _, nsamples, nchannels = spikes_data.waveforms_filtered.shape

    cluster_colors = pd.Series([next_color(cl)
                       for cl in clusters], index=clusters)
    fetdim = exp.application_data.spikedetekt.n_features_per_channel

    s_before = exp.application_data.spikedetekt.extract_s_before
    s_after = exp.application_data.spikedetekt.extract_s_after

    # BUG FIX: initialize spikemasks so the dict construction below does
    # not raise NameError when no masks are stored.
    spikemasks = None
    if spikes_data.masks is not None:
        # Scatter one mask value per channel (every `fetdim`-th column of
        # the feature masks) onto the raw-data channel layout.
        spikemasks = np.zeros((spikes_data.masks.shape[0], rawdata.shape[1]))
        spikemasks[:,channels] = spikes_data.masks[:, 0:fetdim*nchannels:fetdim]

    cluster_colors = pandaize(cluster_colors, clusters)

    data = dict(
        freq=freq,
        trace=rawdata,
        spiketimes=spiketimes,
        spikemasks=spikemasks,
        spikeclusters=spikeclusters,
        cluster_colors = cluster_colors,
        s_before = s_before,
        s_after = s_after
    )
    return data
Exemple #6
0
def get_traceview_data(exp, channel_group=0, clustering='main'):
    """Collect the data shown in the trace view.

    Returns ``dict(trace=None)`` when no raw recording exists; otherwise
    a dict with the raw trace, sample rate, spike times/masks/clusters,
    cluster colors, and the waveform extraction window.
    """
    # Bail out early without raw data. (Use `is None`, not `== None`.)
    if (len(exp.recordings) == 0) or exp.recordings[0].raw is None:
        data = dict(trace=None, )
        return data

    rawdata = exp.recordings[0].raw
    freq = exp.application_data.spikedetekt.sample_rate
    clusters_data = getattr(exp.channel_groups[channel_group].clusters,
                            clustering)
    clusters = sorted(clusters_data.keys())
    spikes_data = exp.channel_groups[channel_group].spikes
    channels = exp.channel_groups[channel_group].channel_order
    spiketimes = spikes_data.time_samples
    spikeclusters = getattr(spikes_data.clusters, clustering)[:]

    _, nsamples, nchannels = spikes_data.waveforms_filtered.shape

    cluster_colors = pd.Series([next_color(cl) for cl in clusters],
                               index=clusters)
    fetdim = exp.application_data.spikedetekt.n_features_per_channel

    s_before = exp.application_data.spikedetekt.extract_s_before
    s_after = exp.application_data.spikedetekt.extract_s_after

    # BUG FIX: default to None so the dict below never hits an unbound
    # spikemasks when masks are absent.
    spikemasks = None
    if spikes_data.masks is not None:
        # One mask value per channel: take every `fetdim`-th feature-mask
        # column and place it at that channel's position in the raw layout.
        spikemasks = np.zeros((spikes_data.masks.shape[0], rawdata.shape[1]))
        spikemasks[:,
                   channels] = spikes_data.masks[:,
                                                 0:fetdim * nchannels:fetdim]

    cluster_colors = pandaize(cluster_colors, clusters)

    data = dict(freq=freq,
                trace=rawdata,
                spiketimes=spiketimes,
                spikemasks=spikemasks,
                spikeclusters=spikeclusters,
                cluster_colors=cluster_colors,
                s_before=s_before,
                s_after=s_after)
    return data
Exemple #7
0
def test_colors_1():
    """next_color advances through the palette and wraps back to 1."""
    assert all(next_color(c) == c + 1 for c in xrange(1, COLORS_COUNT))
    # The last color wraps around to the first.
    assert next_color(COLORS_COUNT) == 1
Exemple #8
0
 def get_cluster_color(self, cluster):
     """Return the palette color for *cluster*, or 0 when lookup fails."""
     try:
         color = next_color(cluster)
     except IndexError:
         # Out-of-range cluster: fall back to the default color.
         return 0
     return color
Exemple #9
0
 def get_cluster_color(self, cluster):
     """Color assigned to *cluster*; 0 if the palette lookup raises."""
     fallback = 0
     try:
         return next_color(cluster)
     except IndexError:
         return fallback