Example #1
import numpy as np
import matplotlib.pyplot as plt

# sc / sw aliases as used elsewhere on this page; older releases shipped the
# equivalent standalone packages spikecomparison / spikewidgets
import spikeinterface.comparison as sc
import spikeinterface.widgets as sw


def make_comparison_figures():
    # generate_erroneous_sorting() is defined in the original example: it returns a
    # ground-truth sorting and a deliberately erroneous tested sorting
    gt_sorting, tested_sorting = generate_erroneous_sorting()

    # Compare the tested sorting to the ground truth
    comp = sc.compare_sorter_to_ground_truth(gt_sorting, tested_sorting, gt_name=None, tested_name=None,
                                             delta_time=0.4, sampling_frequency=None, min_accuracy=0.5,
                                             exhaustive_gt=True, match_mode='hungarian', n_jobs=-1,
                                             bad_redundant_threshold=0.2, compute_labels=False, verbose=False)

    print(comp.hungarian_match_12)

    # Raw count of matching events between ground-truth and tested units
    fig, ax = plt.subplots()
    im = ax.matshow(comp.match_event_count, cmap='Greens')
    ax.set_xticks(np.arange(0, comp.match_event_count.shape[1]))
    ax.set_yticks(np.arange(0, comp.match_event_count.shape[0]))
    ax.xaxis.tick_bottom()
    ax.set_yticklabels(comp.match_event_count.index, fontsize=12)
    ax.set_xticklabels(comp.match_event_count.columns, fontsize=12)
    fig.colorbar(im)
    fig.savefig('spikecomparison_match_count.png')

    # Agreement matrix, unordered and ordered by best match
    fig, ax = plt.subplots()
    sw.plot_agreement_matrix(comp, ax=ax, ordered=False)
    im = ax.get_images()[0]
    fig.colorbar(im)
    fig.savefig('spikecomparison_agreement_unordered.png')

    fig, ax = plt.subplots()
    sw.plot_agreement_matrix(comp, ax=ax)
    im = ax.get_images()[0]
    fig.colorbar(im)
    fig.savefig('spikecomparison_agreement.png')

    # Confusion matrix
    fig, ax = plt.subplots()
    sw.plot_confusion_matrix(comp, ax=ax)
    im = ax.get_images()[0]
    fig.colorbar(im)
    fig.savefig('spikecomparison_confusion.png')

    plt.show()
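
# A hypothetical driver for the example above: calling the function writes the
# four PNG figures to the working directory and opens the plots.
if __name__ == '__main__':
    make_comparison_figures()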
# The final part of this tutorial deals with comparing spike sorting outputs.
# We can either (1) compare the spike sorting results with the ground-truth sorting :code:`sorting_true`, (2) compare
# the output of two sorters (Klusta and Mountainsort4), or (3) compare the output of multiple sorters:

comp_gt_KL = sc.compare_sorter_to_ground_truth(gt_sorting=sorting_true,
                                               tested_sorting=sorting_KL)
comp_KL_MS4 = sc.compare_two_sorters(sorting1=sorting_KL, sorting2=sorting_MS4)
comp_multi = sc.compare_multiple_sorters(
    sorting_list=[sorting_MS4, sorting_KL], name_list=['ms4', 'klusta'])

##############################################################################
# When comparing with a ground-truth sorting extractor (1), you can get the sorting performance and plot a confusion
# matrix.

comp_gt_KL.get_performance()
w_conf = sw.plot_confusion_matrix(comp_gt_KL)

##############################################################################
# When comparing two sorters (2), we can see the matching of units between sorters. For example, this is how to extract
# the unit ids of Mountainsort4 (sorting2) mapped to the units of Klusta (sorting1). Units which are not mapped have -1
# as unit id.

mapped_units = comp_KL_MS4.get_mapped_sorting1().get_mapped_unit_ids()

print('Klusta units:', sorting_KL.get_unit_ids())
print('Mapped Mountainsort4 units:', mapped_units)
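
##############################################################################
# The mapping in the opposite direction (Klusta units mapped onto Mountainsort4 units) can be obtained the same way.
# A brief sketch, assuming the symmetric :code:`get_mapped_sorting2()` accessor on the same comparison object:

mapped_units_2 = comp_KL_MS4.get_mapped_sorting2().get_mapped_unit_ids()
print('Mountainsort4 units:', sorting_MS4.get_unit_ids())
print('Mapped Klusta units:', mapped_units_2)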

##############################################################################
# When comparing multiple sorters (3), you can extract a :code:`SortingExtractor` object with units in agreement
# between sorters. You can also plot a graph showing how the units are matched between the sorters.
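#
# For example, a minimal sketch (assuming the older API names :code:`get_agreement_sorting(minimum_matching=...)`
# and :code:`sw.plot_multicomp_graph`, which have changed across SpikeInterface versions):

# units found by at least two sorters (keyword name assumed, see note above)
agreement_sorting = comp_multi.get_agreement_sorting(minimum_matching=2)
print('Units in agreement:', agreement_sorting.get_unit_ids())

# graph of how units are matched across sorters
w_multi = sw.plot_multicomp_graph(comp_multi)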
Example #3
perf = cmp_gt_HS.get_performance()

##############################################################################
# Let's use a seaborn swarm plot

fig1, ax1 = plt.subplots()
perf2 = pd.melt(perf, var_name='measurement')
ax1 = sns.swarmplot(data=perf2, x='measurement', y='value', ax=ax1)
ax1.set_xticklabels(labels=ax1.get_xticklabels(), rotation=45)

##############################################################################
# The confusion matrix is also a good summary of the score, as it has
# the same shape as the agreement matrix, but it contains an extra column for FN
# and an extra row for FP.

sw.plot_confusion_matrix(cmp_gt_HS)

##############################################################################
# We can query the well- and badly-detected units. By default, the threshold
# on accuracy is 0.95.

cmp_gt_HS.get_well_detected_units()
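
##############################################################################
# The same query can be made stricter; a sketch assuming the optional :code:`well_detected_score` keyword
# (the same score name used by :code:`compare_sorter_to_ground_truth` elsewhere on this page):

cmp_gt_HS.get_well_detected_units(well_detected_score=0.99)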

##############################################################################

cmp_gt_HS.get_false_positive_units()

##############################################################################

cmp_gt_HS.get_redundant_units()
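
##############################################################################
# A compact text summary of these counts is also available (the same :code:`print_summary()` used for the
# study comparisons later on this page):

cmp_gt_HS.print_summary()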
Example #4
def test_confusion(self):
    sw.plot_confusion_matrix(self._gt_comp, count_text=True)
                                               sorting_out=hither.File())
# Aggregating the output of the sorters
sorting_MS4 = AutoSortingExtractor(result_MS4.outputs.sorting_out._path)
sorting_SP = AutoSortingExtractor(
    result_spyKingCircus.outputs.sorting_out._path)

# Comparing each sorter to the ground truth and plotting the confusion matrix
comp_MATLAB = sc.compare_sorter_to_ground_truth(gtOutput,
                                                sortingPipeline,
                                                sampling_frequency=sampleRate,
                                                delta_time=3,
                                                match_score=0.5,
                                                chance_score=0.1,
                                                well_detected_score=0.1,
                                                exhaustive_gt=True)
w_comp_MATLAB = sw.plot_confusion_matrix(comp_MATLAB, count_text=True)
plt.show()

comp_MS4 = sc.compare_sorter_to_ground_truth(gtOutput,
                                             sorting_MS4,
                                             sampling_frequency=sampleRate,
                                             delta_time=3,
                                             match_score=0.5,
                                             chance_score=0.1,
                                             well_detected_score=0.1,
                                             exhaustive_gt=True)
w_comp_MS4 = sw.plot_confusion_matrix(comp_MS4, count_text=True)
plt.show()

comp_SP = sc.compare_sorter_to_ground_truth(gtOutput,
                                            sorting_SP,
Example #6
# Widgets using SortingComparison
# ---------------------------------
#
# We can compare the spike sorting output to the ground-truth sorting :code:`sorting_true` using the
# :code:`comparison` module. :code:`comp_MS4` and :code:`comp_KL` are :code:`SortingComparison` objects

import spikeinterface.comparison as sc

comp_MS4 = sc.compare_sorter_to_ground_truth(sorting_true, sorting_MS4)
comp_KL = sc.compare_sorter_to_ground_truth(sorting_true, sorting_KL)

##############################################################################
# plot_confusion_matrix()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~

w_comp_MS4 = sw.plot_confusion_matrix(comp_MS4, count_text=False)
w_comp_KL = sw.plot_confusion_matrix(comp_KL, count_text=False)

##############################################################################
# plot_agreement_matrix()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~

w_agr_MS4 = sw.plot_agreement_matrix(comp_MS4, count_text=False)

##############################################################################
# plot_sorting_performance()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We can also plot a performance metric (e.g. accuracy, recall, precision) with respect to a quality metric, for
# example signal-to-noise ratio. Quality metrics can be computed using the :code:`toolkit.validation` submodule.
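#
# A minimal sketch of that last step (assuming a :code:`recording` object is available and assuming the older
# :code:`toolkit.validation` / widgets-era names :code:`compute_snrs` and :code:`plot_sorting_performance`;
# exact function and argument names vary across SpikeInterface versions):

import spikeinterface.toolkit as st

# per-unit SNR on the ground-truth sorting, stored as a unit property (assumed helper and keyword names)
st.validation.compute_snrs(sorting_true, recording, save_as_property=True)

# accuracy of each ground-truth unit as a function of its SNR (assumed argument names)
w_perf = sw.plot_sorting_performance(comp_MS4, property_name='snr', metric='accuracy')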
Example #7
perf = cmp_gt_MS4.get_performance()

##############################################################################
# Let's use a seaborn swarm plot

fig1, ax1 = plt.subplots()
perf2 = pd.melt(perf, var_name='measurement')
ax1 = sns.swarmplot(data=perf2, x='measurement', y='value', ax=ax1)
ax1.set_xticklabels(labels=ax1.get_xticklabels(), rotation=45)

##############################################################################
# The confusion matrix is also a good summary of the score, as it has
# the same shape as the agreement matrix, but it contains an extra column for FN
# and an extra row for FP.

sw.plot_confusion_matrix(cmp_gt_MS4)

##############################################################################
# We can query the well- and badly-detected units. By default, the threshold
# on accuracy is 0.95.

cmp_gt_MS4.get_well_detected_units()

##############################################################################

cmp_gt_MS4.get_false_positive_units()

##############################################################################

cmp_gt_MS4.get_redundant_units()
#  So you can access all the individual results in detail.
#  
#  Note that exhaustive_gt=True should be used when you know exactly how many
#  units are in the ground truth (for synthetic datasets).

study.run_comparisons(exhaustive_gt=True)

for (rec_name, sorter_name), comp in study.comparisons.items():
    print('*' * 10)
    print(rec_name, sorter_name)
    print(comp.count)  # raw counting of tp/fp/...
    comp.print_summary()
    perf_unit = comp.get_performance(method='by_unit')
    perf_avg = comp.get_performance(method='pooled_with_average')
    m = comp.get_confusion_matrix()
    w_comp = sw.plot_confusion_matrix(comp)
    w_comp.ax.set_title(rec_name + ' - ' + sorter_name)

##############################################################################
#  Collect synthetic dataframes and display
# -------------------------------------------------------------
# 
# As shown previously, the performance is returned as a pandas dataframe.
# The :code:`aggregate_dataframes` method gathers all the outputs in
# the study folder and merges them into a single dataframe.

dataframes = study.aggregate_dataframes()

##############################################################################
# Pandas dataframes can be nicely displayed as tables in the notebook.
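# For example (the dictionary keys below, such as 'run_times', depend on
# what the study produced and are assumed here):

print(dataframes.keys())
print(dataframes['run_times'])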