# Imports needed by the tests below (module paths assume a recent
# spikeinterface version and may differ between releases).
import numpy as np

from spikeinterface import WaveformExtractor, extract_waveforms, load_extractor
from spikeinterface.postprocessing import get_template_channel_sparsity
from spikeinterface.preprocessing import scale
from spikeinterface.qualitymetrics import compute_quality_metrics


def test_get_template_channel_sparsity():
    we = WaveformExtractor.load_from_folder('toy_waveforms')

    sparsity = get_template_channel_sparsity(we, method='best_channels', outputs='id', num_channels=5)
    print(sparsity)
    sparsity = get_template_channel_sparsity(we, method='best_channels', outputs='index', num_channels=5)
    print(sparsity)

    sparsity = get_template_channel_sparsity(we, method='radius', outputs='id', radius_um=50)
    print(sparsity)
    sparsity = get_template_channel_sparsity(we, method='radius', outputs='index', radius_um=50)
    print(sparsity)

    sparsity = get_template_channel_sparsity(we, method='threshold', outputs='id', threshold=3)
    print(sparsity)
    sparsity = get_template_channel_sparsity(we, method='threshold', outputs='index', threshold=3)
    print(sparsity)

    # load from folder because sorting properties must be loaded
    rec = load_extractor('toy_rec')
    sort = load_extractor('toy_sort')
    we = extract_waveforms(rec, sort, 'toy_waveforms_1')

    sparsity = get_template_channel_sparsity(we, method='by_property', outputs='id', by_property="group")
    print(sparsity)
    sparsity = get_template_channel_sparsity(we, method='by_property', outputs='index', by_property="group")
    print(sparsity)
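
# Not part of the original test: a minimal usage sketch of the sparsity
# mapping it exercises. With outputs='index', get_template_channel_sparsity
# returns a dict from unit id to the indices of the selected channels, which
# can be used to slice the dense templates down to their sparse footprint.
def sparse_templates_sketch(we, sparsity):
    for unit_id, channel_inds in sparsity.items():
        template = we.get_template(unit_id)          # (num_samples, num_channels)
        sparse_template = template[:, channel_inds]  # keep only the selected channels
        print(unit_id, sparse_template.shape)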
def test_compute_quality_metrics_peak_sign():
    rec = load_extractor('toy_rec')
    sort = load_extractor('toy_sorting')

    # invert recording
    rec_inv = scale(rec, gain=-1.)

    we = WaveformExtractor.load_from_folder('toy_waveforms')
    print(we)

    we_inv = WaveformExtractor.create(rec_inv, sort, 'toy_waveforms_inv')
    we_inv.set_params(ms_before=3., ms_after=4., max_spikes_per_unit=500)
    we_inv.run_extract_waveforms(n_jobs=1, chunk_size=30000)
    print(we_inv)

    # without PC
    metrics = compute_quality_metrics(we, metric_names=['snr', 'amplitude_cutoff'], peak_sign="neg")
    metrics_inv = compute_quality_metrics(we_inv, metric_names=['snr', 'amplitude_cutoff'], peak_sign="pos")

    assert np.allclose(metrics["snr"].values, metrics_inv["snr"].values)
    assert np.allclose(metrics["amplitude_cutoff"].values, metrics_inv["amplitude_cutoff"].values)
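
# A standalone illustration (not from the test suite) of the symmetry the
# assertions above rely on: negating the traces, as scale(rec, gain=-1.)
# does, turns a negative spike peak into a positive one of the same
# magnitude, so amplitude-based metrics computed with the opposite
# peak_sign should agree.
def peak_sign_symmetry_sketch():
    trace = np.array([0., -1., -5., -2., 0.])
    inverted = -trace
    assert np.min(trace) == -np.max(inverted)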
###############################################################################
# A recording can be "dumped" (exported) to:
# * a dict
# * a json file
# * a pickle file
#
# The "dump" operation is lazy, i.e., the traces are not exported.
# Only the information about how to reconstruct the recording is dumped:

from spikeinterface import load_extractor
from pprint import pprint

d = recording2.to_dict()
pprint(d)

recording2_loaded = load_extractor(d)
print(recording2_loaded)

###############################################################################
# The dictionary can also be dumped directly to a JSON file on disk:

recording2.dump('my_recording.json')

recording2_loaded = load_extractor('my_recording.json')
print(recording2_loaded)

###############################################################################
# **IMPORTANT**: the "dump" operation DOES NOT copy the traces to disk!
#
# If you wish to also store the traces in a compact way you need to use the
# :code:`save()` function. This operation is very useful to save traces obtained
# after long computations (e.g., filtering):
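
# (A minimal sketch of that save step; the folder name :code:`my_recording`
# is an illustrative assumption, not from the original tutorial.)

recording2_saved = recording2.save(folder='my_recording')
print(recording2_saved)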
###############################################################################
# A sorting can be "dumped" (exported) to:
# * a dict
# * a json file
# * a pickle file
#
# The "dump" operation is lazy, i.e., the spike trains are not exported.
# Only the information about how to reconstruct the sorting is dumped:

from spikeinterface import load_extractor
from pprint import pprint

d = sorting2.to_dict()
pprint(d)

sorting2_loaded = load_extractor(d)
print(sorting2_loaded)

###############################################################################
# The dictionary can also be dumped directly to a JSON file on disk:

sorting2.dump('my_sorting.json')

sorting2_loaded = load_extractor('my_sorting.json')
print(sorting2_loaded)

###############################################################################
# **IMPORTANT**: the "dump" operation DOES NOT copy the spike trains to disk!
#
# If you wish to also store the spike trains in a compact way you need to use
# the :code:`save()` function:
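
# (A minimal sketch of that save step; the folder name :code:`my_sorting`
# is an illustrative assumption, not from the original tutorial.)

sorting2_saved = sorting2.save(folder='my_sorting')
print(sorting2_saved)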