Example 1
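These snippets are test functions collected from the tridesclous spike-sorting package; the listing shows each function without its module preamble. A minimal sketch of the imports the examples rely on, inferred from the names used below (the exact module paths are an assumption, not part of the original listing):

import os
import shutil
import time

import numpy as np
import matplotlib.pyplot as plt

# the main classes are exported at the tridesclous package top level
from tridesclous import DataIO, CatalogueConstructor, Peeler, download_dataset

Helpers such as apply_all_catalogue_steps, get_auto_params_for_catalogue, detect_peaks_in_chunk, plot_signals, and the GUI names mkQApp / CatalogueWindow come from tridesclous submodules, and flags like ON_CI_CLOUD from the test suite's own setup; treat their import paths as assumptions.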
def test_get_auto_params_for_catalogue():
    if os.path.exists('test_cataloguetools'):
        shutil.rmtree('test_cataloguetools')

    dataio = DataIO(dirname='test_cataloguetools')
    #~ localdir, filenames, params = download_dataset(name='olfactory_bulb')
    localdir, filenames, params = download_dataset(name='locust')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)

    params = get_auto_params_for_catalogue(dataio)
    print(params)
    print(params['cluster_method'])
    print(params['cluster_kargs'])
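Here params is a nested dict of automatically chosen settings for the catalogue step; the two final prints show which clustering method was selected and the keyword arguments (cluster_kargs) that go with it.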
Example 2
def test_apply_all_catalogue_steps():
    if os.path.exists('test_cataloguetools'):
        shutil.rmtree('test_cataloguetools')
        
    dataio = DataIO(dirname='test_cataloguetools')
    #~ localdir, filenames, params = download_dataset(name='olfactory_bulb')
    localdir, filenames, params = download_dataset(name='locust')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    
    params = get_auto_params_for_catalogue(dataio)
    
    cc = CatalogueConstructor(dataio, chan_grp=0)
    apply_all_catalogue_steps(cc, params, verbose=True)
Example 3
def test_peeler():
    dataio = DataIO(dirname='test_peeler')
    print(dataio)
    initial_catalogue = dataio.load_catalogue(chan_grp=0)

    peeler = Peeler(dataio)

    peeler.change_params(catalogue=initial_catalogue, chunksize=1024)

    t1 = time.perf_counter()
    peeler.run(progressbar=False)
    t2 = time.perf_counter()
    print('peeler.run_loop', t2 - t1)
Example 4
def test_peeler_with_and_without_preprocessor():

    if ON_CI_CLOUD:
        engines = ['geometrical']
    else:
        engines = ['geometrical', 'geometrical_opencl']

    #~ engines = ['geometrical_opencl']

    for engine in engines:
        for i in range(2):
            #~ for i in [1]:

            print()
            if i == 0:
                print(engine, 'without processing')
                dataio = DataIO(dirname='test_peeler')
            else:
                print(engine, 'with processing')
                dataio = DataIO(dirname='test_peeler2')

            catalogue = dataio.load_catalogue(chan_grp=0)

            peeler = Peeler(dataio)
            peeler.change_params(engine=engine,
                                 catalogue=catalogue,
                                 chunksize=1024)
            t1 = time.perf_counter()
            peeler.run(progressbar=False)
            t2 = time.perf_counter()
            print('peeler run_time', t2 - t1)
            spikes = dataio.get_spikes(chan_grp=0).copy()
            labels = catalogue['clusters']['cluster_label']
            count_by_label = [
                np.sum(spikes['cluster_label'] == label) for label in labels
            ]
            print(labels)
            print(count_by_label)
Example 5
def test_peeler_several_chunksize():

    dataio = DataIO(dirname='test_peeler')
    print(dataio)
    catalogue = dataio.load_catalogue(chan_grp=0)

    all_spikes = []
    sig_length = dataio.get_segment_length(0)
    chunksizes = [174, 512, 1024, 1023, 10000, 150000]
    #~ chunksizes = [512, 1024,]
    for chunksize in chunksizes:
        print('**', chunksize, '**')
        peeler = Peeler(dataio)
        peeler.change_params(catalogue=catalogue, chunksize=chunksize)
        t1 = time.perf_counter()
        peeler.run_offline_loop_one_segment(seg_num=0, progressbar=False)
        t2 = time.perf_counter()
        print('n_side', peeler.n_side, 'n_span', peeler.n_span, 'peak_width',
              peeler.peak_width)
        print('peeler.run_loop', t2 - t1)

        spikes = dataio.get_spikes(seg_num=0, chan_grp=0)
        all_spikes.append(spikes)

    # different chunksizes stop at slightly different samples, so clip
    # every result to the earliest final spike before comparing
    last = min([spikes[-1]['index'] for spikes in all_spikes])
    for i, chunksize in enumerate(chunksizes):
        spikes = all_spikes[i]
        all_spikes[i] = spikes[spikes['index'] <= last]

    previous_spikes = None
    for i, chunksize in enumerate(chunksizes):
        print('**', chunksize, '**')
        spikes = all_spikes[i]
        is_sorted = np.all(np.diff(spikes['index']) >= 0)
        assert is_sorted

        labeled_spike = spikes[spikes['cluster_label'] >= 0]
        unlabeled_spike = spikes[spikes['cluster_label'] < 0]
        print('labeled_spike.size', labeled_spike.size, 'unlabeled_spike.size',
              unlabeled_spike.size)

        if previous_spikes is not None:
            assert previous_spikes.size == spikes.size
            np.testing.assert_array_equal(previous_spikes['index'],
                                          spikes['index'])
            np.testing.assert_array_equal(previous_spikes['cluster_label'],
                                          spikes['cluster_label'])

        previous_spikes = spikes
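The assertions verify that, once clipped to the common span, spike indexes and cluster labels are identical for every chunksize: the peeler's output does not depend on the buffer size.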
Example 6
def test_plot_signals():
    dataio = DataIO('test_matplotlibplot')
    catalogueconstructor = CatalogueConstructor(dataio=dataio, chan_grp=0)

    plot_signals(dataio, signal_type='initial')
    plot_signals(dataio, signal_type='processed')
    plot_signals(catalogueconstructor,
                 signal_type='processed',
                 with_peaks=True,
                 time_slice=(2., 3))
    plot_signals(catalogueconstructor,
                 signal_type='processed',
                 with_span=True,
                 time_slice=(2., 3))
Example 7
def test_all_decomposition():
    dirname = 'test_catalogueconstructor'
    
    dataio = DataIO(dirname=dirname)
    cc = catalogueconstructor = CatalogueConstructor(dataio=dataio)
    print(dataio)
    print(cc)
    
    methods = ['global_pca', 'pca_by_channel', 'peak_max', 'neighborhood_pca']  # other methods: 'tsne', 'pca_by_channel_then_tsne'
    for method in methods:
        t0 = time.perf_counter()
        cc.extract_some_features(method=method)
        t1 = time.perf_counter()
        print('extract_some_features', method, t1 - t0)
Example 8
def test_auto_merge():
    dirname = 'test_cleancluster'

    restore_savepoint(dirname, savepoint='after_trash_not_aligned')

    dataio = DataIO(dirname=dirname)
    cc = CatalogueConstructor(dataio=dataio)

    t1 = time.perf_counter()
    cc.auto_merge_cluster()
    t2 = time.perf_counter()
    print('auto_merge_cluster', t2 - t1)

    cc.create_savepoint(name='after_auto_merge_cluster')
Example 9
def test_pruningshears():

    dirname = 'test_cluster'

    dataio = DataIO(dirname=dirname)
    print(dataio)
    cc = CatalogueConstructor(dataio=dataio)

    cc.extract_some_features(method='pca_by_channel')
    #~ print(dataio)
    #~ print(cc)

    t0 = time.perf_counter()
    cc.find_clusters(method='pruningshears', print_debug=True)
    t1 = time.perf_counter()
    print('cluster', t1 - t0)
Example 10
def test_trash_low_extremum():
    dirname = 'test_cleancluster'

    restore_savepoint(dirname, savepoint='after_auto_merge_cluster')

    dataio = DataIO(dirname=dirname)
    cc = CatalogueConstructor(dataio=dataio)

    print(cc)

    t1 = time.perf_counter()
    cc.trash_low_extremum()
    t2 = time.perf_counter()
    print('trash_low_extremum', t2 - t1)

    cc.create_savepoint(name='after_trash_low_extremum')
    print(cc)
Example 11
def setup_catalogue():
    if os.path.exists('test_peeler'):
        shutil.rmtree('test_peeler')

    dataio = DataIO(dirname='test_peeler')
    localdir, filenames, params = download_dataset(name='olfactory_bulb')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    dataio.add_one_channel_group(channels=[5, 6, 7, 8, 9])

    catalogueconstructor = CatalogueConstructor(dataio=dataio)

    fullchain_kargs = {
        'duration': 60.,
        'preprocessor': {
            'highpass_freq': 300.,
            'chunksize': 1024,
            'lostfront_chunksize': 100,
        },
        'peak_detector': {
            'peak_sign': '-',
            'relative_threshold': 7.,
            'peak_span': 0.0005,
            #~ 'peak_span' : 0.000,
        },
        'extract_waveforms': {
            'n_left': -25,
            'n_right': 40,
            'nb_max': 10000,
        },
        'clean_waveforms': {
            'alien_value_threshold': 60.,
        },
        'noise_snippet': {
            'nb_snippet': 300,
        },
    }

    apply_all_catalogue_steps(catalogueconstructor,
                              fullchain_kargs,
                              'global_pca', {'n_components': 12},
                              'kmeans', {'n_clusters': 12},
                              verbose=True)
    catalogueconstructor.trash_small_cluster()

    catalogueconstructor.make_catalogue_for_peeler()
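setup_catalogue is the fixture that builds and populates the 'test_peeler' directory; the peeler examples that open DataIO(dirname='test_peeler') assume it has already run.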
Example 12
def test_peeler_sparse_opencl():
    dataio = DataIO(dirname='test_peeler')
    initial_catalogue = dataio.load_catalogue(chan_grp=0)

    peeler = Peeler(dataio)

    peeler.change_params(
        catalogue=initial_catalogue,
        chunksize=1024,
        use_sparse_template=True,
        sparse_threshold_mad=1.5,
        use_opencl_with_sparse=True,
    )

    t1 = time.perf_counter()
    peeler.run(progressbar=False)
    t2 = time.perf_counter()
    print('peeler.run_loop', t2 - t1)
Example 13
def test_auto_split():
    dirname = 'test_cleancluster'

    restore_savepoint(dirname, savepoint='after_find_clusters')

    dataio = DataIO(dirname=dirname)
    cc = CatalogueConstructor(dataio=dataio)

    cc.find_clusters(method='pruningshears')
    print(cc)
    print(cc.n_jobs)
    t1 = time.perf_counter()
    cc.auto_split_cluster()
    t2 = time.perf_counter()
    print('auto_split_cluster', t2 - t1)

    print(cc)

    cc.create_savepoint(name='after_auto_split')
Example 14
def debug_one_decomposition():
    dirname = 'test_catalogueconstructor'

    dataio = DataIO(dirname=dirname)
    cc = catalogueconstructor = CatalogueConstructor(dataio=dataio)
    print(dataio)
    print(cc)

    t0 = time.perf_counter()
    #~ cc.extract_some_features(method='global_pca', n_components=7)
    #~ cc.extract_some_features(method='peak_max')
    #~ cc.extract_some_features(method='pca_by_channel', n_components_by_channel=3)
    cc.extract_some_features(method='neighborhood_pca',
                             n_components_by_neighborhood=3,
                             radius_um=500)

    print(cc.channel_to_features)
    print(cc.channel_to_features.shape)
    t1 = time.perf_counter()
    print('extract_some_features', t1 - t0)
Example 15
def test_sawchaincut():
    dirname = 'test_cluster'

    dataio = DataIO(dirname=dirname)
    cc = CatalogueConstructor(dataio=dataio)
    #~ print(dataio)
    #~ print(cc)

    t0 = time.perf_counter()
    cc.find_clusters(method='sawchaincut', print_debug=True)
    t1 = time.perf_counter()
    print('cluster', t1 - t0)
    #~ exit()

    #~ print(cc)

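    # when the module is run as a script (not under a test runner),
    # open the result in the interactive CatalogueWindow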
    if __name__ == '__main__':
        app = mkQApp()
        win = CatalogueWindow(cc)
        win.show()
        app.exec_()
Example 16
def test_pruningshears():

    dirname = 'test_cluster'

    dataio = DataIO(dirname=dirname)
    print(dataio)
    cc = CatalogueConstructor(dataio=dataio)

    #~ cc.extract_some_features(method='pca_by_channel')
    #~ print(dataio)
    #~ print(cc)

    t0 = time.perf_counter()
    cc.find_clusters(method='pruningshears', print_debug=True)
    t1 = time.perf_counter()
    print('cluster', t1 - t0)

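    # when the module is run as a script (not under a test runner),
    # open the result in the interactive CatalogueWindow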
    if __name__ == '__main__':
        app = mkQApp()
        win = CatalogueWindow(cc)
        win.show()
        app.exec_()
Example 17
def test_peeler_classic():
    dataio = DataIO(dirname='test_peeler')
    catalogue = dataio.load_catalogue(chan_grp=0, name='with_oversampling')

    peeler = Peeler(dataio)

    peeler.change_params(engine='classic',
                         catalogue=catalogue,
                         chunksize=1024,
                         argmin_method='numba')
    #~ argmin_method='opencl')

    t1 = time.perf_counter()
    peeler.run(progressbar=False)
    t2 = time.perf_counter()
    print('peeler.run_loop', t2 - t1)

    spikes = dataio.get_spikes(chan_grp=0).copy()
    labels = catalogue['clusters']['cluster_label']
    count_by_label = [
        np.sum(spikes['cluster_label'] == label) for label in labels
    ]
    print(labels)
    print(count_by_label)
Example 18
def test_peeler_argmin_methods():
    dataio = DataIO(dirname='test_peeler')
    catalogue = dataio.load_catalogue(chan_grp=0, name='with_oversampling')

    argmin_methods = ['opencl', 'numba', 'pythran']
    #~ argmin_methods = ['opencl', 'pythran']

    for argmin_method in argmin_methods:

        peeler = Peeler(dataio)

        peeler.change_params(
            engine='classic',
            catalogue=catalogue,
            chunksize=1024,
            argmin_method=argmin_method,
            cl_platform_index=0,
            cl_device_index=0,
        )

        t1 = time.perf_counter()
        peeler.run(progressbar=False)
        t2 = time.perf_counter()
        print(argmin_method, 'peeler.run_loop', t2 - t1)
Example 19
def test_compare_peeler():

    dataio = DataIO(dirname='test_peeler')
    print(dataio)

    all_spikes = []
    #~ for peeler_class in [Peeler,]:
    #~ for peeler_class in [Peeler_OpenCl,]:
    for peeler_class in [Peeler, Peeler_OpenCl]:
        print()
        print(peeler_class)
        initial_catalogue = dataio.load_catalogue(chan_grp=0)

        peeler = peeler_class(dataio)

        peeler.change_params(catalogue=initial_catalogue, chunksize=1024)

        t1 = time.perf_counter()
        #~ peeler.run_offline_loop_one_segment(duration=None, progressbar=False)
        peeler.run_offline_loop_one_segment(duration=4., progressbar=False)
        t2 = time.perf_counter()
        print('peeler.run_loop', t2 - t1)

        all_spikes.append(dataio.get_spikes(chan_grp=0).copy())
Example 20
def test_peeler_empty_catalogue():
    """
    This test peeler with empty catalogue.
    This is like a peak detector.
    Check several chunksize and compare to offline-one-buffer.
    
    """
    dataio = DataIO(dirname='test_peeler')
    #~ print(dataio)
    catalogue = dataio.load_catalogue(chan_grp=0)
    
    # empty the catalogue so that only peak detection is exercised
    s = catalogue['centers0'].shape
    empty_centers = np.zeros((0, s[1], s[2]), dtype='float32')
    catalogue['centers0'] = empty_centers
    catalogue['centers1'] = empty_centers
    catalogue['centers2'] = empty_centers
    catalogue['cluster_labels'] = np.zeros(0, dtype=catalogue['cluster_labels'].dtype)
        
    
    sig_length = dataio.get_segment_length(0)
    chunksizes = [101, 174, 512, 1024, 1023, 10000, 150000]
    #~ chunksizes = [1024,]
    
    previous_peak = None
    
    for chunksize in chunksizes:
        print('**', chunksize, '**')
        peeler = Peeler(dataio)
        peeler.change_params(engine='classic', catalogue=catalogue,
                             chunksize=chunksize)
        t1 = time.perf_counter()
        #~ peeler.run(progressbar=False)
        peeler.run_offline_loop_one_segment(seg_num=0, progressbar=False)
        t2 = time.perf_counter()

        #~ print('n_side', peeler.n_side, 'n_span', peeler.n_span, 'peak_width', peeler.peak_width)
        #~ print('peeler.run_loop', t2-t1)

        spikes = dataio.get_spikes(seg_num=0, chan_grp=0)
        labeled_spike = spikes[spikes['cluster_label'] >= 0]
        unlabeled_spike = spikes[spikes['cluster_label'] < 0]
        assert labeled_spike.size == 0

        is_sorted = np.all(np.diff(unlabeled_spike['index']) >= 0)
        assert is_sorted

        online_peaks = unlabeled_spike['index']
        engine = peeler.peeler_engine

        i_stop = (sig_length
                  - catalogue['signal_preprocessor_params']['lostfront_chunksize']
                  - engine.n_side + engine.n_span)
        sigs = dataio.get_signals_chunk(signal_type='processed', i_stop=i_stop)
        offline_peaks = detect_peaks_in_chunk(sigs, engine.n_span,
                                              engine.relative_threshold,
                                              engine.peak_sign)

        offline_peaks = offline_peaks[offline_peaks <= online_peaks[-1]]

        assert offline_peaks.size == online_peaks.size
        np.testing.assert_array_equal(offline_peaks, online_peaks)

        if previous_peak is not None:
            last = min(previous_peak[-1], online_peaks[-1])
            previous_peak = previous_peak[previous_peak <= last]
            online_peaks_clipped = online_peaks[online_peaks <= last]
            assert previous_peak.size == online_peaks_clipped.size
            np.testing.assert_array_equal(previous_peak, online_peaks_clipped)

        previous_peak = online_peaks
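With every template removed the peeler can only emit unlabeled spikes, so the test reduces to checking that chunked online peak detection reproduces the offline single-buffer result from detect_peaks_in_chunk, for each chunksize and consistently between chunksizes.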
Example 21
def test_summary_noise():
    dataio = DataIO(dirname='test_report')

    summary_noise(dataio, chan_grp=0)
Example 22
def test_create_savepoint_catalogue_constructor():
    dataio = DataIO(dirname='test_catalogueconstructor')
    catalogueconstructor = CatalogueConstructor(dataio=dataio)
    copy_path = catalogueconstructor.create_savepoint()
    print(copy_path)
Example 23
def test_export_spikes():
    dataio = DataIO(dirname='test_peeler')
    dataio.export_spikes()
Example 24
def test_ratio_amplitude():
    dataio = DataIO(dirname='test_catalogueconstructor')
    catalogueconstructor = CatalogueConstructor(dataio=dataio)

    pairs = catalogueconstructor.detect_similar_waveform_ratio(0.5)
    print(pairs)
Example 25
def compare_nb_waveforms():
    if os.path.exists('test_catalogueconstructor'):
        shutil.rmtree('test_catalogueconstructor')

    dataio = DataIO(dirname='test_catalogueconstructor')
    localdir, filenames, params = download_dataset(name='olfactory_bulb')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    dataio.add_one_channel_group(channels=range(14), chan_grp=0)

    cc = CatalogueConstructor(dataio=dataio)

    cc.set_global_params(
        chunksize=1024,
        memory_mode='ram',
        mode='dense',
        n_jobs=1,
        #~ adjacency_radius_um=None,
    )

    cc.set_preprocessor_params(
        #signal preprocessor
        highpass_freq=300,
        lowpass_freq=5000.,
        common_ref_removal=False,
        smooth_size=0,
        lostfront_chunksize=None)

    cc.set_peak_detector_params(
        #peak detector
        method='global',
        engine='numpy',
        peak_sign='-',
        relative_threshold=7,
        peak_span_ms=0.5,
        #~ adjacency_radius_um=None,
    )

    t1 = time.perf_counter()
    cc.estimate_signals_noise(seg_num=0, duration=10.)
    t2 = time.perf_counter()
    print('estimate_signals_noise', t2 - t1)

    t1 = time.perf_counter()
    cc.run_signalprocessor()
    t2 = time.perf_counter()
    print('run_signalprocessor', t2 - t1)

    print(cc)

    fig, axs = plt.subplots(nrows=2)

    cc.set_waveform_extractor_params(wf_left_ms=-2.0, wf_right_ms=3.0)

    t1 = time.perf_counter()
    cc.sample_some_peaks(mode='rand', nb_max=5000)
    t2 = time.perf_counter()
    print('sample_some_peaks', t2 - t1)

    colors = ['r', 'g', 'b', 'y']
    for i, nb_max in enumerate([100, 500, 1000, 2000]):

        cc.sample_some_peaks(mode='rand', nb_max=nb_max)
        #~ catalogueconstructor.extract_some_waveforms(wf_left_ms=-2.0, wf_right_ms=3.0,  nb_max=nb_max)
        #~ print(catalogueconstructor.some_waveforms.shape)
        t1 = time.perf_counter()
        wf = cc.get_some_waveforms()
        t2 = time.perf_counter()
        print('get_some_waveforms', nb_max, t2 - t1)

        #~ wf = catalogueconstructor.some_waveforms
        wf = wf.swapaxes(1, 2).reshape(wf.shape[0], -1)
        axs[0].plot(np.median(wf, axis=0),
                    color=colors[i],
                    label='nb_max {}'.format(nb_max))

        axs[1].plot(np.mean(wf, axis=0),
                    color=colors[i],
                    label='nb_max {}'.format(nb_max))

    axs[0].legend()
    axs[0].set_title('median')
    axs[1].set_title('mean')
    plt.show()
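The plot gives a quick visual check that the median and mean waveforms stabilize as nb_max grows, suggesting how many sampled peaks are enough for a stable estimate.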
Example 26
def test_catalogue_constructor():
    if os.path.exists('test_catalogueconstructor'):
        shutil.rmtree('test_catalogueconstructor')

    dataio = DataIO(dirname='test_catalogueconstructor')
    localdir, filenames, params = download_dataset(name='olfactory_bulb')
    #~ localdir, filenames, params = download_dataset(name='locust')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)

    channels = range(14)
    #~ channels=list(range(4))
    dataio.add_one_channel_group(channels=channels, chan_grp=0)

    cc = CatalogueConstructor(dataio=dataio)

    for memory_mode in ['ram', 'memmap']:
        for mode in ['dense', 'sparse']:

            print('*' * 5)
            print('memory_mode', memory_mode, 'mode', mode)

            if mode == 'dense':
                peak_engine = 'numpy'
                peak_method = 'global'
                adjacency_radius_um = None
            elif mode == 'sparse':
                peak_engine = 'numpy'
                peak_method = 'geometrical'
                adjacency_radius_um = 450.

            cc.set_global_params(
                chunksize=1024,
                memory_mode=memory_mode,
                mode=mode,
                n_jobs=1,
                #~ adjacency_radius_um=adjacency_radius_um,
            )

            cc.set_preprocessor_params(
                #signal preprocessor
                highpass_freq=300,
                lowpass_freq=5000.,
                common_ref_removal=False,
                smooth_size=0,
                lostfront_chunksize=None)

            cc.set_peak_detector_params(
                #peak detector
                method=peak_method,
                engine=peak_engine,
                peak_sign='-',
                relative_threshold=7,
                peak_span_ms=0.5,
                adjacency_radius_um=adjacency_radius_um,
            )

            t1 = time.perf_counter()
            cc.estimate_signals_noise(seg_num=0, duration=10.)
            t2 = time.perf_counter()
            print('estimate_signals_noise', t2 - t1)

            t1 = time.perf_counter()
            cc.run_signalprocessor(duration=10., detect_peak=True)
            t2 = time.perf_counter()
            print('run_signalprocessor_loop', t2 - t1)

            for seg_num in range(dataio.nb_segment):
                mask = cc.all_peaks['segment'] == seg_num
                print('seg_num', seg_num, 'nb peak', np.sum(mask))

            # redetect peak
            cc.re_detect_peak(method=peak_method,
                              engine=peak_engine,
                              peak_sign='-',
                              relative_threshold=5,
                              peak_span_ms=0.7,
                              adjacency_radius_um=adjacency_radius_um)
            for seg_num in range(dataio.nb_segment):
                mask = cc.all_peaks['segment'] == seg_num
                print('seg_num', seg_num, 'nb peak', np.sum(mask))

            cc.set_waveform_extractor_params(n_left=-25, n_right=40)

            t1 = time.perf_counter()
            cc.clean_peaks(alien_value_threshold=100,
                           mode='extremum_amplitude')
            t2 = time.perf_counter()
            print('clean_peaks extremum_amplitude', t2 - t1)

            t1 = time.perf_counter()
            cc.clean_peaks(alien_value_threshold=100, mode='full_waveform')
            t2 = time.perf_counter()
            print('clean_peaks full_waveforms', t2 - t1)

            t1 = time.perf_counter()
            cc.sample_some_peaks(mode='rand', nb_max=5000)
            t2 = time.perf_counter()
            print('sample_some_peaks', t2 - t1)

            print(cc)

            #extract_some_noise
            t1 = time.perf_counter()
            cc.extract_some_noise(nb_snippet=400)
            t2 = time.perf_counter()
            print('extract_some_noise', t2 - t1)

            if mode == 'dense':
                # PCA
                t1 = time.perf_counter()
                cc.extract_some_features(method='global_pca', n_components=12)
                t2 = time.perf_counter()
                print('project pca', t2 - t1)

                # cluster
                t1 = time.perf_counter()
                cc.find_clusters(method='kmeans', n_clusters=11)
                t2 = time.perf_counter()
                print('find_clusters', t2 - t1)

            elif mode == 'sparse':

                # PCA
                t1 = time.perf_counter()
                cc.extract_some_features(method='pca_by_channel',
                                         n_components_by_channel=3)
                t2 = time.perf_counter()
                print('project pca', t2 - t1)

                # cluster
                t1 = time.perf_counter()
                cc.find_clusters(method='pruningshears')
                t2 = time.perf_counter()
                print('find_clusters', t2 - t1)

            print(cc)

        t1 = time.perf_counter()
        cc.auto_split_cluster()
        t2 = time.perf_counter()
        print('auto_split_cluster', t2 - t1)

        t1 = time.perf_counter()
        cc.trash_not_aligned()
        t2 = time.perf_counter()
        print('trash_not_aligned', t2 - t1)

        t1 = time.perf_counter()
        cc.auto_merge_cluster()
        t2 = time.perf_counter()
        print('auto_merge_cluster', t2 - t1)

        t1 = time.perf_counter()
        cc.trash_low_extremum()
        t2 = time.perf_counter()
        print('trash_low_extremum', t2 - t1)

        t1 = time.perf_counter()
        cc.trash_small_cluster()
        t2 = time.perf_counter()
        print('trash_small_cluster', t2 - t1)
Example 27
def compare_nb_waveforms():
    if os.path.exists('test_catalogueconstructor'):
        shutil.rmtree('test_catalogueconstructor')

    dataio = DataIO(dirname='test_catalogueconstructor')
    localdir, filenames, params = download_dataset(name='olfactory_bulb')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    dataio.add_one_channel_group(channels=range(14), chan_grp=0)

    catalogueconstructor = CatalogueConstructor(dataio=dataio)

    catalogueconstructor.set_preprocessor_params(
        chunksize=1024,

        #signal preprocessor
        highpass_freq=300.,
        lowpass_freq=5000.,
        lostfront_chunksize=128,

        #peak detector
        peak_sign='-',
        relative_threshold=7,
        peak_span=0.0005,
    )

    t1 = time.perf_counter()
    catalogueconstructor.estimate_signals_noise(seg_num=0, duration=10.)
    t2 = time.perf_counter()
    print('estimate_signals_noise', t2 - t1)

    t1 = time.perf_counter()
    catalogueconstructor.run_signalprocessor()
    t2 = time.perf_counter()
    print('run_signalprocessor', t2 - t1)

    print(catalogueconstructor)

    fig, axs = plt.subplots(nrows=2)

    colors = ['r', 'g', 'b']
    for i, nb_max in enumerate([100, 1000, 10000]):
        t1 = time.perf_counter()
        catalogueconstructor.extract_some_waveforms(n_left=-20,
                                                    n_right=30,
                                                    nb_max=nb_max)
        t2 = time.perf_counter()
        print('extract_some_waveforms', nb_max, t2 - t1)
        print(catalogueconstructor.some_waveforms.shape)
        wf = catalogueconstructor.some_waveforms
        wf = wf.swapaxes(1, 2).reshape(wf.shape[0], -1)
        axs[0].plot(np.median(wf, axis=0),
                    color=colors[i],
                    label='nb_max {}'.format(nb_max))

        axs[1].plot(np.mean(wf, axis=0),
                    color=colors[i],
                    label='nb_max {}'.format(nb_max))

    axs[0].legend()
    axs[0].set_title('median')
    axs[1].set_title('mean')
    plt.show()
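Compared with Example 25, this appears to be an older variant of the same check: here set_preprocessor_params also carries the peak-detector settings and waveforms are pulled with extract_some_waveforms, whereas the newer interface splits configuration across set_global_params, set_preprocessor_params, and set_peak_detector_params.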
Example 28
def test_summary_catalogue_clusters():
    dataio = DataIO(dirname='test_report')

    #~ summary_catalogue_clusters(dataio, chan_grp=0)
    summary_catalogue_clusters(dataio, chan_grp=0, labels=[0])
Example 29
def debug_compare_peeler_engines():

    dataio = DataIO(dirname='test_peeler')
    print(dataio)

    engine_list = [
        ('classic argmin opencl', 'classic', {
            'argmin_method': 'opencl'
        }),
        ('classic argmin numba', 'classic', {
            'argmin_method': 'numba'
        }),
        ('geometrical argmin opencl', 'geometrical', {
            'argmin_method': 'opencl'
        }),
        ('geometrical argmin numba', 'geometrical', {
            'argmin_method': 'numba'
        }),
        ('geometrical_opencl', 'geometrical_opencl', {}),
    ]

    all_spikes = []
    for name, engine, kargs in engine_list:
        #~ print()
        #~ print(name)
        catalogue = dataio.load_catalogue(chan_grp=0, name='with_oversampling')

        peeler = Peeler(dataio)
        peeler.change_params(engine=engine,
                             catalogue=catalogue,
                             chunksize=1024,
                             **kargs)

        t1 = time.perf_counter()
        peeler.run(progressbar=False, duration=None)
        t2 = time.perf_counter()
        print(name, 'run', t2 - t1)

        spikes = dataio.get_spikes(chan_grp=0).copy()
        #~ print(spikes.size)
        all_spikes.append(spikes)

        #~ print(dataio.get_spikes(chan_grp=0).size)

    print()
    #~ all_spikes[0] = all_spikes[0][88+80:88+81+10]
    #~ all_spikes[1] = all_spikes[1][88+80:88+81+10]

    #~ all_spikes[0] = all_spikes[0][:88+81]
    #~ all_spikes[1] = all_spikes[1][:88+81]

    labels = catalogue['clusters']['cluster_label']

    for i, spikes in enumerate(all_spikes):
        name = engine_list[i][0]
        print()
        print(name)
        print(spikes[:10])
        print(spikes.size)

        count_by_label = [
            np.sum(spikes['cluster_label'] == label) for label in labels
        ]
        print(count_by_label)
Example 30
def test_generate_report():
    dataio = DataIO(dirname='test_report')

    generate_report(dataio)