Example #1
def debug_compare_peeler():
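    # Run the same 4 s segment through both peeler implementations
    # (pure Python and OpenCL) and collect the resulting spike arrays
    # in all_spikes so they can be compared afterwards.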

    dataio = DataIO(dirname='test_peeler')
    print(dataio)
    
    all_spikes = []
    #~ for peeler_class in [Peeler,]:
    #~ for peeler_class in [Peeler_OpenCl,]:
    for peeler_class in [Peeler, Peeler_OpenCl]:
        print()
        print(peeler_class)
        initial_catalogue = dataio.load_catalogue(chan_grp=0)
        
        peeler = peeler_class(dataio)
        
        peeler.change_params(catalogue=initial_catalogue, chunksize=1024)
        
        t1 = time.perf_counter()
        #~ peeler.run_offline_loop_one_segment(duration=None, progressbar=False)
        peeler.run_offline_loop_one_segment(duration=4., progressbar=False)
        t2 = time.perf_counter()
        print('peeler.run_loop', t2-t1)
        
        
        all_spikes.append(dataio.get_spikes(chan_grp=0).copy())
Example #2
def test_peeler_geometry():
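    # Run the 'geometrical' engine on two catalogues, one built with and
    # one without inter-sample oversampling (per the printed flag), and
    # report the spike count per cluster label for each.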
    dataio = DataIO(dirname='test_peeler')

    catalogue0 = dataio.load_catalogue(chan_grp=0)
    catalogue1 = dataio.load_catalogue(chan_grp=0, name='with_oversampling')

    for catalogue in (catalogue0, catalogue1):
        print()
        print('engine=geometrical')
        print('inter_sample_oversampling',
              catalogue['inter_sample_oversampling'])
        peeler = Peeler(dataio)

        peeler.change_params(engine='geometrical',
                             catalogue=catalogue,
                             chunksize=1024,
                             argmin_method='numba')
        #~ argmin_method='opencl')

        t1 = time.perf_counter()
        peeler.run(progressbar=False)
        t2 = time.perf_counter()
        print('peeler.run_loop', t2 - t1)

        spikes = dataio.get_spikes(chan_grp=0).copy()
        labels = catalogue['clusters']['cluster_label']
        count_by_label = [
            np.sum(spikes['cluster_label'] == label) for label in labels
        ]
        print(labels)
        print(count_by_label)
Example #3
def test_peeler_several_chunksize():
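    # Check that peeler output does not depend on chunksize: run with
    # several chunk sizes, clip all runs to the last spike common to
    # every run, then assert identical spike indexes and labels.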

    dataio = DataIO(dirname='test_peeler')
    print(dataio)
    catalogue = dataio.load_catalogue(chan_grp=0)

    all_spikes = []
    sig_length = dataio.get_segment_length(0)
    chunksizes = [174, 512, 1024, 1023, 10000, 150000]
    #~ chunksizes = [512, 1024,]
    for chunksize in chunksizes:
        print('**', chunksize, '**')
        peeler = Peeler(dataio)
        peeler.change_params(engine='geometrical',
                             catalogue=catalogue,
                             chunksize=chunksize,
                             argmin_method='numba')
        t1 = time.perf_counter()
        peeler.run(progressbar=False)
        t2 = time.perf_counter()
        print('extra_size', peeler.peeler_engine.extra_size, 'n_span',
              peeler.peeler_engine.n_span, 'peak_width',
              peeler.peeler_engine.peak_width)
        print('peeler.run_loop', t2 - t1)

        # a copy is needed because the memmap is reset at each loop
        spikes = dataio.get_spikes(seg_num=0, chan_grp=0).copy()
        all_spikes.append(spikes)
        print(spikes.size)

    # clip to last spike
    last = min([spikes[-1]['index'] for spikes in all_spikes])
    for i, chunksize in enumerate(chunksizes):
        spikes = all_spikes[i]
        all_spikes[i] = spikes[spikes['index'] <= last]

    previous_spikes = None
    for i, chunksize in enumerate(chunksizes):
        print('**', chunksize, '**')
        spikes = all_spikes[i]
        is_sorted = np.all(np.diff(spikes['index']) >= 0)
        assert is_sorted

        labeled_spike = spikes[spikes['cluster_label'] >= 0]
        unlabeled_spike = spikes[spikes['cluster_label'] < 0]
        print('labeled_spike.size', labeled_spike.size, 'unlabeled_spike.size',
              unlabeled_spike.size)
        print(spikes)

        # TODO: Peeler chunksize influences the number of spikes

        if previous_spikes is not None:
            assert previous_spikes.size == spikes.size
            np.testing.assert_array_equal(previous_spikes['index'],
                                          spikes['index'])
            np.testing.assert_array_equal(previous_spikes['cluster_label'],
                                          spikes['cluster_label'])

        previous_spikes = spikes
Example #4
def open_PeelerWindow():
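    # Open the interactive PeelerWindow (Qt) on the 'test_peeler'
    # results for visual inspection.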
    dataio = DataIO(dirname='test_peeler')
    initial_catalogue = dataio.load_catalogue(chan_grp=0)

    app = pg.mkQApp()
    win = PeelerWindow(dataio=dataio, catalogue=initial_catalogue)
    win.show()
    app.exec_()
Example #5
def setup_module():
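    # pytest-style module setup: build a catalogue from the
    # 'striatum_rat' dataset and run the peeler once so the report
    # tests have spikes to summarize.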
    setup_catalogue('test_report', dataset_name='striatum_rat')

    dataio = DataIO(dirname='test_report')
    initial_catalogue = dataio.load_catalogue(chan_grp=0)
    peeler = Peeler(dataio)
    peeler.change_params(catalogue=initial_catalogue, chunksize=1024)
    peeler.run(progressbar=False)
Example #6
def test_make_catalogue():
    if os.path.exists('test_catalogueconstructor'):
        shutil.rmtree('test_catalogueconstructor')
        
    dataio = DataIO(dirname='test_catalogueconstructor')
    localdir, filenames, params = download_dataset(name='olfactory_bulb')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    #~ dataio.set_manual_channel_group(range(14))
    dataio.set_manual_channel_group([5, 6, 7, 8, 9])

    catalogueconstructor = CatalogueConstructor(dataio=dataio)

    catalogueconstructor.set_preprocessor_params(
        chunksize=1024,
        # signal preprocessor
        highpass_freq=300,
        backward_chunksize=1280,
        # peak detector
        peak_sign='-', relative_threshold=7, peak_span=0.0005,
    )
    
    t1 = time.perf_counter()
    catalogueconstructor.estimate_signals_noise(seg_num=0, duration=10.)
    t2 = time.perf_counter()
    print('estimate_signals_noise', t2-t1)
    
    t1 = time.perf_counter()
    catalogueconstructor.run_signalprocessor()
    t2 = time.perf_counter()
    print('run_signalprocessor', t2-t1)

    print(catalogueconstructor)
    
    t1 = time.perf_counter()
    catalogueconstructor.extract_some_waveforms(n_left=-12, n_right=15, nb_max=10000)
    t2 = time.perf_counter()
    print('extract_some_waveforms', t2-t1)


    # PCA
    t1 = time.perf_counter()
    catalogueconstructor.project(method='pca', n_components=12, batch_size=16384)
    t2 = time.perf_counter()
    print('project', t2-t1)
    
    # cluster
    t1 = time.perf_counter()
    catalogueconstructor.find_clusters(method='kmeans', n_clusters=13)
    t2 = time.perf_counter()
    print('find_clusters', t2-t1)
    
    # trash_small_cluster
    catalogueconstructor.trash_small_cluster()
    
    catalogueconstructor.make_catalogue()
Example #7
def test_get_auto_params_for_catalogue():
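    # Download the 'locust' dataset and print the parameter dict that
    # get_auto_params_for_catalogue derives from it.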
    if os.path.exists('test_cataloguetools'):
        shutil.rmtree('test_cataloguetools')
        
    dataio = DataIO(dirname='test_cataloguetools')
    #~ localdir, filenames, params = download_dataset(name='olfactory_bulb')
    localdir, filenames, params = download_dataset(name='locust')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    
    params = get_auto_params_for_catalogue(dataio)
    pprint(params)
Example #8
def test_peeler():
    dataio = DataIO(dirname='test_peeler')
    initial_catalogue = dataio.load_catalogue(chan_grp=0)

    peeler = Peeler(dataio)
    
    peeler.change_params(engine='classic', catalogue=initial_catalogue, chunksize=1024)
    
    t1 = time.perf_counter()
    peeler.run(progressbar=False)
    t2 = time.perf_counter()
    print('peeler.run_loop', t2-t1)
Example #9
def compare_nb_waveforms():
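    # Compare how stable the median and mean waveforms are as the
    # number of extracted waveforms (nb_max) grows from 100 to 10000;
    # one matplotlib panel per statistic.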
    if os.path.exists('test_catalogueconstructor'):
        shutil.rmtree('test_catalogueconstructor')
        
    dataio = DataIO(dirname='test_catalogueconstructor')
    localdir, filenames, params = download_dataset(name='olfactory_bulb')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    dataio.set_manual_channel_group(range(14))

    catalogueconstructor = CatalogueConstructor(dataio=dataio)

    catalogueconstructor.set_preprocessor_params(
        chunksize=1024,
        # signal preprocessor
        highpass_freq=300,
        backward_chunksize=1280,
        # peak detector
        peak_sign='-', relative_threshold=7, peak_span=0.0005,
    )
    
    t1 = time.perf_counter()
    catalogueconstructor.estimate_signals_noise(seg_num=0, duration=10.)
    t2 = time.perf_counter()
    print('estimate_signals_noise', t2-t1)
    
    t1 = time.perf_counter()
    catalogueconstructor.run_signalprocessor()
    t2 = time.perf_counter()
    print('run_signalprocessor', t2-t1)
    
    print(catalogueconstructor)
    
    fig, axs = pyplot.subplots(nrows=2)
    
    colors = ['r', 'g', 'b']
    for i, nb_max in enumerate([100, 1000, 10000]):
        t1 = time.perf_counter()
        catalogueconstructor.extract_some_waveforms(n_left=-20, n_right=30, nb_max=nb_max)
        t2 = time.perf_counter()
        print('extract_some_waveforms', nb_max, t2-t1)
        print(catalogueconstructor.some_waveforms.shape)
        wf = catalogueconstructor.some_waveforms
        wf = wf.swapaxes(1, 2).reshape(wf.shape[0], -1)
        axs[0].plot(np.median(wf, axis=0), color=colors[i], label='nb_max {}'.format(nb_max))
        
        axs[1].plot(np.mean(wf, axis=0), color=colors[i], label='nb_max {}'.format(nb_max))
    
    axs[0].legend()
    axs[0].set_title('median')
    axs[1].set_title('mean')
    pyplot.show()
Example #10
def test_apply_all_catalogue_steps():
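    # Full smoke test of the automatic pipeline: auto-derive parameters
    # for the 'locust' dataset and run every catalogue step with them.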
    if os.path.exists('test_cataloguetools'):
        shutil.rmtree('test_cataloguetools')
        
    dataio = DataIO(dirname='test_cataloguetools')
    #~ localdir, filenames, params = download_dataset(name='olfactory_bulb')
    localdir, filenames, params = download_dataset(name='locust')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    
    params = get_auto_params_for_catalogue(dataio)
    
    cc = CatalogueConstructor(dataio, chan_grp=0)
    apply_all_catalogue_steps(cc, params, verbose=True)
Example #11
def test_peeler_several_chunksize():

    dataio = DataIO(dirname='test_peeler')
    print(dataio)
    catalogue = dataio.load_catalogue(chan_grp=0)

    all_spikes = []
    sig_length = dataio.get_segment_length(0)
    chunksizes = [174, 512, 1024, 1023, 10000, 150000]
    #~ chunksizes = [512, 1024,]
    for chunksize in chunksizes:
        print('**', chunksize, '**')
        peeler = Peeler(dataio)
        peeler.change_params(catalogue=catalogue, chunksize=chunksize)
        t1 = time.perf_counter()
        peeler.run_offline_loop_one_segment(seg_num=0, progressbar=False)
        t2 = time.perf_counter()
        print('n_side', peeler.n_side, 'n_span', peeler.n_span, 'peak_width',
              peeler.peak_width)
        print('peeler.run_loop', t2 - t1)

        # a copy is needed because the memmap is reset at each loop
        spikes = dataio.get_spikes(seg_num=0, chan_grp=0).copy()
        all_spikes.append(spikes)

    # clip to last spike
    last = min([spikes[-1]['index'] for spikes in all_spikes])
    for i, chunksize in enumerate(chunksizes):
        spikes = all_spikes[i]
        all_spikes[i] = spikes[spikes['index'] <= last]

    previous_spikes = None
    for i, chunksize in enumerate(chunksizes):
        print('**', chunksize, '**')
        spikes = all_spikes[i]
        is_sorted = np.all(np.diff(spikes['index']) >= 0)
        assert is_sorted

        labeled_spike = spikes[spikes['cluster_label'] >= 0]
        unlabeled_spike = spikes[spikes['cluster_label'] < 0]
        print('labeled_spike.size', labeled_spike.size, 'unlabeled_spike.size',
              unlabeled_spike.size)

        if previous_spikes is not None:
            assert previous_spikes.size == spikes.size
            np.testing.assert_array_equal(previous_spikes['index'],
                                          spikes['index'])
            np.testing.assert_array_equal(previous_spikes['cluster_label'],
                                          spikes['cluster_label'])

        previous_spikes = spikes
Example #12
def debug_interp_centers0():
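    # Inspect the oversampled templates: judging by the key names,
    # interp_centers0 holds centers0 interpolated by subsample_ratio.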
    dataio = DataIO(dirname='test_catalogueconstructor')
    catalogueconstructor = CatalogueConstructor(dataio=dataio)
    catalogue = catalogueconstructor.make_catalogue()
    centers = catalogue['centers0']
    interp_centers = catalogue['interp_centers0']
    subsample_ratio = catalogue['subsample_ratio']
Example #13
def test_pruningshears():

    dirname = 'test_cluster'
    
    
    dataio = DataIO(dirname=dirname)
    print(dataio)
    cc = CatalogueConstructor(dataio=dataio)
    #~ print(cc.mode)
    #~ exit()
    
    #~ cc.extract_some_features(method='pca_by_channel')
    #~ print(dataio)
    #~ print(cc)
    
    # NOTE: dataset_name is assumed to be defined at module level
    if dataset_name == 'olfactory_bulb':
        kargs = dict(adjacency_radius_um=420)
    else:
        kargs = {}
    
    t0 = time.perf_counter()
    #~ cc.find_clusters(method='pruningshears', print_debug=True)
    #~ cc.find_clusters(method='pruningshears', print_debug=True, debug_plot=True, **kargs)
    cc.find_clusters(method='pruningshears', print_debug=False, debug_plot=False, **kargs)
    t1 = time.perf_counter()
    print('cluster', t1-t0)

    if __name__ == '__main__':
        app = mkQApp()
        win = CatalogueWindow(cc)
        win.show()
        app.exec_()
Example #14
def test_make_catalogue():
    dataio = DataIO(dirname='test_catalogueconstructor')

    cc = CatalogueConstructor(dataio=dataio)

    #~ cc.make_catalogue()
    cc.make_catalogue_for_peeler()
Example #15
def open_catalogue_window():
    dataio = DataIO(dirname='test_peeler')
    catalogueconstructor = CatalogueConstructor(dataio=dataio)
    app = pg.mkQApp()
    win = CatalogueWindow(catalogueconstructor)
    win.show()
    app.exec_()
Example #16
def test_sawchaincut():
    #~ dirname = 'test_catalogueconstructor'
    #~ dirname = '/home/samuel/Documents/projet/tridesclous/example/tridesclous_locust/'

    #~ dirname = '/home/samuel/Documents/projet/DataSpikeSorting/GT 252/tdc_20170623_patch1/'
    #~ dirname = '/home/samuel/Documents/projet/tridesclous/example/tridesclous_locust/'
    #~ dirname = '/home/samuel/Documents/projet/tridesclous/example/tridesclous_olfactory_bulb/'
    #~ dirname = '/home/samuel/Documents/projet/DataSpikeSorting/kampff/tdc_2015_09_03_Cell9.0/'
    #~ dirname = '/home/samuel/Documents/projet/DataSpikeSorting/spikesortingtest/tdc_silico_0/'
    dirname = '/home/samuel/Documents/projet/tridesclous/example/tridesclous_purkinje/'

    dataio = DataIO(dirname=dirname)
    cc = catalogueconstructor = CatalogueConstructor(dataio=dataio)
    print(dataio)
    print(cc)

    t0 = time.perf_counter()
    cc.find_clusters(method='sawchaincut')
    t1 = time.perf_counter()
    print('cluster', t1 - t0)
    #~ exit()

    print(cc)

    app = mkQApp()
    win = CatalogueWindow(catalogueconstructor)
    win.show()

    app.exec_()
Example #17
def test_summary_after_peeler_clusters():
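    # Produce the per-cluster summary figures twice, once with the
    # default neighborhood and once with an explicit radius of 200
    # (assumed to be in um, per the _um convention used elsewhere).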
    dataio = DataIO(dirname='test_report')
    summary_after_peeler_clusters(dataio, chan_grp=0, labels=[0])

    summary_after_peeler_clusters(dataio,
                                  chan_grp=0,
                                  labels=[0],
                                  neighborhood_radius=200)
Example #18
def test_peeler_sparse_opencl():
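    # Run the classic peeler with sparse templates: channels whose
    # template amplitude stays below 1.5 MAD are presumably dropped,
    # and the sparse computation goes through the OpenCL path.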
    dataio = DataIO(dirname='test_peeler')
    initial_catalogue = dataio.load_catalogue(chan_grp=0)

    peeler = Peeler(dataio)

    peeler.change_params(
        catalogue=initial_catalogue,
        chunksize=1024,
        use_sparse_template=True,
        sparse_threshold_mad=1.5,
        use_opencl_with_sparse=True,
    )

    t1 = time.perf_counter()
    peeler.run(progressbar=False)
    t2 = time.perf_counter()
    print('peeler.run_loop', t2 - t1)
Example #19
def test_feature_with_lda_selection():
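    # Extract global-LDA features using only the peaks currently
    # assigned to clusters 1-4 as the supervised selection.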
    dataio = DataIO(dirname='test_catalogueconstructor')
    cc = CatalogueConstructor(dataio=dataio)
    print(cc)

    selection = np.in1d(cc.all_peaks['cluster_label'], [1, 2, 3, 4])
    print(np.sum(selection), '/', cc.all_peaks.size)

    cc.extract_some_features(method='global_lda', selection=selection)
    print(cc.some_features.shape)
    print(cc.some_features)
Example #20
def test_trash_small_cluster():
    dirname = 'test_cleancluster'

    restore_savepoint(dirname, savepoint='after_trash_low_extremum')

    dataio = DataIO(dirname=dirname)
    cc = CatalogueConstructor(dataio=dataio)

    t1 = time.perf_counter()
    cc.trash_small_cluster()
    t2 = time.perf_counter()
    print('trash_small_cluster', t2 - t1)
Example #21
def test_all_decomposition():
    dirname = 'test_catalogueconstructor'
    
    dataio = DataIO(dirname=dirname)
    cc = catalogueconstructor = CatalogueConstructor(dataio=dataio)
    print(dataio)
    print(cc)
    
    methods = ['global_pca', 'pca_by_channel', 'peak_max', 'neighborhood_pca']
    # other candidates: 'tsne', 'pca_by_channel_then_tsne'
    for method in methods:
        t0 = time.perf_counter()
        cc.extract_some_features(method=method)
        t1 = time.perf_counter()
        print('extract_some_features', method, t1-t0)
Example #22
def test_plot_signals():
    dataio = DataIO('test_matplotlibplot')
    catalogueconstructor = CatalogueConstructor(dataio=dataio, chan_grp=0)

    plot_signals(dataio, signal_type='initial')
    plot_signals(dataio, signal_type='processed')
    plot_signals(catalogueconstructor,
                 signal_type='processed',
                 with_peaks=True,
                 time_slice=(2., 3))
    plot_signals(catalogueconstructor,
                 signal_type='processed',
                 with_span=True,
                 time_slice=(2., 3))
Example #23
def test_auto_merge():
    dirname = 'test_cleancluster'

    restore_savepoint(dirname, savepoint='after_trash_not_aligned')

    dataio = DataIO(dirname=dirname)
    cc = CatalogueConstructor(dataio=dataio)

    t1 = time.perf_counter()
    cc.auto_merge_cluster()
    t2 = time.perf_counter()
    print('auto_merge_cluster', t2 - t1)

    cc.create_savepoint(name='after_auto_merge_cluster')
Example #24
def setup_catalogue(dirname, dataset_name='olfactory_bulb'):
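    # Build a complete catalogue from scratch for the given dataset:
    # download, preprocess, detect peaks, extract and clean waveforms,
    # project with PCA, cluster with k-means, then prepare the
    # catalogue for the Peeler.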
    if os.path.exists(dirname):
        shutil.rmtree(dirname)
        
    dataio = DataIO(dirname=dirname)
    localdir, filenames, params = download_dataset(name=dataset_name)
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    
    if dataset_name == 'olfactory_bulb':
        channels = [5, 6, 7, 8, 9]
    else:
        channels = [0, 1, 2, 3]
    dataio.add_one_channel_group(channels=channels)

    catalogueconstructor = CatalogueConstructor(dataio=dataio)

    params = {
        'duration': 60.,
        'preprocessor': {
            'highpass_freq': 300.,
            'chunksize': 1024,
            'lostfront_chunksize': 100,
        },
        'peak_detector': {
            'peak_sign': '-',
            'relative_threshold': 7.,
            'peak_span_ms': 0.5,
        },
        'extract_waveforms': {
            'wf_left_ms': -2.5,
            'wf_right_ms': 4.0,
            'nb_max': 10000,
        },
        'clean_waveforms': {
            'alien_value_threshold': 60.,
        },
        'noise_snippet': {
            'nb_snippet': 300,
        },
        'feature_method': 'global_pca',
        'feature_kargs': {'n_components': 5},
        'cluster_method': 'kmeans',
        'cluster_kargs': {'n_clusters': 12},
        'clean_cluster': False,
        'clean_cluster_kargs': {},
    }
    
    apply_all_catalogue_steps(catalogueconstructor, params, verbose=True)
        
    catalogueconstructor.trash_small_cluster()
    
    catalogueconstructor.order_clusters(by='waveforms_rms')

    catalogueconstructor.make_catalogue_for_peeler()
Example #25
def test_peeler_argmin_methods():
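    # Benchmark the 'classic' engine with each available argmin
    # back-end (opencl, numba, pythran) on the same catalogue.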
    dataio = DataIO(dirname='test_peeler')
    catalogue = dataio.load_catalogue(chan_grp=0, name='with_oversampling')

    argmin_methods = ['opencl', 'numba', 'pythran']
    #~ argmin_methods = ['opencl', 'pythran']

    for argmin_method in argmin_methods:

        peeler = Peeler(dataio)

        peeler.change_params(
            engine='classic',
            catalogue=catalogue,
            chunksize=1024,
            argmin_method=argmin_method,
            cl_platform_index=0,
            cl_device_index=0,
        )

        t1 = time.perf_counter()
        peeler.run(progressbar=False)
        t2 = time.perf_counter()
        print(argmin_method, 'peeler.run_loop', t2 - t1)
Example #26
def test_pruningshears():

    dirname = 'test_cluster'

    dataio = DataIO(dirname=dirname)
    print(dataio)
    cc = CatalogueConstructor(dataio=dataio)

    cc.extract_some_features(method='pca_by_channel')
    #~ print(dataio)
    #~ print(cc)

    t0 = time.perf_counter()
    cc.find_clusters(method='pruningshears', print_debug=True)
    t1 = time.perf_counter()
    print('cluster', t1 - t0)
Example #27
def test_auto_split():
    dirname = 'test_cleancluster'

    restore_savepoint(dirname, savepoint='after_find_clusters')

    dataio = DataIO(dirname=dirname)
    cc = CatalogueConstructor(dataio=dataio)

    cc.find_clusters(method='pruningshears')
    print(cc)
    print(cc.n_jobs)
    t1 = time.perf_counter()
    cc.auto_split_cluster()
    t2 = time.perf_counter()
    print('auto_split_cluster', t2 - t1)

    print(cc)

    cc.create_savepoint(name='after_auto_split')
Example #28
def debug_one_decomposition():
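    # Time a single feature-extraction method; the commented lines keep
    # the alternatives handy. Here: per-neighborhood PCA with 3
    # components within a 500 um radius.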
    dirname = 'test_catalogueconstructor'

    dataio = DataIO(dirname=dirname)
    cc = catalogueconstructor = CatalogueConstructor(dataio=dataio)
    print(dataio)
    print(cc)

    t0 = time.perf_counter()
    #~ cc.extract_some_features(method='global_pca', n_components=7)
    #~ cc.extract_some_features(method='peak_max')
    #~ cc.extract_some_features(method='pca_by_channel', n_components_by_channel=3)
    cc.extract_some_features(method='neighborhood_pca',
                             n_components_by_neighborhood=3,
                             radius_um=500)

    print(cc.channel_to_features)
    print(cc.channel_to_features.shape)
    t1 = time.perf_counter()
    print('extract_some_features', t1 - t0)
Example #29
def test_sawchaincut():
    dirname = 'test_cluster'

    dataio = DataIO(dirname=dirname)
    cc = CatalogueConstructor(dataio=dataio)
    #~ print(dataio)
    #~ print(cc)

    t0 = time.perf_counter()
    cc.find_clusters(method='sawchaincut', print_debug=True)
    t1 = time.perf_counter()
    print('cluster', t1 - t0)
    #~ exit()

    #~ print(cc)

    if __name__ == '__main__':
        app = mkQApp()
        win = CatalogueWindow(cc)
        win.show()
        app.exec_()
Example #30
def test_pruningshears():

    dirname = 'test_cluster'

    dataio = DataIO(dirname=dirname)
    print(dataio)
    cc = CatalogueConstructor(dataio=dataio)

    #~ cc.extract_some_features(method='pca_by_channel')
    #~ print(dataio)
    #~ print(cc)

    t0 = time.perf_counter()
    cc.find_clusters(method='pruningshears', print_debug=True)
    t1 = time.perf_counter()
    print('cluster', t1 - t0)

    if __name__ == '__main__':
        app = mkQApp()
        win = CatalogueWindow(cc)
        win.show()
        app.exec_()
Example #31
def setup_catalogue():
    if os.path.exists('test_peeler'):
        shutil.rmtree('test_peeler')

    dataio = DataIO(dirname='test_peeler')
    localdir, filenames, params = download_dataset(name='olfactory_bulb')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    dataio.add_one_channel_group(channels=[5, 6, 7, 8, 9])

    catalogueconstructor = CatalogueConstructor(dataio=dataio)

    fullchain_kargs = {
        'duration': 60.,
        'preprocessor': {
            'highpass_freq': 300.,
            'chunksize': 1024,
            'lostfront_chunksize': 100,
        },
        'peak_detector': {
            'peak_sign': '-',
            'relative_threshold': 7.,
            'peak_span': 0.0005,
            #~ 'peak_span' : 0.000,
        },
        'extract_waveforms': {
            'n_left': -25,
            'n_right': 40,
            'nb_max': 10000,
        },
        'clean_waveforms': {
            'alien_value_threshold': 60.,
        },
        'noise_snippet': {
            'nb_snippet': 300,
        },
    }

    apply_all_catalogue_steps(catalogueconstructor,
                              fullchain_kargs,
                              'global_pca', {'n_components': 12},
                              'kmeans', {'n_clusters': 12},
                              verbose=True)
    catalogueconstructor.trash_small_cluster()

    catalogueconstructor.make_catalogue_for_peeler()
Example #32
def test_peeler_with_and_without_preprocessor():
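    # Run each engine twice: per the printed labels, 'test_peeler' is
    # expected to hold data needing no further preprocessing, while
    # 'test_peeler2' holds data the peeler must process on the fly.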

    if ON_CI_CLOUD:
        engines = ['geometrical']
    else:
        engines = ['geometrical', 'geometrical_opencl']

    #~ engines = ['geometrical_opencl']

    for engine in engines:
        for i in range(2):
            #~ for i in [1]:

            print()
            if i == 0:
                print(engine, 'without processing')
                dataio = DataIO(dirname='test_peeler')
            else:
                print(engine, 'with processing')
                dataio = DataIO(dirname='test_peeler2')

            catalogue = dataio.load_catalogue(chan_grp=0)

            peeler = Peeler(dataio)
            peeler.change_params(engine=engine,
                                 catalogue=catalogue,
                                 chunksize=1024)
            t1 = time.perf_counter()
            peeler.run(progressbar=False)
            t2 = time.perf_counter()
            print('peeler run_time', t2 - t1)
            spikes = dataio.get_spikes(chan_grp=0).copy()
            labels = catalogue['clusters']['cluster_label']
            count_by_label = [
                np.sum(spikes['cluster_label'] == label) for label in labels
            ]
            print(labels)
            print(count_by_label)
Example #33
def test_catalogue_constructor():
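    # End-to-end CatalogueConstructor exercise, repeated for both
    # memory_mode values ('ram' and 'memmap'): noise estimation, signal
    # processing per segment, waveform extraction, PCA and clustering.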
    if os.path.exists('test_catalogueconstructor'):
        shutil.rmtree('test_catalogueconstructor')
        
    dataio = DataIO(dirname='test_catalogueconstructor')
    localdir, filenames, params = download_dataset(name='olfactory_bulb')
    dataio.set_data_source(type='RawData', filenames=filenames, **params)
    
    channels = range(14)
    dataio.set_manual_channel_group(channels, chan_grp=0)
    
    catalogueconstructor = CatalogueConstructor(dataio=dataio)
    
    for memory_mode in ['ram', 'memmap']:
    #~ for memory_mode in ['memmap']:
    
        print()
        print(memory_mode)
        catalogueconstructor.set_preprocessor_params(
            chunksize=1024,
            memory_mode=memory_mode,
            # signal preprocessor
            highpass_freq=300,
            backward_chunksize=1280,
            #~ backward_chunksize=1024*2,
            # peak detector
            peakdetector_engine='numpy',
            peak_sign='-', relative_threshold=7, peak_span=0.0005,
            # waveform extractor
            #~ n_left=-20, n_right=30,
        )
        t1 = time.perf_counter()
        catalogueconstructor.estimate_signals_noise(seg_num=0, duration=10.)
        t2 = time.perf_counter()
        print('estimate_signals_noise', t2-t1)
        
        t1 = time.perf_counter()
        for seg_num in range(dataio.nb_segment):
            #~ print('seg_num', seg_num)
            catalogueconstructor.run_signalprocessor_loop_one_segment(seg_num=seg_num, duration=10.)
        t2 = time.perf_counter()
        print('run_signalprocessor_loop', t2-t1)

        t1 = time.perf_counter()
        catalogueconstructor.finalize_signalprocessor_loop()
        t2 = time.perf_counter()
        print('finalize_signalprocessor_loop', t2-t1)
        
        for seg_num in range(dataio.nb_segment):
            mask = catalogueconstructor.all_peaks['segment'] == seg_num
            print('seg_num', seg_num, np.sum(mask))

        t1 = time.perf_counter()
        catalogueconstructor.extract_some_waveforms(n_left=-25, n_right=40, mode='rand', nb_max=5000)
        t2 = time.perf_counter()
        print('extract_some_waveforms rand', t2-t1)
        print(catalogueconstructor.some_waveforms.shape)

        t1 = time.perf_counter()
        catalogueconstructor.find_good_limits()
        t2 = time.perf_counter()
        print('find_good_limits', t2-t1)
        print(catalogueconstructor.some_waveforms.shape)

        t1 = time.perf_counter()
        catalogueconstructor.extract_some_waveforms(n_left=None, n_right=None, mode='rand', nb_max=2000)
        t2 = time.perf_counter()
        print('extract_some_waveforms rand', t2-t1)
        print(catalogueconstructor.some_waveforms.shape)

        #~ break

        # PCA
        t1 = time.perf_counter()
        catalogueconstructor.project(method='pca', n_components=7, batch_size=16384)
        t2 = time.perf_counter()
        print('project pca', t2-t1)

        # peak_max
        #~ t1 = time.perf_counter()
        #~ catalogueconstructor.project(method='peak_max')
        #~ t2 = time.perf_counter()
        #~ print('project peak_max', t2-t1)
        #~ print(catalogueconstructor.some_features.shape)

        t1 = time.perf_counter()
        catalogueconstructor.extract_some_waveforms(index=np.arange(1000))
        t2 = time.perf_counter()
        print('extract_some_waveforms others', t2-t1)
        print(catalogueconstructor.some_waveforms.shape)

        
        # cluster
        t1 = time.perf_counter()
        catalogueconstructor.find_clusters(method='kmeans', n_clusters=11)
        t2 = time.perf_counter()
        print('find_clusters', t2-t1)