Example #1
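These snippets are tests and helper scripts written against the tridesclous spike-sorting package. They assume imports roughly like the sketch below; the exact names and module paths are an assumption and vary with the tridesclous version (Examples #1 and #2 use an older API built around PeakDetector, WaveformExtractor and Clustering, and get_ajusted_waveforms really is spelled that way in that API):

    import time
    import numpy as np
    import seaborn as sns
    from matplotlib import pyplot

    from tridesclous import DataIO, Peeler
    # assumptions for the older and more specialized examples:
    # from tridesclous import PeakDetector, WaveformExtractor, Clustering
    # from tridesclous.peakdetector import detect_peaks_in_chunk
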
def test_peeler():
    dataio = DataIO(dirname = 'datatest')
    #~ dataio = DataIO(dirname = 'datatest_neo')
    
    sigs = dataio.get_signals(seg_num=0)
    
    #peak
    peakdetector = PeakDetector(sigs)
    peak_pos = peakdetector.detect_peaks(threshold=-4, peak_sign = '-', n_span = 5)
    
    #waveforms
    waveformextractor = WaveformExtractor(peakdetector, n_left=-30, n_right=50)
    limit_left, limit_right = waveformextractor.find_good_limits(mad_threshold = 1.1)
    #~ print(limit_left, limit_right)
    short_wf = waveformextractor.get_ajusted_waveforms()
    #~ print(short_wf.shape)
    
    #clustering
    clustering = Clustering(short_wf)
    features = clustering.project(method = 'pca', n_components = 4)
    clustering.find_clusters(8, order_clusters = True)
    catalogue = clustering.construct_catalogue()
    #~ clustering.plot_catalogue(sameax = False)
    #~ clustering.plot_catalogue(sameax = True)
    
    #~ clustering.merge_cluster(1, 2)
    catalogue = clustering.construct_catalogue()
    clustering.plot_catalogue(sameax = False)
    #~ clustering.plot_catalogue(sameax = True)
    
    
    #peeler
    signals = peakdetector.normed_sigs
    peeler = Peeler(signals, catalogue,  limit_left, limit_right,
                            threshold=-5., peak_sign = '-', n_span = 5)
    
    prediction0, residuals0 = peeler.peel()
    prediction1, residuals1 = peeler.peel()
    
    spiketrains = peeler.get_spiketrains()
    print(spiketrains)
    
    fig, axs = pyplot.subplots(nrows = 6, sharex = True)#, sharey = True)
    axs[0].plot(signals)
    axs[1].plot(prediction0) 
    axs[2].plot(residuals0)
    axs[3].plot(prediction1)
    axs[4].plot(residuals1)
    
    for i in range(5):
        axs[i].set_ylim(-25, 10)
    
    peeler.plot_spiketrains(ax = axs[5])
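
Example #1 (like Example #2 after it) calls peeler.peel() twice by hand: each pass subtracts the predicted templates from the signal and returns the residual, which the next pass peels again. A minimal sketch of the same idea as a loop (n_passes is a hypothetical parameter; the tests here simply hard-code two passes):

    predictions, residuals = [], []
    n_passes = 2  # hypothetical; more passes peel deeper into overlapping spikes
    for _ in range(n_passes):
        pred, resid = peeler.peel()
        predictions.append(pred)
        residuals.append(resid)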
Example #2
def test_peeler():
    dataio = DataIO(dirname = 'datatest')
    sigs = dataio.get_signals(seg_num=0)
    
    #peak
    peakdetector = PeakDetector(sigs)
    peak_pos = peakdetector.detect_peaks(threshold=-4, peak_sign = '-', n_span = 5)
    
    #waveforms
    waveformextractor = WaveformExtractor(peakdetector, n_left=-30, n_right=50)
    limit_left, limit_right = waveformextractor.find_good_limits(mad_threshold = 1.1)
    #~ print(limit_left, limit_right)
    short_wf = waveformextractor.get_ajusted_waveforms(margin=2)
    #~ print(short_wf.shape)
    
    #clustering
    clustering = Clustering(short_wf)
    features = clustering.project(method = 'pca', n_components = 5)
    clustering.find_clusters(7)
    catalogue = clustering.construct_catalogue()
    
    clustering.plot_catalogue()
    
    #peeler
    signals = peakdetector.normed_sigs
    peeler = Peeler(signals, catalogue,  limit_left, limit_right,
                            threshold=-4, peak_sign = '-', n_span = 5)
    
    prediction0, residuals0 = peeler.peel()
    prediction1, residuals1 = peeler.peel()
    fig, axs = pyplot.subplots(nrows = 6, sharex = True)#, sharey = True)
    axs[0].plot(signals)
    axs[1].plot(prediction0) 
    axs[2].plot(residuals0)
    axs[3].plot(prediction1)
    axs[4].plot(residuals1)
    
    colors = sns.color_palette('husl', len(catalogue))
    spiketrains = peeler.get_spiketrains()
    i = 0
    for k, pos in spiketrains.items():
        axs[5].plot(pos, np.ones(pos.size)*k, ls = 'None', marker = '|',  markeredgecolor = colors[i], markersize = 10, markeredgewidth = 2)
        i += 1
    axs[5].set_ylim(0, len(catalogue))
Example #3
def apply_peeler():
    dataio = DataIO(dirname=dirname)
    catalogue = dataio.load_catalogue(chan_grp=0)
    peeler = Peeler(dataio)
    peeler.change_params(catalogue=catalogue, chunksize=1024)

    peeler.run(progressbar=True)
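
apply_peeler assumes a catalogue was already built and saved under dirname, otherwise dataio.load_catalogue(chan_grp=0) has nothing to load. A minimal sketch of that preceding step, modeled on Example #6 below (setup_catalogue and the dataset name come from that example and belong to the project's test helpers):

    setup_catalogue(dirname, dataset_name='striatum_rat')
    # after this, DataIO(dirname=dirname) + load_catalogue(chan_grp=0) work as above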
Example #4
def test_peeler_geometry():
    dataio = DataIO(dirname='test_peeler')

    catalogue0 = dataio.load_catalogue(chan_grp=0)
    catalogue1 = dataio.load_catalogue(chan_grp=0, name='with_oversampling')

    for catalogue in (catalogue0, catalogue1):
        print()
        print('engine=geometrical')
        print('inter_sample_oversampling',
              catalogue['inter_sample_oversampling'])
        peeler = Peeler(dataio)

        peeler.change_params(engine='geometrical',
                             catalogue=catalogue,
                             chunksize=1024,
                             argmin_method='numba')
        #~ argmin_method='opencl')

        t1 = time.perf_counter()
        peeler.run(progressbar=False)
        t2 = time.perf_counter()
        print('peeler.run_loop', t2 - t1)

        spikes = dataio.get_spikes(chan_grp=0).copy()
        labels = catalogue['clusters']['cluster_label']
        count_by_label = [
            np.sum(spikes['cluster_label'] == label) for label in labels
        ]
        print(labels)
        print(count_by_label)
Example #5
def test_peeler_several_chunksize():

    dataio = DataIO(dirname='test_peeler')
    print(dataio)
    catalogue = dataio.load_catalogue(chan_grp=0)

    all_spikes = []
    sig_length = dataio.get_segment_length(0)
    chunksizes = [174, 512, 1024, 1023, 10000, 150000]
    #~ chunksizes = [512, 1024,]
    for chunksize in chunksizes:
        print('**', chunksize, '**')
        peeler = Peeler(dataio)
        peeler.change_params(engine='geometrical',
                             catalogue=catalogue,
                             chunksize=chunksize,
                             argmin_method='numba')
        t1 = time.perf_counter()
        peeler.run(progressbar=False)
        t2 = time.perf_counter()
        print('extra_size', peeler.peeler_engine.extra_size, 'n_span',
              peeler.peeler_engine.n_span, 'peak_width',
              peeler.peeler_engine.peak_width)
        print('peeler.run_loop', t2 - t1)

        # a copy is needed because the memmap is reset at each iteration
        spikes = dataio.get_spikes(seg_num=0, chan_grp=0).copy()
        all_spikes.append(spikes)
        print(spikes.size)

    # clip to last spike
    last = min([spikes[-1]['index'] for spikes in all_spikes])
    for i, chunksize in enumerate(chunksizes):
        spikes = all_spikes[i]
        all_spikes[i] = spikes[spikes['index'] <= last]

    previous_spikes = None
    for i, chunksize in enumerate(chunksizes):
        print('**', chunksize, '**')
        spikes = all_spikes[i]
        is_sorted = np.all(np.diff(spikes['index']) >= 0)
        assert is_sorted

        labeled_spike = spikes[spikes['cluster_label'] >= 0]
        unlabeled_spike = spikes[spikes['cluster_label'] < 0]
        print('labeled_spike.size', labeled_spike.size, 'unlabeled_spike.size',
              unlabeled_spike.size)
        print(spikes)

        # TODO: the Peeler chunksize influences the number of spikes

        if previous_spikes is not None:
            assert previous_spikes.size == spikes.size
            np.testing.assert_array_equal(previous_spikes['index'],
                                          spikes['index'])
            np.testing.assert_array_equal(previous_spikes['cluster_label'],
                                          spikes['cluster_label'])

        previous_spikes = spikes
Example #6
def setup_module():
    setup_catalogue('test_report', dataset_name='striatum_rat')

    dataio = DataIO(dirname='test_report')
    initial_catalogue = dataio.load_catalogue(chan_grp=0)
    peeler = Peeler(dataio)
    peeler.change_params(catalogue=initial_catalogue, chunksize=1024)
    peeler.run(progressbar=False)
Example #7
def test_peeler():
    dataio = DataIO(dirname='test_peeler')
    initial_catalogue = dataio.load_catalogue(chan_grp=0)

    peeler = Peeler(dataio)
    
    peeler.change_params(engine='classic', catalogue=initial_catalogue, chunksize=1024)
    
    t1 = time.perf_counter()
    peeler.run(progressbar=False)
    t2 = time.perf_counter()
    print('peeler.run_loop', t2-t1)
Example #8
def test_peeler_several_chunksize():

    dataio = DataIO(dirname='test_peeler')
    print(dataio)
    catalogue = dataio.load_catalogue(chan_grp=0)

    all_spikes = []
    sig_length = dataio.get_segment_length(0)
    chunksizes = [174, 512, 1024, 1023, 10000, 150000]
    #~ chunksizes = [512, 1024,]
    for chunksize in chunksizes:
        print('**', chunksize, '**')
        peeler = Peeler(dataio)
        peeler.change_params(catalogue=catalogue, chunksize=chunksize)
        t1 = time.perf_counter()
        peeler.run_offline_loop_one_segment(seg_num=0, progressbar=False)
        t2 = time.perf_counter()
        print('n_side', peeler.n_side, 'n_span', peeler.n_span, 'peak_width',
              peeler.peak_width)
        print('peeler.run_loop', t2 - t1)

        spikes = dataio.get_spikes(seg_num=0, chan_grp=0)
        all_spikes.append(spikes)

    # clip to last spike
    last = min([spikes[-1]['index'] for spikes in all_spikes])
    for i, chunksize in enumerate(chunksizes):
        spikes = all_spikes[i]
        all_spikes[i] = spikes[spikes['index'] <= last]

    previous_spikes = None
    for i, chunksize in enumerate(chunksizes):
        print('**', chunksize, '**')
        spikes = all_spikes[i]
        is_sorted = np.all(np.diff(spikes['index']) >= 0)
        assert is_sorted

        labeled_spike = spikes[spikes['cluster_label'] >= 0]
        unlabeled_spike = spikes[spikes['cluster_label'] < 0]
        print('labeled_spike.size', labeled_spike.size, 'unlabeled_spike.size',
              unlabeled_spike.size)

        if previous_spikes is not None:
            assert previous_spikes.size == spikes.size
            np.testing.assert_array_equal(previous_spikes['index'],
                                          spikes['index'])
            np.testing.assert_array_equal(previous_spikes['cluster_label'],
                                          spikes['cluster_label'])

        previous_spikes = spikes
Example #9
def run_peeler(dirname, chan_grp):
    dataio = DataIO(dirname=dirname, ch_grp=chan_grp)
    initial_catalogue = dataio.load_catalogue(chan_grp=chan_grp)

    peeler = Peeler(dataio)
    peeler.change_params(catalogue=initial_catalogue,
                         chunksize=32768,
                         use_sparse_template=False,
                         sparse_threshold_mad=1.5,
                         use_opencl_with_sparse=False)

    t1 = time.perf_counter()
    peeler.run()
    t2 = time.perf_counter()
    print('peeler.run', t2 - t1)
Example #10
def run_peeler(dirname):
    dataio = DataIO(dirname=dirname)
    initial_catalogue = dataio.load_catalogue(chan_grp=0)

    peeler = Peeler(dataio)
    peeler.change_params(catalogue=initial_catalogue)

    t1 = time.perf_counter()
    peeler.run()
    t2 = time.perf_counter()
    print('peeler.run', t2 - t1)

    print()
    for seg_num in range(dataio.nb_segment):
        spikes = dataio.get_spikes(seg_num)
        print('seg_num', seg_num, 'nb_spikes', spikes.size)
Example #11
def test_peeler_sparse_opencl():
    dataio = DataIO(dirname='test_peeler')
    initial_catalogue = dataio.load_catalogue(chan_grp=0)

    peeler = Peeler(dataio)

    peeler.change_params(
        catalogue=initial_catalogue,
        chunksize=1024,
        use_sparse_template=True,
        sparse_threshold_mad=1.5,
        use_opencl_with_sparse=True,
    )

    t1 = time.perf_counter()
    peeler.run(progressbar=False)
    t2 = time.perf_counter()
    print('peeler.run_loop', t2 - t1)
Example #12
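ON_CI_CLOUD is not defined in this snippet; it gates the OpenCL engine off on continuous-integration machines that have no GPU. A plausible definition, purely an assumption about the surrounding test module:

    import os
    ON_CI_CLOUD = os.environ.get('CI', 'false').lower() in ('true', '1')  # hypothetical
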
def test_peeler_with_and_without_preprocessor():

    if ON_CI_CLOUD:
        engines = ['geometrical']
    else:
        engines = ['geometrical', 'geometrical_opencl']

    #~ engines = ['geometrical_opencl']

    for engine in engines:
        for i in range(2):
            #~ for i in [1]:

            print()
            if i == 0:
                print(engine, 'without processing')
                dataio = DataIO(dirname='test_peeler')
            else:
                print(engine, 'with processing')
                dataio = DataIO(dirname='test_peeler2')

            catalogue = dataio.load_catalogue(chan_grp=0)

            peeler = Peeler(dataio)
            peeler.change_params(engine=engine,
                                 catalogue=catalogue,
                                 chunksize=1024)
            t1 = time.perf_counter()
            peeler.run(progressbar=False)
            t2 = time.perf_counter()
            print('peeler run_time', t2 - t1)
            spikes = dataio.get_spikes(chan_grp=0).copy()
            labels = catalogue['clusters']['cluster_label']
            count_by_label = [
                np.sum(spikes['cluster_label'] == label) for label in labels
            ]
            print(labels)
            print(count_by_label)
Example #13
def test_peeler_argmin_methods():
    dataio = DataIO(dirname='test_peeler')
    catalogue = dataio.load_catalogue(chan_grp=0, name='with_oversampling')

    argmin_methods = ['opencl', 'numba', 'pythran']
    #~ argmin_methods = ['opencl', 'pythran']

    for argmin_method in argmin_methods:

        peeler = Peeler(dataio)

        peeler.change_params(
            engine='classic',
            catalogue=catalogue,
            chunksize=1024,
            argmin_method=argmin_method,
            cl_platform_index=0,
            cl_device_index=0,
        )

        t1 = time.perf_counter()
        peeler.run(progressbar=False)
        t2 = time.perf_counter()
        print(argmin_method, 'peeler.run_loop', t2 - t1)
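
All three argmin_method values rely on optional dependencies (pyopencl, numba, pythran). A hedged sketch of probing which backends are importable before the loop, rather than assuming all three are installed as the test above does:

    argmin_methods = []
    for module, method in [('pyopencl', 'opencl'), ('numba', 'numba'), ('pythran', 'pythran')]:
        try:
            __import__(module)  # only checks availability
            argmin_methods.append(method)
        except ImportError:
            print('skipping argmin_method', method)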
Example #14
def test_peeler_empty_catalogue():
    """
    This test peeler with empty catalogue.
    This is like a peak detector.
    Check several chunksize and compare to offline-one-buffer.
    
    """
    dataio = DataIO(dirname='test_peeler')
    #~ print(dataio)
    catalogue = dataio.load_catalogue(chan_grp=0)
    
    # empty catalogue for debug peak detection
    s = catalogue['centers0'].shape
    empty_centers = np.zeros((0, s[1], s[2]), dtype='float32')
    catalogue['centers0'] = empty_centers
    catalogue['centers1'] = empty_centers
    catalogue['centers2'] = empty_centers
    catalogue['cluster_labels'] = np.zeros(0, dtype=catalogue['cluster_labels'].dtype)
        
    
    sig_length = dataio.get_segment_length(0)
    chunksizes = [ 101, 174, 512, 1024, 1023, 10000, 150000]
    #~ chunksizes = [1024,]
    
    previous_peak = None
    
    for chunksize in chunksizes:
        print('**', chunksize, '**')
        peeler = Peeler(dataio)
        peeler.change_params(engine='classic', catalogue=catalogue, chunksize=chunksize)
        t1 = time.perf_counter()
        #~ peeler.run(progressbar=False)
        peeler.run_offline_loop_one_segment(seg_num=0, progressbar=False)
        t2 = time.perf_counter()
        
        #~ print('n_side', peeler.n_side, 'n_span', peeler.n_span, 'peak_width', peeler.peak_width)
        #~ print('peeler.run_loop', t2-t1)
        
        spikes = dataio.get_spikes(seg_num=0, chan_grp=0)
        labeled_spike = spikes[spikes['cluster_label'] >= 0]
        unlabeled_spike = spikes[spikes['cluster_label'] < 0]
        assert labeled_spike.size == 0

        is_sorted = np.all(np.diff(unlabeled_spike['index']) >= 0)
        assert is_sorted
        
        
        online_peaks = unlabeled_spike['index']
        engine = peeler.peeler_engine
        
        i_stop = (sig_length
                  - catalogue['signal_preprocessor_params']['lostfront_chunksize']
                  - engine.n_side + engine.n_span)
        sigs = dataio.get_signals_chunk(signal_type='processed', i_stop=i_stop)
        offline_peaks = detect_peaks_in_chunk(sigs, engine.n_span, engine.relative_threshold, engine.peak_sign)

        offline_peaks = offline_peaks[offline_peaks <= online_peaks[-1]]
        
        assert offline_peaks.size == online_peaks.size
        np.testing.assert_array_equal(offline_peaks, online_peaks)
        
        if previous_peak is not None:
            last = min(previous_peak[-1], online_peaks[-1])
            previous_peak = previous_peak[previous_peak <= last]
            online_peaks_clipped = online_peaks[online_peaks <= last]
            assert previous_peak.size == online_peaks_clipped.size
            np.testing.assert_array_equal(previous_peak, online_peaks_clipped)

        previous_peak = online_peaks
Example #15
def debug_compare_peeler_engines():

    dataio = DataIO(dirname='test_peeler')
    print(dataio)

    engine_list = [
        ('classic argmin opencl', 'classic', {
            'argmin_method': 'opencl'
        }),
        ('classic argmin numba', 'classic', {
            'argmin_method': 'numba'
        }),
        ('geometrical argmin opencl', 'geometrical', {
            'argmin_method': 'opencl'
        }),
        ('geometrical argmin numba', 'geometrical', {
            'argmin_method': 'numba'
        }),
        ('geometrical_opencl', 'geometrical_opencl', {}),
    ]

    all_spikes = []
    for name, engine, kargs in engine_list:
        #~ print()
        #~ print(name)
        catalogue = dataio.load_catalogue(chan_grp=0, name='with_oversampling')

        peeler = Peeler(dataio)
        peeler.change_params(engine=engine,
                             catalogue=catalogue,
                             chunksize=1024,
                             **kargs)

        t1 = time.perf_counter()
        peeler.run(progressbar=False, duration=None)
        t2 = time.perf_counter()
        print(name, 'run', t2 - t1)

        spikes = dataio.get_spikes(chan_grp=0).copy()
        #~ print(spikes.size)
        all_spikes.append(spikes)

        #~ print(dataio.get_spikes(chan_grp=0).size)

    print()
    #~ all_spikes[0] = all_spikes[0][88+80:88+81+10]
    #~ all_spikes[1] = all_spikes[1][88+80:88+81+10]

    #~ all_spikes[0] = all_spikes[0][:88+81]
    #~ all_spikes[1] = all_spikes[1][:88+81]

    labels = catalogue['clusters']['cluster_label']

    for i, spikes in enumerate(all_spikes):
        name = engine_list[i][0]
        print()
        print(name)
        print(spikes[:10])
        print(spikes.size)

        count_by_label = [
            np.sum(spikes['cluster_label'] == label) for label in labels
        ]
        print(count_by_label)
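
debug_compare_peeler_engines only prints the first spikes and the per-label counts. A follow-up sketch that asserts the engines actually agree, reusing the clip-to-last-common-spike pattern from Examples #5 and #8 (it assumes exact agreement across engines is the expected outcome, which the prints above suggest but do not enforce):

    last = min(spikes[-1]['index'] for spikes in all_spikes)
    clipped = [spikes[spikes['index'] <= last] for spikes in all_spikes]
    for other in clipped[1:]:
        np.testing.assert_array_equal(clipped[0]['index'], other['index'])
        np.testing.assert_array_equal(clipped[0]['cluster_label'], other['cluster_label'])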