Example #1
 def test_write(self):
     sp_dict = {'data': self.data, 'FS': self.sampfreq}
     spt_dict = {'data': self.spt}
     self.filter = PyTablesFilter("test2.h5")
     self.filter.write_sp(sp_dict, self.el_node + "/raw")
     self.filter.write_spt(spt_dict, self.cell_node)
     self.filter.close()
     exit_code = os.system('h5diff ' + self.fname + ' test2.h5')
     os.unlink("test2.h5")
     ok_(exit_code == 0)
Example #2
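# Note: this test-class excerpt (and the related ones below) assumes imports
# that are not shown here: os, numpy as np, tables (PyTables), nose.tools.ok_
# and spike_sort.io.filters.PyTablesFilter.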
class TestHDF:
    def setUp(self):
        self.data = np.random.randint(1000, size=(4, 100))
        self.spt = np.random.randint(0, 100, (10, )) / 200.
        self.el_node = '/Subject/Session/Electrode'
        self.fname = 'test.h5'
        self.cell_node = self.el_node + '/cell'
        self.h5f = tables.openFile(self.fname, 'a')

        self.spt.sort()
        atom = tables.Atom.from_dtype(self.data.dtype)
        shape = self.data.shape
        filter = tables.Filters(complevel=0, complib='zlib')
        new_array = self.h5f.createCArray(self.el_node,
                                          "raw",
                                          atom,
                                          shape,
                                          filters=filter,
                                          createparents=True)
        self.sampfreq = 5.E3
        new_array.attrs['sampfreq'] = self.sampfreq
        new_array[:] = self.data
        spt_array = self.h5f.createArray(self.el_node,
                                         "cell",
                                         self.spt,
                                         title="",
                                         createparents="True")
        self.h5f.close()

    def tearDown(self):
        self.filter.close()
        os.unlink(self.fname)

    def test_write(self):
        sp_dict = {'data': self.data, 'FS': self.sampfreq}
        spt_dict = {'data': self.spt}
        self.filter = PyTablesFilter("test2.h5")
        self.filter.write_sp(sp_dict, self.el_node + "/raw")
        self.filter.write_spt(spt_dict, self.cell_node)
        self.filter.close()
        exit_code = os.system('h5diff ' + self.fname + ' test2.h5')
        os.unlink("test2.h5")
        ok_(exit_code == 0)

    def test_read_sp(self):
        self.filter = PyTablesFilter(self.fname)
        sp = self.filter.read_sp(self.el_node)
        ok_((sp['data'][:] == self.data).all())

    def test_read_sp_attr(self):
        #check n_contacts attribute
        self.filter = PyTablesFilter(self.fname)
        sp = self.filter.read_sp(self.el_node)
        n_contacts = sp['n_contacts']
        ok_(n_contacts == self.data.shape[0])

    def test_read_spt(self):
        self.filter = PyTablesFilter(self.fname)
        spt = self.filter.read_spt(self.cell_node)
        ok_((spt['data'] == self.spt).all())
Example #3
class TestHDF:
    def setUp(self):
        self.data = np.random.randint(1000, size=(4, 100))
        self.spt = np.random.randint(0, 100, (10,)) / 200.
        self.el_node = '/Subject/Session/Electrode'
        self.fname = 'test.h5'
        self.cell_node = self.el_node + '/cell'
        self.h5f = tables.openFile(self.fname, 'a')

        self.spt.sort()
        atom = tables.Atom.from_dtype(self.data.dtype)
        shape = self.data.shape
        filter = tables.Filters(complevel=0, complib='zlib')
        new_array = self.h5f.createCArray(self.el_node, "raw", atom, shape,
                                          filters=filter,
                                          createparents=True)
        self.sampfreq = 5.E3
        new_array.attrs['sampfreq'] = self.sampfreq
        new_array[:] = self.data
        spt_array = self.h5f.createArray(self.el_node, "cell", self.spt,
                                         title="",
                                         createparents=True)
        self.h5f.close()

    def tearDown(self):
        self.filter.close()
        os.unlink(self.fname)

    def test_write(self):
        sp_dict = {'data': self.data, 'FS': self.sampfreq}
        spt_dict = {'data': self.spt}
        self.filter = PyTablesFilter("test2.h5")
        self.filter.write_sp(sp_dict, self.el_node + "/raw")
        self.filter.write_spt(spt_dict, self.cell_node)
        self.filter.close()
        exit_code = os.system('h5diff ' + self.fname + ' test2.h5')
        os.unlink("test2.h5")
        ok_(exit_code == 0)

    def test_read_sp(self):
        self.filter = PyTablesFilter(self.fname)
        sp = self.filter.read_sp(self.el_node)
        ok_((sp['data'][:] == self.data).all())

    def test_read_sp_attr(self):
        # check n_contacts attribute
        self.filter = PyTablesFilter(self.fname)
        sp = self.filter.read_sp(self.el_node)
        n_contacts = sp['n_contacts']
        ok_(n_contacts == self.data.shape[0])

    def test_read_spt(self):
        self.filter = PyTablesFilter(self.fname)
        spt = self.filter.read_spt(self.cell_node)
        ok_((spt['data'] == self.spt).all())
Example #4
class TestHDF(object):
    def setUp(self):
        self.data = np.random.randint(1000, size=(4, 100))
        self.spt = np.random.randint(0, 100, (10,)) / 200.0
        self.el_node = "/Subject/Session/Electrode"
        self.fname = "test.h5"
        self.cell_node = self.el_node + "/cell"
        self.h5f = tables.openFile(self.fname, "a")

        self.spt.sort()
        atom = tables.Atom.from_dtype(self.data.dtype)
        shape = self.data.shape
        filter = tables.Filters(complevel=0, complib="zlib")
        new_array = self.h5f.createCArray(self.el_node, "raw", atom, shape, filters=filter, createparents=True)
        self.sampfreq = 5.0e3
        new_array.attrs["sampfreq"] = self.sampfreq
        new_array[:] = self.data
        self.h5f.createArray(self.el_node, "cell", self.spt, title="", createparents=True)
        self.h5f.close()

    def tearDown(self):
        self.filter.close()
        os.unlink(self.fname)

    def test_write(self):
        sp_dict = {"data": self.data, "FS": self.sampfreq}
        spt_dict = {"data": self.spt}
        self.filter = PyTablesFilter("test2.h5")
        self.filter.write_sp(sp_dict, self.el_node + "/raw")
        self.filter.write_spt(spt_dict, self.cell_node)
        self.filter.close()
        exit_code = os.system("h5diff " + self.fname + " test2.h5")
        os.unlink("test2.h5")
        ok_(exit_code == 0)

    def test_read_sp(self):
        self.filter = PyTablesFilter(self.fname)
        sp = self.filter.read_sp(self.el_node)
        ok_((sp["data"][:] == self.data).all())

    def test_read_sp_attr(self):
        # check n_contacts attribute
        self.filter = PyTablesFilter(self.fname)
        sp = self.filter.read_sp(self.el_node)
        n_contacts = sp["n_contacts"]
        ok_(n_contacts == self.data.shape[0])

    def test_read_spt(self):
        self.filter = PyTablesFilter(self.fname)
        spt = self.filter.read_spt(self.cell_node)
        ok_((spt["data"] == self.spt).all())
Example #5
 def test_export_cells(self):
     n_cells = 4
     self.spt_data = np.random.randint(0, 10000, (100, n_cells))
     self.spt_data.sort(0)
     self.cells_dict = dict([(i, {
         "data": self.spt_data[:, i]
     }) for i in range(n_cells)])
     fname = os.path.join(tempfile.mkdtemp(), "test.h5")
     filter = PyTablesFilter(fname)
     tmpl = "/Subject/Session/Electrode/Cell{cell_id}"
     export.export_cells(filter, tmpl, self.cells_dict)
     test = []
     for i in range(n_cells):
         spt_dict = filter.read_spt(tmpl.format(cell_id=i))
         test.append((spt_dict['data'] == self.spt_data[:, i]).all())
     test = np.array(test)
     filter.close()
     os.unlink(fname)
     ok_(test.all())
Example #6
 def test_write(self):
     sp_dict = {"data": self.data, "FS": self.sampfreq}
     spt_dict = {"data": self.spt}
     self.filter = PyTablesFilter("test2.h5")
     self.filter.write_sp(sp_dict, self.el_node + "/raw")
     self.filter.write_spt(spt_dict, self.cell_node)
     self.filter.close()
     exit_code = os.system("h5diff " + self.fname + " test2.h5")
     os.unlink("test2.h5")
     ok_(exit_code == 0)
Example #7
    def test_export_cells(self):
        n_cells = 4
        self.spt_data = np.random.randint(0, 10000, (100, n_cells))
        self.spt_data.sort(0)
        self.cells_dict = dict([(i, {"data": self.spt_data[:, i]}) for i in range(n_cells)])
        tempdir = tempfile.mkdtemp()
        fname = os.path.join(tempdir, "test.h5")
        ptfilter = PyTablesFilter(fname)
        tmpl = "/Subject/Session/Electrode/Cell{cell_id}"
        export.export_cells(ptfilter, tmpl, self.cells_dict)
        test = []
        for i in range(n_cells):
            spt_dict = ptfilter.read_spt(tmpl.format(cell_id=i))
            test.append((spt_dict["data"] == self.spt_data[:, i]).all())
        test = np.array(test)
        ptfilter.close()

        os.unlink(fname)
        os.rmdir(tempdir)

        ok_(test.all())
Example #8
#!/usr/bin/env python
#coding=utf-8

from spike_sort.io.filters import PyTablesFilter
from spike_sort import extract
from spike_sort import features
from spike_sort import cluster
from spike_sort.ui import plotting
import os

dataset = '/SubjectA/session01/el1'
datapath = '../../../data/tutorial.h5'

# read the raw extracellular data and detect spike times on contact 3
# using an automatic threshold
io_filter = PyTablesFilter(datapath)
raw = io_filter.read_sp(dataset)
spt = extract.detect_spikes(raw, contact=3, thresh='auto')

# align the detected spikes to the waveform maximum within sp_win and
# cut out the spike waveforms
sp_win = [-0.2, 0.8]
spt = extract.align_spikes(raw, spt, sp_win, type="max", resample=10)
sp_waves = extract.extract_spikes(raw, spt, sp_win)

# combine peak-to-peak and principal-component features
sp_feats = features.combine(
    (features.fetP2P(sp_waves), features.fetPCs(sp_waves)))

# cluster into 4 units with a Gaussian mixture model and plot the
# clustered features
clust_idx = cluster.cluster("gmm", sp_feats, 4)
plotting.plot_features(sp_feats, clust_idx)
plotting.show()
io_filter.close()
Example #9
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from spike_sort.io.filters import PyTablesFilter
from spike_sort import extract
from spike_sort import features
from spike_sort import cluster
from spike_sort.ui import plotting
import os

dataset = '/SubjectA/session01/el1'
datapath = '../../../data/tutorial.h5'

io_filter = PyTablesFilter(datapath)
raw = io_filter.read_sp(dataset)
spt = extract.detect_spikes(raw,  contact=3, thresh='auto')

sp_win = [-0.2, 0.8]
spt = extract.align_spikes(raw, spt, sp_win, type="max", resample=10)
sp_waves = extract.extract_spikes(raw, spt, sp_win)
sp_feats = features.combine(
    (features.fetP2P(sp_waves), features.fetPCA(sp_waves)))

plotting.plot_features(sp_feats)
plotting.show()
Example #10
"""

import os

import matplotlib
matplotlib.use("TkAgg")
matplotlib.interactive(True)

import spike_sort as sort
from spike_sort.io.filters import PyTablesFilter

DATAPATH = os.environ['DATAPATH']

if __name__ == "__main__":
    h5_fname = os.path.join(DATAPATH, "tutorial.h5")
    h5filter = PyTablesFilter(h5_fname, 'r')

    dataset = "/SubjectA/session01/el1"
    sp_win = [-0.2, 0.8]

    sp = h5filter.read_sp(dataset)
    spt = sort.extract.detect_spikes(sp, contact=3, thresh=300)

    spt = sort.extract.align_spikes(sp, spt, sp_win, type="max", resample=10)
    sp_waves = sort.extract.extract_spikes(sp, spt, sp_win)
    features = sort.features.combine(
        (sort.features.fetSpIdx(sp_waves), sort.features.fetP2P(sp_waves),
         sort.features.fetPCA(sp_waves)),
        norm=True)

    clust_idx = sort.ui.manual_sort.manual_sort(features,
Example #11
#!/usr/bin/env python
#coding=utf-8

from spike_sort.io.filters import PyTablesFilter
from spike_sort import extract
from spike_sort import features
from spike_sort import cluster
from spike_sort.ui import plotting
import os

dataset = '/SubjectA/session01/el1'
datapath = '../../../data/tutorial.h5'

io_filter = PyTablesFilter(datapath)
raw = io_filter.read_sp(dataset)
spt = extract.detect_spikes(raw,  contact=3, thresh='auto')

sp_win = [-0.2, 0.8]
spt = extract.align_spikes(raw, spt, sp_win, type="max", resample=10)
sp_waves = extract.extract_spikes(raw, spt, sp_win)
plotting.plot_spikes(sp_waves, n_spikes=200)
plotting.show()
io_filter.close()
Example #12
 def __init__(self, h5file, dataset, overwrite=False, f_filter=None):
     GenericSource.__init__(self, dataset, overwrite, f_filter)
     PyTablesFilter.__init__(self, h5file)
Example #14
 def test_read_spt(self):
     self.filter = PyTablesFilter(self.fname)
     spt = self.filter.read_spt(self.cell_node)
     ok_((spt["data"] == self.spt).all())
Example #15
 def test_read_sp_attr(self):
     # check n_contacts attribute
     self.filter = PyTablesFilter(self.fname)
     sp = self.filter.read_sp(self.el_node)
     n_contacts = sp["n_contacts"]
     ok_(n_contacts == self.data.shape[0])
Example #16
 def test_read_sp(self):
     self.filter = PyTablesFilter(self.fname)
     sp = self.filter.read_sp(self.el_node)
     ok_((sp["data"][:] == self.data).all())
Example #17
 def test_read_sp(self):
     self.filter = PyTablesFilter(self.fname)
     sp = self.filter.read_sp(self.el_node)
     ok_((sp['data'][:] == self.data).all())
Example #18
#!/usr/bin/env python
#coding=utf-8
"""
Simple raw data browser.

Keyboard shortcuts:

    +/- - zoom in/out
"""

import spike_sort as sort
from spike_sort.io.filters import PyTablesFilter
from spike_sort.ui import spike_browser
import os

DATAPATH = os.environ['DATAPATH']

if __name__ == "__main__":
    dataset = "/SubjectA/session01/el1"
    data_fname = os.path.join(DATAPATH, "tutorial.h5")

    io_filter = PyTablesFilter(data_fname)
    sp = io_filter.read_sp(dataset)
    spt = sort.extract.detect_spikes(sp, contact=3, thresh='auto')

    spike_browser.browse_data_tk(sp, spt, win=50)
Example #19
 def test_read_sp_attr(self):
     #check n_contacts attribute
     self.filter = PyTablesFilter(self.fname)
     sp = self.filter.read_sp(self.el_node)
     n_contacts = sp['n_contacts']
     ok_(n_contacts == self.data.shape[0])
Example #20
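# Note: this excerpt starts mid-file; 'eval' (presumably a spike_sort
# evaluation helper module) and PyTablesFilter are assumed to be imported
# in the lines truncated above.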
import time
from spike_sort.ui import spike_browser


def calc_metrics(features, clust_idx):

    uni_metric = eval.univariate_metric(eval.mutual_information,
                                        features, clust_idx)
    multi_metric = eval.k_nearest(features, clust_idx, n_pts=1000)

    return uni_metric, multi_metric


if __name__ == "__main__":

    h5_fname = "simulated.h5"
    h5filter = PyTablesFilter(h5_fname)

    dataset = "/TestSubject/sSession01/el1"
    sp_win = [-0.4, 0.8]
    f_filter = None
    thresh = 'auto'
    type = 'max'

    sp = h5filter.read_sp(dataset)
    spt_orig = h5filter.read_spt(dataset + "/cell1_orig")
    stim = h5filter.read_spt(dataset + "/stim")

    sp = eval.filter_data(sp, f_filter)

    spt, clust_idx, n_missing = eval.spike_clusters(sp, spt_orig,
                                                    stim,
Example #21
 def test_read_spt(self):
     self.filter = PyTablesFilter(self.fname)
     spt = self.filter.read_spt(self.cell_node)
     ok_((spt['data'] == self.spt).all())
Example #22
"""
Based on raw recordings, detect spikes, calculate features and do automatic
clustering with Gaussian mixture models.
"""

import os

import spike_sort as sort
from spike_sort.io.filters import PyTablesFilter
import spike_sort.ui.manual_sort

DATAPATH = os.environ['DATAPATH'] 

if __name__ == "__main__":
    h5_fname = os.path.join(DATAPATH, "tutorial.h5")
    h5filter = PyTablesFilter(h5_fname, 'a')

    dataset = "/SubjectA/session01/el1"
    sp_win = [-0.2, 0.8]
    
    sp = h5filter.read_sp(dataset)
    spt = sort.extract.detect_spikes(sp, contact=3, thresh='auto')
    
    spt = sort.extract.align_spikes(sp, spt, sp_win, type="max", resample=10)
    sp_waves = sort.extract.extract_spikes(sp, spt, sp_win)
    features = sort.features.combine(
            (sort.features.fetP2P(sp_waves),
             sort.features.fetPCA(sp_waves)),
            norm=True
    )
Example #23
#!/usr/bin/env python
#coding=utf-8

from spike_sort.io.filters import PyTablesFilter, BakerlabFilter

in_dataset = "/Gollum/s5gollum01/el3"
out_dataset = "/SubjectA/session01/el1/raw"

in_filter = BakerlabFilter("gollum.inf")
out_filter = PyTablesFilter("tutorial.h5")

sp = in_filter.read_sp(in_dataset)
out_filter.write_sp(sp, out_dataset)

in_filter.close()
out_filter.close()