예제 #1
0
    def _calc_features(self):
        """Compute spike features, drop hidden ones, and cache the result.

        Runs every registered feature method on the spikes from
        ``self.spikes_src``, combines them via ``features.combine``,
        removes any feature named in ``self._hidden_features``, and
        stores the filtered feature data in ``self._feature_data``.
        """
        spikes = self.spikes_src.spikes
        feats = [f(spikes) for f in self.feature_methods.values()]
        ft_data = features.combine(feats,
                norm=self.normalize,
                feat_method_names=self.feature_methods.keys())

        # Filter feature_data to remove _hidden_features.
        # Hoisting the hidden names into a set makes each membership
        # test O(1), so the filter is O(n) even with repeated names
        # (the original list lookup made this O(n^2)).
        hidden = set(self._hidden_features)
        names, idx = [], []
        for i, name in enumerate(ft_data['names']):
            if name not in hidden:
                names.append(name)
                idx.append(i)

        ft_data['names'] = names
        # Keep only the columns of the retained features.
        ft_data['data'] = ft_data['data'][:, idx]

        self._feature_data = ft_data
예제 #2
0
    def _calc_features(self):
        """Recalculate feature data for the current spikes and cache it.

        Applies each feature method to ``self.spikes_src.spikes``,
        merges the results with ``features.combine``, strips features
        listed in ``self._hidden_features``, and assigns the result to
        ``self._feature_data``.
        """
        spikes = self.spikes_src.spikes
        feats = [f(spikes) for f in self.feature_methods.values()]
        ft_data = features.combine(
            feats,
            norm=self.normalize,
            feat_method_names=self.feature_methods.keys())

        # Filter feature_data to remove _hidden_features.
        # A set gives constant-time membership checks, turning the
        # previous O(n^2) scan (list lookup per name, with possible
        # repetitions) into a single O(n) pass.
        hidden = set(self._hidden_features)
        names, idx = [], []
        for i, name in enumerate(ft_data['names']):
            if name not in hidden:
                names.append(name)
                idx.append(i)

        ft_data['names'] = names
        # Select only the data columns of the surviving features.
        ft_data['data'] = ft_data['data'][:, idx]

        self._feature_data = ft_data
예제 #3
0
#!/usr/bin/env python
#coding=utf-8
"""Spike-sorting tutorial: detect, align, featurize and cluster spikes."""

from spike_sort.io.filters import PyTablesFilter
from spike_sort import extract
from spike_sort import features
from spike_sort import cluster
from spike_sort.ui import plotting
import os

dataset = '/SubjectA/session01/el1'
datapath = '../../../data/tutorial.h5'

# Open the HDF5 file and read the raw extracellular signal.
io_filter = PyTablesFilter(datapath)
raw = io_filter.read_sp(dataset)

# Detect spikes on contact 3 using an automatically chosen threshold.
spt = extract.detect_spikes(raw, contact=3, thresh='auto')

# Align spike times to the waveform maximum (10x resampled) and cut
# out the waveforms in a [-0.2, 0.8] ms window.
sp_win = [-0.2, 0.8]
spt = extract.align_spikes(raw, spt, sp_win, type="max", resample=10)
sp_waves = extract.extract_spikes(raw, spt, sp_win)

# Build the feature matrix from peak-to-peak amplitudes and PCs.
p2p_feats = features.fetP2P(sp_waves)
pc_feats = features.fetPCs(sp_waves)
sp_feats = features.combine((p2p_feats, pc_feats))

# Cluster into 4 units with a Gaussian mixture model and visualize.
clust_idx = cluster.cluster("gmm", sp_feats, 4)
plotting.plot_features(sp_feats, clust_idx)
plotting.show()

io_filter.close()
예제 #4
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Spike-sorting tutorial: detect and align spikes, then plot features."""

from spike_sort.io.filters import PyTablesFilter
from spike_sort import extract
from spike_sort import features
from spike_sort import cluster
from spike_sort.ui import plotting
import os

dataset = '/SubjectA/session01/el1'
datapath = '../../../data/tutorial.h5'

# Load the raw recording from the tutorial HDF5 file.
io_filter = PyTablesFilter(datapath)
raw = io_filter.read_sp(dataset)

# Spike detection on contact 3 with an automatic threshold.
spt = extract.detect_spikes(raw,  contact=3, thresh='auto')

# Align to the waveform maximum (resampled 10x) and extract the
# waveforms in a [-0.2, 0.8] ms window around each spike.
sp_win = [-0.2, 0.8]
spt = extract.align_spikes(raw, spt, sp_win, type="max", resample=10)
sp_waves = extract.extract_spikes(raw, spt, sp_win)

# Combine peak-to-peak amplitude and PCA features into one set.
p2p_feats = features.fetP2P(sp_waves)
pca_feats = features.fetPCA(sp_waves)
sp_feats = features.combine((p2p_feats, pca_feats))

# Show the feature scatter plots.
plotting.plot_features(sp_feats)
plotting.show()
예제 #5
0
 def _calc_features(self):
     """Apply every feature method to the current spikes and cache the combined feature data."""
     spikes = self.spikes_src.spikes
     # Collect one feature result per registered method.
     feats = []
     for method in self.feature_methods:
         feats.append(method(spikes))
     self._feature_data = features.combine(feats, norm=self.normalize)
예제 #6
0
 def _calc_features(self):
     """Recompute the combined feature data for the current spike set and store it."""
     src_spikes = self.spikes_src.spikes
     extracted = [compute(src_spikes) for compute in self.feature_methods]
     # Merge all per-method features, normalizing if configured.
     self._feature_data = features.combine(extracted, norm=self.normalize)