class TestCovarianceEstimator(ut.TestCase):
    """Shape, symmetry and inverse tests for TimeSeriesCovE on white noise."""

    def setUp(self):
        # long gaussian white-noise recording, one column per channel
        self.dlen = 250000
        self.tf = 65
        self.nc = 4
        self.white_noise = sp.randn(self.dlen, self.nc)
        # estimator fed with the noise; an extra channel subset is registered
        self.CE = TimeSeriesCovE(tf_max=self.tf, nc=self.nc)
        self.CE.new_chan_set((1, 2))
        self.CE.update(self.white_noise)

    def testTrivial(self):
        # a covariance matrix must be square of size len(chan_set)*tf
        # and symmetric
        for tf, chan_set in [(20, (0, 1, 2, 3)), (10, (0, 1))]:
            cmx = self.CE.get_cmx(tf=tf, chan_set=chan_set)
            dim = len(chan_set) * tf
            self.assertTupleEqual(cmx.shape, (dim, dim))
            assert_equal(cmx, cmx.T)

    def testInverse(self):
        # C dot C^-1 must give the identity (up to numerical precision)
        for tf, chan_set in [(20, (0, 1, 2, 3)), (10, (0, 1))]:
            cmx = self.CE.get_cmx(tf=tf, chan_set=chan_set)
            icmx = self.CE.get_icmx(tf=tf, chan_set=chan_set)
            dim = len(chan_set) * tf
            assert_almost_equal(sp.dot(cmx, icmx), sp.eye(dim), decimal=5)
class TestCovarianceEstimator(ut.TestCase):
    """Checks estimated covariance matrices: shape, symmetry, inverse."""

    def setUp(self):
        # white-noise dataset and a covariance estimator built on it
        self.dlen = 250000
        self.tf = 65
        self.nc = 4
        self.white_noise = sp.randn(self.dlen, self.nc)
        estimator = TimeSeriesCovE(tf_max=self.tf, nc=self.nc)
        estimator.new_chan_set((1, 2))
        estimator.update(self.white_noise)
        self.CE = estimator

    def testTrivial(self):
        # full channel set, tf=20: square and symmetric
        cov_full = self.CE.get_cmx(tf=20, chan_set=(0, 1, 2, 3))
        self.assertTupleEqual(cov_full.shape, (80, 80))
        assert_equal(cov_full, cov_full.T)
        # two-channel subset, tf=10: square and symmetric
        cov_pair = self.CE.get_cmx(tf=10, chan_set=(0, 1))
        self.assertTupleEqual(cov_pair.shape, (20, 20))
        assert_equal(cov_pair, cov_pair.T)

    def testInverse(self):
        # full channel set: C * C^-1 == I(80)
        cov_full = self.CE.get_cmx(tf=20, chan_set=(0, 1, 2, 3))
        icov_full = self.CE.get_icmx(tf=20, chan_set=(0, 1, 2, 3))
        assert_almost_equal(sp.dot(cov_full, icov_full), sp.eye(80),
                            decimal=5)
        # two-channel subset: C * C^-1 == I(20)
        cov_pair = self.CE.get_cmx(tf=10, chan_set=(0, 1))
        icov_pair = self.CE.get_icmx(tf=10, chan_set=(0, 1))
        assert_almost_equal(sp.dot(cov_pair, icov_pair), sp.eye(20),
                            decimal=5)
def testMainSingle(self, verbose=VERBOSE.PLOT): import time # setup V = VERBOSE(verbose) TF = 21 NC = 2 spike_proto_sc = sp.cos(sp.linspace(-sp.pi, 3 * sp.pi, TF)) spike_proto_sc *= sp.hanning(TF) scale = sp.linspace(0, 2, TF) xi1 = sp.vstack( (spike_proto_sc * 5 * scale, spike_proto_sc * 4 * scale)).T xi2 = sp.vstack((spike_proto_sc * .5 * scale[::-1], spike_proto_sc * 9 * scale[::-1])).T templates = sp.asarray([xi1, xi2]) LEN = 2000 noise = sp.randn(LEN, NC) ce = TimeSeriesCovE(tf_max=TF, nc=NC) ce.update(noise) FB = BOTMNode(templates=templates, ce=ce, verbose=V, ovlp_taus=None) signal = sp.zeros_like(noise) NPOS = 4 POS = [(int(i * LEN / (NPOS + 1)), 100) for i in xrange(1, NPOS + 1)] POS.append((100, 2)) POS.append((150, 2)) for pos, tau in POS: signal[pos:pos + TF] += xi1 signal[pos + tau:pos + tau + TF] += xi2 x = sp.ascontiguousarray(signal + noise, dtype=sp.float32) # test against if V.has_print: print '### constructed spike times ###' test_u0 = sorted([t_tpl[0] for t_tpl in POS]) test_u1 = sorted([t_tpl[0] + t_tpl[1] for t_tpl in POS]) test_rval = { 0: sp.array(test_u0) + TF / 2, 1: sp.array(test_u1) + TF / 2 } if V.has_print: print test_rval # sort tic_o = time.clock() FB(x) toc_o = time.clock() if V.has_print: print '### sorting spike times ###' print FB.rval if V.has_plot: FB.plot_template_set(show=False) FB.plot_sorting(show=True) if V.has_print: print '###' print 'duration:', toc_o - tic_o for k in FB.rval: assert_array_almost_equal(FB.rval[k], test_rval[k], decimal=0)
def setUp(self):
    """Build a white-noise dataset and a covariance estimator over it."""
    # recording geometry: 250k samples, 4 channels, tf up to 65
    self.dlen = 250000
    self.tf = 65
    self.nc = 4
    self.white_noise = sp.randn(self.dlen, self.nc)
    # estimator with an additional registered channel subset
    estimator = TimeSeriesCovE(tf_max=self.tf, nc=self.nc)
    estimator.new_chan_set((1, 2))
    estimator.update(self.white_noise)
    self.CE = estimator
def setUp(self):
    """Prepare a ramp template, noise and a noise-covariance estimator."""
    self.tf = 10
    self.nc = 2
    # linear ramp 0..tf-1, duplicated on every channel, half amplitude
    ramp = sp.arange(self.tf).astype(sp.float32)
    self.xi = sp.vstack([ramp] * self.nc).T * 0.5
    # three spike positions at the quarter points of the data
    self.len = 1000
    self.pos = [int(i * self.len / 4.0) for i in xrange(1, 4)]
    self.noise = sp.randn(self.len, self.nc)
    self.ce = TimeSeriesCovE(tf_max=self.tf, nc=self.nc)
    self.ce.update(self.noise)
def setUp(self):
    """Fixture: ramp template, quarter-point positions, noise estimator."""
    self.tf, self.nc = 10, 2
    # per-channel copy of a float32 ramp, scaled to half amplitude
    channel = sp.arange(self.tf).astype(sp.float32)
    self.xi = sp.vstack([channel] * self.nc).T * 0.5
    self.len = 1000
    self.pos = [int(i * self.len / 4.0) for i in xrange(1, 4)]
    self.noise = sp.randn(self.len, self.nc)
    self.ce = TimeSeriesCovE(tf_max=self.tf, nc=self.nc)
    self.ce.update(self.noise)
def load_input_data(tf):
    """Load signal, noise and templates from the matlab dump.

    Returns (signal, noise, ce, temps) where ce is a white-noise
    initialised TimeSeriesCovE and temps holds the multichannel
    templates unpacked from the concatenated matlab representation.
    """
    mat = loadmat('/home/phil/matlab.mat')
    noise = mat['noise'].T
    signal = mat['signal'].T
    nc = noise.shape[1]
    ce = TimeSeriesCovE.white_noise_init(tf, nc, std=.98)
    # matlab stores templates concatenated; unpack to (n, tf, nc)
    temps_ml = mat['T']
    n_temps = temps_ml.shape[0]
    temps = sp.empty((n_temps, temps_ml.shape[1] / nc, nc))
    for idx in xrange(n_temps):
        temps[idx] = mcvec_from_conc(temps_ml[idx], nc=nc)
    return signal, noise, ce, temps
class TestFilterNodes(ut.TestCase):
    """Tests matched-filter nodes against a directly computed filter."""

    def setUp(self):
        # ramp template on 2 channels, noise, and a covariance estimator
        self.tf = 10
        self.nc = 2
        self.xi = sp.vstack([sp.arange(self.tf).astype(sp.float32)] *
                            self.nc).T * 0.5
        self.len = 1000
        self.pos = [int(i * self.len / 4.0) for i in xrange(1, 4)]
        self.noise = sp.randn(self.len, self.nc)
        self.ce = TimeSeriesCovE(tf_max=self.tf, nc=self.nc)
        self.ce.update(self.noise)

    def testFilterTrivial(self):
        # build both filter nodes from the same template and estimator
        mf_h = MatchedFilterNode(self.tf, self.nc, self.ce)
        mf_h.append_xi_buf(self.xi, recalc=True)
        nmf_h = NormalisedMatchedFilterNode(self.tf, self.nc, self.ce)
        nmf_h.append_xi_buf(self.xi, recalc=True)
        # reference: f = xi^T C^-1, normaliser nf = f xi (computed in
        # concatenated space, then reshaped back to multichannel)
        f = sp.dot(mcvec_to_conc(self.xi), self.ce.get_icmx(tf=self.tf))
        nf = sp.dot(f, mcvec_to_conc(self.xi))
        f = mcvec_from_conc(f, nc=self.nc)
        assert_equal(mf_h.f, f)
        assert_equal(nmf_h.f, f / nf)
# NOTE(review): stray opening triple-quote below — appears to start a
# commented-out region; verify it is closed later in the file.
"""
class TestFilterNodes(ut.TestCase):
    """Tests matched-filter nodes against a directly computed filter."""

    def setUp(self):
        # ramp template on 2 channels, noise, and a covariance estimator
        self.tf = 10
        self.nc = 2
        self.xi = sp.vstack(
            [sp.arange(self.tf).astype(sp.float32)] * self.nc).T * 0.5
        self.len = 1000
        self.pos = [int(i * self.len / 4.0) for i in xrange(1, 4)]
        self.noise = sp.randn(self.len, self.nc)
        self.ce = TimeSeriesCovE(tf_max=self.tf, nc=self.nc)
        self.ce.update(self.noise)

    def testFilterTrivial(self):
        # build both filter nodes from the same template and estimator
        mf_h = MatchedFilterNode(self.tf, self.nc, self.ce)
        mf_h.append_xi_buf(self.xi, recalc=True)
        nmf_h = NormalisedMatchedFilterNode(self.tf, self.nc, self.ce)
        nmf_h.append_xi_buf(self.xi, recalc=True)
        # reference: f = xi^T C^-1, normaliser nf = f xi (computed in
        # concatenated space, then reshaped back to multichannel)
        f = sp.dot(mcvec_to_conc(self.xi), self.ce.get_icmx(tf=self.tf))
        nf = sp.dot(f, mcvec_to_conc(self.xi))
        f = mcvec_from_conc(f, nc=self.nc)
        assert_equal(mf_h.f, f)
        assert_equal(nmf_h.f, f / nf)
# NOTE(review): stray opening triple-quote below — appears to start a
# commented-out region; verify it is closed later in the file.
"""
def get_input_data(tf): noise = loadmat('/home/phil/matlab.mat')['noise'].T nc = noise.shape[1] spike_proto_sc = sp.cos(sp.linspace(-sp.pi, 3 * sp.pi, tf)) spike_proto_sc *= sp.hanning(tf) scale = sp.linspace(0, 2, tf) cvals = [(5., .5), (4., 9.), (3., 3.), (7., 2.5)] xi1 = sp.vstack([spike_proto_sc * cvals[i][0] * scale for i in xrange(nc)]).T xi2 = sp.vstack([spike_proto_sc * cvals[i][1] * scale[::-1] for i in xrange(nc)]).T temps = sp.asarray([xi1, xi2]) ce = TimeSeriesCovE.white_noise_init(tf, nc, std=.98) signal = sp.zeros_like(noise) NPOS = 4 LEN = len(noise) POS = [(int(i * LEN / (NPOS + 1)), 100) for i in xrange(1, NPOS + 1)] POS.append((100, 2)) POS.append((150, 2)) print POS for pos, tau in POS: signal[pos:pos + tf] += temps[0] signal[pos + tau:pos + tau + tf] += temps[1] return signal, noise, ce, temps
def testMainSingle(self, verbose=VERBOSE.PLOT): import time # setup V = VERBOSE(verbose) TF = 21 NC = 2 spike_proto_sc = sp.cos(sp.linspace(-sp.pi, 3 * sp.pi, TF)) spike_proto_sc *= sp.hanning(TF) scale = sp.linspace(0, 2, TF) xi1 = sp.vstack((spike_proto_sc * 5 * scale, spike_proto_sc * 4 * scale)).T xi2 = sp.vstack((spike_proto_sc * .5 * scale[::-1], spike_proto_sc * 9 * scale[::-1])).T templates = sp.asarray([xi1, xi2]) LEN = 2000 noise = sp.randn(LEN, NC) ce = TimeSeriesCovE(tf_max=TF, nc=NC) ce.update(noise) FB = BOTMNode( templates=templates, ce=ce, verbose=V, ovlp_taus=None) signal = sp.zeros_like(noise) NPOS = 4 POS = [(int(i * LEN / (NPOS + 1)), 100) for i in xrange(1, NPOS + 1)] POS.append((100, 2)) POS.append((150, 2)) for pos, tau in POS: signal[pos:pos + TF] += xi1 signal[pos + tau:pos + tau + TF] += xi2 x = sp.ascontiguousarray(signal + noise, dtype=sp.float32) # test against if V.has_print: print '### constructed spike times ###' test_u0 = sorted([t_tpl[0] for t_tpl in POS]) test_u1 = sorted([t_tpl[0] + t_tpl[1] for t_tpl in POS]) test_rval = {0: sp.array(test_u0) + TF / 2, 1: sp.array(test_u1) + TF / 2} if V.has_print: print test_rval # sort tic_o = time.clock() FB(x) toc_o = time.clock() if V.has_print: print '### sorting spike times ###' print FB.rval if V.has_plot: FB.plot_template_set(show=False) FB.plot_sorting(show=True) if V.has_print: print '###' print 'duration:', toc_o - tic_o for k in FB.rval: assert_array_almost_equal(FB.rval[k], test_rval[k], decimal=0)