def test_patterns_raw(self):
    """Raw pattern counting with and without windows, plus disk round-trip."""
    file_contents = np.load(
        os.path.join(os.path.dirname(__file__), 'test_data/tiny_spikes.npz'))
    # list(...) keeps this working on Python 3, where keys() is a non-indexable view
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    hdlog.info(spikes._spikes)

    patterns = PatternsRaw()
    patterns.chomp_spikes(spikes)
    hdlog.info(patterns._counts)
    self.assertEqual(len(patterns), 3)

    patterns = PatternsRaw()
    patterns.chomp_spikes(spikes, window_size=3)
    self.assertEqual(len(patterns), 4)

    file_contents = np.load(
        os.path.join(os.path.dirname(__file__), 'test_data/spikes_trials.npz'))
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    patterns = PatternsRaw()
    patterns.chomp_spikes(spikes, window_size=3)
    self.assertEqual(len(patterns), 9)

    # round-trip through disk and compare
    patterns.save(os.path.join(self.TMP_PATH, 'raw'))
    patterns2 = PatternsRaw.load(os.path.join(self.TMP_PATH, 'raw'))
    self.assertTrue(isinstance(patterns2, PatternsRaw))
    self.assertEqual(len(patterns), len(patterns2))
def test_patterns_hopfield(self):
    """Hopfield pattern chomping, persistence, and fixed-point sequence recording."""
    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/tiny_spikes.npz'))
    # list(...) keeps this working on Python 3, where keys() is a non-indexable view
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    learner = Learner(spikes)
    learner.learn_from_spikes(spikes)
    patterns = PatternsHopfield(learner=learner)
    patterns.chomp_spikes(spikes)
    self.assertEqual(len(patterns), 3)

    # round-trip through disk and compare
    patterns.save(os.path.join(self.TMP_PATH, 'patterns'))
    patterns2 = PatternsHopfield.load(os.path.join(self.TMP_PATH, 'patterns'))
    self.assertTrue(isinstance(patterns2, PatternsHopfield))
    self.assertEqual(len(patterns2), 3)
    self.assertEqual(len(patterns2.mtas), 3)
    self.assertEqual(len(patterns2.mtas_raw), 3)

    learner.learn_from_spikes(spikes, window_size=3)
    patterns = PatternsHopfield(learner=learner)
    patterns.chomp_spikes(spikes, window_size=3)
    self.assertEqual(len(patterns), 4)

    spikes_arr1 = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 0]])
    spikes = Spikes(spikes=spikes_arr1)
    learner = Learner(spikes)
    learner.learn_from_spikes(spikes)

    # test recording of the fixed-point sequence
    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/spikes_trials.npz'))
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    learner = Learner(spikes)
    learner.learn_from_spikes(spikes)
    patterns = PatternsHopfield(learner, save_sequence=True)
    patterns.chomp_spikes(spikes)
    self.assertEqual(patterns._sequence,
                     [0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1])

    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/spikes_trials.npz'))
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    learner = Learner(spikes)
    learner.learn_from_spikes(spikes, window_size=2)
    patterns = PatternsHopfield(learner, save_sequence=True)
    patterns.chomp_spikes(spikes, window_size=2)
    self.assertEqual(patterns._sequence,
                     [0, 1, 2, 3, 0, 1, 4, 5, 6, 5, 7, 3])
    hdlog.info(spikes._spikes)
    hdlog.info(patterns.pattern_to_trial_raster(3))
def test_saving(self):
    """Round-trip a Spikes object through save/load and compare rasters."""
    # NOTE(review): `test_saving` appears twice in this source; this earlier
    # definition is shadowed by the later identical one and never runs.
    raster = np.array([[1, 1, 1, 0, 1, 0],
                       [1, 1, 1, 1, 1, 1],
                       [0, 0, 0, 1, 0, 0]])
    original = Spikes(spikes=raster)
    hdlog.info(original.spikes)

    target = os.path.join(self.TMP_PATH, 'spikes')
    original.save(target)
    restored = Spikes.load(target)
    hdlog.info(restored.spikes)

    self.assertTrue((original.spikes == restored.spikes).all())
def test_saving(self):
    """Saving and reloading a Spikes instance must preserve the spike array."""
    # NOTE(review): identical duplicate of an earlier `test_saving` in this
    # source; this later definition is the one that actually executes.
    data = np.array([[1, 1, 1, 0, 1, 0],
                     [1, 1, 1, 1, 1, 1],
                     [0, 0, 0, 1, 0, 0]])
    spikes_obj = Spikes(spikes=data)
    hdlog.info(spikes_obj.spikes)

    path = os.path.join(self.TMP_PATH, 'spikes')
    spikes_obj.save(path)
    reloaded = Spikes.load(path)
    hdlog.info(reloaded.spikes)

    self.assertTrue((spikes_obj.spikes == reloaded.spikes).all())
def test_basic(self):
    """SpikeModel fit/chomp/persist plus several surrogate spike models."""
    np.random.seed(42)
    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/spikes_trials.npz'))
    # list(...) keeps this working on Python 3, where keys() is a non-indexable view
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])

    spikes_model = SpikeModel(spikes=spikes)
    spikes_model.fit(remove_zeros=True)
    spikes_model.chomp()

    # persistence round-trip preserves the pattern count
    spikes_model.save(os.path.join(self.TMP_PATH, 'spikes_model'))
    spikes_model2 = SpikeModel.load(os.path.join(self.TMP_PATH, 'spikes_model'))
    self.assertEqual(len(spikes_model.hopfield_patterns),
                     len(spikes_model2.hopfield_patterns))

    spikes_model.fit(remove_zeros=False)
    spikes_model.chomp()
    wss = [1, 2]
    counts, entropies = spikes_model.distinct_patterns_over_windows(
        wss, remove_zeros=False)

    bernoulli_model = BernoulliHomogeneous(spikes=spikes)
    bernoulli_model.fit()
    bernoulli_model.chomp()

    shuffle_model = Shuffled(spikes=spikes)
    shuffle_model.fit()
    shuffle_model.chomp()

    bernoulli_model = BernoulliHomogeneous(spikes=spikes)
    bernoulli_model.fit()
    bernoulli_model.chomp()
    wss = [1, 2, 3]
    counts, entropies = bernoulli_model.distinct_patterns_over_windows(wss)

    # sanity check on large Bernoulli example
    spikes_arr = np.random.randn(4, 10000)
    spikes = Spikes(spikes=spikes_arr)
    bernoulli_model = BernoulliHomogeneous(spikes=spikes)
    wss = [1, 2, 3]
    counts, entropies = bernoulli_model.distinct_patterns_over_windows(wss)

    bernoulli_inhom_model = BernoulliInhomogeneous(spikes=spikes)
    bernoulli_inhom_model.fit()
    bernoulli_inhom_model.chomp()

    dichotomized_gaussian = DichotomizedGaussian(spikes=spikes)
    dichotomized_gaussian.sample_from_model()

    dichotomized_gaussian_poiss = DichotomizedGaussianPoisson(spikes=spikes)
    spikes = dichotomized_gaussian_poiss.sample_from_model()
def sample_from_model(self, trials=None, trial_independence=True, reshape=False):
    """
    Returns a new Spikes object with spikes permuted in time.

    Parameters
    ----------
    trials : array_like, optional
        Trials passed through to the windowed result (default None, i.e. all)
    trial_independence : bool, optional
        If True, a different permutation is drawn for each trial; otherwise
        one shared permutation is applied to every trial (default True)
    reshape : bool, optional
        Accepted for signature compatibility; not used by this method
        (default False)

    Returns
    -------
    spikes : :class:`.Spikes`
        Windowed view of the time-permuted spikes
    """
    original = self._original_spikes
    # drawn up-front so the shared-permutation case has one to use; also
    # keeps the RNG draw sequence identical to the previous implementation
    idx = np.random.permutation(original.M)
    new_arr = np.zeros(original.spikes.shape)
    for i in range(original.T):  # range, not xrange: Python 3 compatible
        if trial_independence:
            idx = np.random.permutation(original.M)
        # fancy indexing already copies, so no intermediate .copy() is needed
        new_arr[i] = original.spikes[i, :, :][:, idx]
    return Spikes(new_arr).to_windowed(window_size=self._window_size,
                                       trials=trials)
def read_spk_folder(spk_folder, bin_size=1):
    """
    Loads spike times from all spk files in a given folder.
    The j-th item in the list corresponds to the j-th neuron.
    It is the 1d array of spike times (microsec) for that neuron.

    Parameters
    ----------
    spk_folder : str
        Path containing spk file names
    bin_size : int, optional
        Bin size in milliseconds (default 1)

    Returns
    -------
    spikes : numpy array
        numpy array containing binned spike times
    """
    from bitstring import Bits

    neuron_to_file = []
    time_stamps = []
    bin_size = bin_size or 1

    for fn in os.listdir(spk_folder):
        ext = os.path.splitext(fn)[1]
        if ext in ('.spk', ):  # Blanche spike format
            neuron_to_file.append(fn)
            # with-statement closes the file handle (previously leaked)
            with open(os.path.join(spk_folder, fn), 'rb') as f:
                bits = Bits(f)
                # // keeps the count an integer under Python 3 as well
                fmt = str(bits.length // 64) + ' * (intle:64)'
                time_stamps.append(bits.unpack(fmt))
    spikes = SpkReader.load_from_spikes_times(time_stamps, bin_size=bin_size)
    return Spikes(spikes)
def read_spk_files(spk_files, bin_size=1):
    """
    Loads spike times from a list of spk files.
    The j-th item in the list corresponds to the j-th neuron.
    It is the 1d array of spike times (microsec) for that neuron.

    Parameters
    ----------
    spk_files : list of str
        List of strings containing spk file names
    bin_size : int, optional
        Bin size in milliseconds (default 1)

    Returns
    -------
    spikes : numpy array
        numpy array containing binned spike times
    """
    from bitstring import Bits

    neuron_to_file = []
    time_stamps = []
    bin_size = bin_size or 1

    for fn in spk_files:
        neuron_to_file.append(fn)
        # with-statement closes the file handle (previously leaked)
        with open(fn, 'rb') as f:
            bits = Bits(f)
            # // keeps the count an integer under Python 3 as well
            fmt = str(bits.length // 64) + ' * (intle:64)'
            time_stamps.append(bits.unpack(fmt))
    spikes = SpkReader.load_from_spikes_times(time_stamps, bin_size=bin_size)
    return Spikes(spikes)
def test_basic(self):
    """Learner: fitting with several window sizes plus parameter persistence."""
    file_contents = np.load(
        os.path.join(os.path.dirname(__file__), 'test_data/tiny_spikes.npz'))
    # list(...) keeps this working on Python 3, where keys() is a non-indexable view
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    learner = Learner(spikes)
    self.assertEqual(learner._spikes.N, 3)

    learner.learn_from_spikes()
    self.assertTrue(learner._network.J.mean() != 0.)
    learner.learn_from_spikes(spikes)
    self.assertTrue(learner._network.J.mean() != 0.)
    learner.learn_from_spikes(spikes, window_size=3)
    self.assertTrue(learner._network.J.mean() != 0.)
    self.assertTrue(learner._network.J.shape == (9, 9))

    # custom params must survive a save/load round-trip
    learner._params['hi'] = 'chris'
    learner.save(os.path.join(self.TMP_PATH, 'learner'))
    learner2 = Learner.load(os.path.join(self.TMP_PATH, 'learner'))
    self.assertEqual(learner2.params['hi'], 'chris')
    self.assertEqual(learner2.window_size, 3)
    self.assertTrue(learner2.network.J.mean() != 0.)
    self.assertTrue(learner2.network.J.shape == (9, 9))
def fit_ising_matlab(n, X):
    """
    Fit an Ising model to binary data arriving from MATLAB.

    Parameters
    ----------
    n : int
        Number of units (rows of the reshaped data)
    X : array_like
        Flat binary data, converted via _convert_2d_array_matlab

    Returns
    -------
    (J, theta) : tuple of list
        Learned coupling matrix and bias vector, flattened to plain lists
        (MATLAB-friendly)
    """
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here
    X = _convert_2d_array_matlab(np.array(X).astype(int), n)
    spikes = Spikes(spikes=X)
    learner = Learner(spikes)
    learner.learn_from_spikes(spikes, window_size=1)
    J = learner.network.J
    theta = learner.network.theta
    return J.ravel().tolist(), theta.ravel().tolist()
def test_basic(self):
    """Spikes: basic stats, rasterization to PNG, restriction to active neurons."""
    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/tiny_spikes.npz'))
    # list(...) keeps this working on Python 3, where keys() is a non-indexable view
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    self.assertEqual(spikes._spikes.sum(), 9)
    self.assertEqual(spikes.rasterize(stop=5).sum(), 7)
    spikes.rasterize(save_png_name=os.path.join(self.TMP_PATH, 'spikes'))
    self.assertTrue(os.path.exists(os.path.join(self.TMP_PATH, 'spikes.png')))

    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/spikes_trials.npz'))
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    spikes.rasterize(save_png_name=os.path.join(self.TMP_PATH, 'spikes'))
    self.assertTrue(os.path.exists(os.path.join(self.TMP_PATH, 'spikes.png')))

    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/spikes_trials.npz'))
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    spikes.restrict_to_most_active_neurons(top_neurons=2)
    self.assertEqual(spikes._N, 2)
def sample_from_model(self, averaging_window_size=20, trials=None, reshape=False):
    """
    Sample Bernoulli spikes with probabilities estimated from time-local
    firing rates (averaged over windows of `averaging_window_size` bins).

    Parameters
    ----------
    averaging_window_size : int, optional
        Number of bins averaged to estimate each local rate (default 20)
    trials : array_like, optional
        Trials to sample for (default None, i.e. all trials)
    reshape : bool, optional
        If True, return a 2d array of stacked window vectors instead of a
        Spikes object (default False)

    Returns
    -------
    Spikes or numpy array
        Sampled spikes (Spikes instance, or 2d array when reshape=True)
    """
    trials = trials or range(self._original_spikes.T)  # range: Py3-safe
    X = np.zeros((len(trials),
                  self._window_size * self._original_spikes.N,
                  self._original_spikes.M - self._window_size + 1))
    for c, t in enumerate(trials):
        num_neurons = self._original_spikes.N
        num_samples = self._original_spikes.M
        spikes = self._original_spikes.spikes
        # NOTE(review): the statistics below are always taken from trial 0,
        # not trial t — looks unintended; confirm before changing.
        ps = []
        for i in range(num_neurons):
            # // keeps these integer divisions exact under Python 3 as well
            ps.append([spikes[0, i, 0:averaging_window_size].mean()] + [
                spikes[0, i, (j - 1) * averaging_window_size:
                             j * averaging_window_size].mean()
                for j in range(1, num_samples // averaging_window_size)])
        ps = np.array(ps)
        # NOTE(review): only the first N of the window_size*N rows of X are
        # filled here — confirm this is intended.
        for j in range(num_neurons):
            for i in range(0, self._original_spikes.M - self._window_size + 1):
                X[c, j, i] = int(
                    np.random.random() < ps[j, i // averaging_window_size])
    if reshape:
        Y = np.zeros((X.shape[0] * X.shape[2], X.shape[1]))
        tot = 0
        for t in range(len(trials)):
            for c in range(X.shape[2]):
                Y[tot, :] = X[t, :, c]
                tot += 1
        return Y
    return Spikes(spikes=X)
def apply_dynamics(self, spikes, window_size=1, trials=None, reshape=True,
                   as_spikes=True):
    """
    Computes Hopfield fixed points over data obtained from `spikes`
    using a sliding window of size `window_size`.

    Parameters
    ----------
    spikes : :class:`.Spikes` or numpy array
        Instance of :class:`.Spikes` to operate on or numpy array of spike data
    window_size : int, optional
        Window size to use (default 1)
    trials : int, optional
        Number of trials to use for reshape (default None)
    reshape : bool, optional
        Flag whether to reshape the spike vectors into matrix form
        before returning (default True)
    as_spikes : bool, optional
        Flag whether to return a Spikes class instance (when True)
        or a plain numpy array (default True)

    Returns
    -------
    spikes : :class:`.Spikes`
        Instance of spikes class with converged spikes
    """
    if isinstance(spikes, Spikes):
        data = spikes.to_windowed(window_size=window_size, trials=trials,
                                  reshape=True)
    else:
        data = spikes.T

    # TODO warn if no network
    converged = self._learner.network(data)

    if reshape:
        n_units = converged.shape[1]
        n_trials = spikes.T if trials is None else len(trials)
        n_bins = converged.shape[0] // n_trials
        stacked = np.zeros((n_trials, n_units, n_bins))
        for unit in range(n_units):
            stacked[:, unit, :] = converged[:, unit].reshape((n_trials, n_bins))
        converged = stacked

    return Spikes(spikes=converged) if as_spikes else converged
def sample_from_model(self, trials=None, reshape=False):
    """
    Sample spikes from a dichotomized Gaussian whose first two moments are
    fit per trial to the original spikes.

    Parameters
    ----------
    trials : array_like, optional
        Trials to sample for (default None, i.e. all trials)
    reshape : bool, optional
        If True, return a 2d array of stacked bin vectors instead of a
        Spikes object (default False)

    Returns
    -------
    Spikes or numpy array
        Sampled spikes (Spikes instance, or 2d array when reshape=True)
    """
    trials = trials or range(self._original_spikes.T)
    spikes_windowed = self._original_spikes.to_windowed(
        self._window_size, trials)
    X = np.zeros(
        (len(trials), self._original_spikes.N, self._original_spikes.M))

    # per-trial statistics
    for c, t in enumerate(trials):
        bin_means = spikes_windowed.spikes[t, :, :].mean(axis=1)
        bin_cov = np.cov(spikes_windowed.spikes[t, :, :])
        gauss_means, gauss_cov = find_latent_gaussian(bin_means, bin_cov)

        for i in range(0, spikes_windowed.M, self._window_size):
            x = sample_from_dichotomized_gaussian(bin_means, bin_cov, 1,
                                                  gauss_means, gauss_cov)
            X[c, :, i:i + self._window_size] = \
                x.reshape(self._original_spikes.N, self._window_size)

        # fill the trailing stub if M is not a multiple of the window size
        if spikes_windowed.M % self._window_size != 0:
            stub = spikes_windowed.M % self._window_size
            x = sample_from_dichotomized_gaussian(bin_means, bin_cov, 1,
                                                  gauss_means, gauss_cov)
            X[c, :, spikes_windowed.M - stub:spikes_windowed.M] = \
                x.reshape(self._original_spikes.N, self._window_size)[:, :stub]

    if reshape:
        Y = np.zeros((X.shape[0] * X.shape[2], X.shape[1]))
        tot = 0
        # range, not xrange: consistent with the rest of the method and Py3-safe
        for t in range(len(trials)):
            for c in range(X.shape[2]):
                Y[tot, :] = X[t, :, c]
                tot += 1
        return Y
    return Spikes(spikes=X)
def sample_from_model(self, trials=None, reshape=False):
    """
    Sample spikes from a dichotomized Gaussian with Poisson marginals fit
    per trial to the original spikes.

    Parameters
    ----------
    trials : array_like, optional
        Trials to sample for (default None, i.e. all trials)
    reshape : bool, optional
        If True, return a 2d array of stacked bin vectors instead of a
        Spikes object (default False)

    Returns
    -------
    Spikes or numpy array
        Sampled spikes (Spikes instance, or 2d array when reshape=True)
    """
    trials = trials or range(self._original_spikes.T)
    spikes_windowed = self._original_spikes.to_windowed(
        self._window_size, trials)
    X = np.zeros(
        (len(trials), self._original_spikes.N, self._original_spikes.M))

    # per-trial statistics
    for c, t in enumerate(trials):
        bin_means = spikes_windowed.spikes[t, :, :].mean(axis=1)
        bin_cov = np.cov(spikes_windowed.spikes[t, :, :])

        # calculate marginal distribution of Poisson
        pmfs, cmfs, supports = poisson_marginals(bin_means)

        # find parameters of the latent dichotomized Gaussian
        gauss_means, gauss_cov, joints = find_dg_any_marginal(
            pmfs, bin_cov, supports)

        # generate samples
        samples, hists = sample_dg_any_marginal(
            gauss_means, gauss_cov, self._original_spikes.M, supports)
        X[c, :, :] = samples.T

    if reshape:
        Y = np.zeros((X.shape[0] * X.shape[2], X.shape[1]))
        tot = 0
        # range, not xrange: consistent with the rest of the method and Py3-safe
        for t in range(len(trials)):
            for c in range(X.shape[2]):
                Y[tot, :] = X[t, :, c]
                tot += 1
        return Y
    return Spikes(spikes=X)
def test_basic_patterns(self):
    """Chomp simulated spikes and verify pattern counts and sequence lengths."""
    np.random.seed(42)
    raw = (np.random.random((2, 10, 200)) < .05).astype(int)
    raw[0, [1, 5], ::5] = 1
    raw[1, [2, 3, 6], ::11] = 1
    data = Spikes(spikes=raw)

    model = SpikeModel(spikes=data)
    model.fit()
    model.chomp()

    # 2 trials x 200 bins = 400 entries in each sequence
    self.assertEqual(len(model.hopfield_patterns.sequence), 400)
    self.assertEqual(len(model.hopfield_patterns), 3)
    self.assertEqual(len(model.raw_patterns.sequence), 400)
    self.assertEqual(len(model.raw_patterns), 51)
def test_counter(self):
    """Counter: chomping, windowing, merging, persistence, and sequences."""
    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/tiny_spikes.npz'))
    # list(...) keeps this working on Python 3, where keys() is a non-indexable view
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    hdlog.info(spikes._spikes)
    counter = Counter()
    counter.chomp_spikes(spikes)
    hdlog.info(counter._counts)
    self.assertEqual(len(counter), 4)

    counter = Counter()
    counter.chomp_spikes(spikes, window_size=3)
    self.assertEqual(len(counter), 4)

    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/spikes_trials.npz'))
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    counter = Counter()
    counter.chomp_spikes(spikes, window_size=3)
    self.assertEqual(len(counter), 9)

    # persistence round-trip
    counter.save(os.path.join(self.TMP_PATH, 'counter'))
    counter2 = Counter.load(os.path.join(self.TMP_PATH, 'counter'))
    self.assertTrue(isinstance(counter2, Counter))
    self.assertEqual(len(counter), len(counter2))

    # merging and addition of counters
    spikes_arr1 = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 0]])
    spikes = Spikes(spikes=spikes_arr1)
    counter1 = Counter()
    counter1.chomp_spikes(spikes)
    counter2 = Counter()
    counter2.chomp_spikes(spikes)
    counter2.merge_counts(counter1)
    self.assertEqual(sum(counter2._counts.values()), 6)
    counter3 = counter2 + counter1
    self.assertEqual(counter3, counter2)
    self.assertEqual(sum(counter3.counts.values()), 9)

    spikes_arr2 = np.array([[0, 0, 1], [1, 0, 1], [0, 0, 0]])
    spikes = Spikes(spikes=spikes_arr2)
    counter4 = Counter().chomp_spikes(spikes).merge_counts(counter3)
    self.assertEqual(len(counter4.counts.keys()), 5)
    self.assertEqual(len(counter4.patterns), 5)

    # sequence recording
    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/spikes_trials.npz'))
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    counter = Counter(save_sequence=True)
    counter.chomp_spikes(spikes)
    self.assertEqual(counter._sequence,
                     [0, 1, 0, 2, 3, 4, 1, 5, 4, 6, 2, 0, 6, 2, 2])

    np.random.seed(42)
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here
    spikes_arr = (np.random.randn(5, 10000) < .05).astype(int)
    spikes = Spikes(spikes=spikes_arr)
    empirical = PatternsRaw()
    empirical.chomp_spikes(spikes)
    empirical_w2 = PatternsRaw()
    empirical_w2.chomp_spikes(spikes, window_size=2)
    # for iid spikes, window-2 entropy should be about twice window-1 entropy
    self.assertTrue(
        np.abs(empirical_w2.entropy() - 2 * empirical.entropy()) < .1)
def sample_from_model(self, J=None, theta=None, trials=None, reshape=False):
    """
    Returns a new Spikes object with iid Ising spike trains
    (Ising model determined by learning with MPF).

    .. warning: MIGHT NOT BE WORKING PROPERLY!

    Parameters
    ----------
    J : numpy array, optional
        Coupling matrix; if None it is learned per trial (default None)
    theta : numpy array, optional
        Bias vector; if None it is learned per trial (default None)
    trials : array_like, optional
        Trials to sample for (default None, i.e. all trials)
    reshape : bool, optional
        Accepted for signature compatibility; presently ignored by this
        method — confirm before relying on it (default False)

    Returns
    -------
    spikes : :class:`.Spikes`
        Sampled Ising spike trains
    """
    trials = trials or range(self._original_spikes.T)
    samples = np.zeros(
        (len(trials), self._original_spikes.N, self._original_spikes.M))
    learner = Learner(spikes=self._original_spikes)

    # when either parameter is missing, refit the network on every trial
    fit_per_trial = J is None or theta is None
    for c, t in enumerate(trials):
        if fit_per_trial:
            learner.learn_from_spikes(window_size=1, trials=[t])
            J = learner._network.J
            theta = learner._network.theta
        samples[c, :, :] = sample_from_ising(J, theta,
                                             self._original_spikes.M)
    return Spikes(spikes=samples)
def sample_from_model(self, trials=None, reshape=False):
    """
    Returns a Spikes object of a 3d numpy array of windowed iid Bernoulli
    spike trains (with probabilities = spike rates of each neuron in self
    at trial t).

    X: T (num trials) x (window_size * N) x (M - window_size + 1) binary
    vectors out of a spike time series.
    reshape: returns a T*(M - window_size + 1) x (ws * N) numpy binary
    array instead.

    Parameters
    ----------
    trials : array_like, optional
        Trials to sample for (default None, i.e. all trials)
    reshape : bool, optional
        If True, return a 2d array of stacked window vectors instead of a
        Spikes object (default False)

    Returns
    -------
    Spikes or numpy array
        Sampled spikes (Spikes instance, or 2d array when reshape=True)
    """
    trials = trials or range(self._original_spikes.T)  # range: Py3-safe
    X = np.zeros((len(trials),
                  self._window_size * self._original_spikes.N,
                  self._original_spikes.M - self._window_size + 1))
    sample_spikes = self._original_spikes.to_windowed(
        trials=trials, window_size=self._window_size)
    for c, t in enumerate(trials):
        # per-neuron firing probabilities over the windowed trial
        p = sample_spikes.spikes[c, :, :].mean(axis=1)
        X[c, :, :] = sample_from_bernoulli(
            p, self._original_spikes.M - self._window_size + 1)
    if reshape:
        Y = np.zeros((X.shape[0] * X.shape[2], X.shape[1]))
        tot = 0
        for t in range(len(trials)):
            for c in range(X.shape[2]):
                Y[tot, :] = X[t, :, c]
                tot += 1
        return Y
    return Spikes(spikes=X)
def test_basic(self):
    """Spikes: basic stats, rasterization to PNG, restriction to active neurons."""
    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/tiny_spikes.npz'))
    # list(...) keeps this working on Python 3, where keys() is a non-indexable view
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    self.assertEqual(spikes._spikes.sum(), 10)
    self.assertEqual(spikes.rasterize(stop=5).sum(), 8)
    spikes.rasterize(save_png_name=os.path.join(self.TMP_PATH, 'spikes'))
    self.assertTrue(os.path.exists(os.path.join(self.TMP_PATH, 'spikes.png')))

    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/spikes_trials.npz'))
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    spikes.rasterize(save_png_name=os.path.join(self.TMP_PATH, 'spikes'))
    self.assertTrue(os.path.exists(os.path.join(self.TMP_PATH, 'spikes.png')))

    file_contents = np.load(os.path.join(os.path.dirname(__file__),
                                         'test_data/spikes_trials.npz'))
    spikes = Spikes(file_contents[list(file_contents.keys())[0]])
    spikes.restrict_to_most_active_neurons(top_neurons=2)
    self.assertEqual(spikes._N, 2)
#
# C. Hillar, Dec. 2014
#
import numpy as np
import matplotlib.pyplot as plt

from hdnet.stimulus import Stimulus
from hdnet.spikes import Spikes
from hdnet.spikes_model import SpikeModel, BernoulliHomogeneous, DichotomizedGaussian

# Build a small simulated data set: 2 trials, 10 neurons, 200 time bins
spikes = (np.random.random((2, 10, 200)) < .05).astype(int)
spikes[0, [1, 5], ::5] = 1  # insert correlations
spikes[1, [2, 3, 6], ::11] = 1  # insert correlations
spikes = Spikes(spikes=spikes)

# Visualize the raw raster; pause until the user confirms
plt.matshow(spikes.rasterize(), cmap='gray')
plt.title('Raw spikes')
plt.show()
buff = input('Press a key to continue!')
plt.close()
# spikes.rasterize(save_png_name='raster')

# Visualize the per-trial covariance matrices, stacked vertically
plt.matshow(spikes.covariance().reshape((2 * 10, 10)), cmap='gray')
plt.title('Raw spikes covariance')
plt.show()
buff = input('Press a key to continue!')
plt.close()
# spikes.covariance(save_png_name='simul_cov_matrices')
# C. Hillar, Dec. 2014
#
import numpy as np
import matplotlib.pyplot as plt

from hdnet.stimulus import Stimulus
from hdnet.spikes import Spikes
from hdnet.spikes_model import SpikeModel, BernoulliHomogeneous, DichotomizedGaussian

# Let's first make up some simulated spikes: 2 trials
spikes = (np.random.random((2, 10, 200)) < .05).astype(int)
spikes[0, [1, 5], ::5] = 1  # insert correlations
spikes[1, [2, 3, 6], ::11] = 1  # insert correlations
spikes = Spikes(spikes=spikes)

# let's look at them: quick save as PNG or make PSTH pyplot
plt.matshow(spikes.rasterize(), cmap='gray')
plt.title('Raw spikes')
# spikes.rasterize(save_png_name='raster')

plt.matshow(spikes.covariance().reshape((2 * 10, 10)), cmap='gray')
plt.title('Raw spikes covariance')
# spikes.covariance(save_png_name='simul_cov_matrices')

# let's examine the structure in spikes using a spike modeler
spikes_model = BernoulliHomogeneous(spikes=spikes)
BH_sample_spikes = spikes_model.sample_from_model()
plt.matshow(BH_sample_spikes.rasterize(), cmap='gray')
plt.title('BernoulliHomogeneous sample')
# single-argument print(...) behaves identically on Python 2 and 3,
# unlike the Python-2-only `print "..."` statement form used before
print("%1.4f means" % BH_sample_spikes.spikes.mean())
def bin_spike_times(spike_times, bin_size, cells=None, t_min=None, t_max=None):
    """
    Bins given spike_times into bins of size bin_size. Spike times expected
    in seconds (i.e. 1.0 for a spike at second 1, 0.5 for a spike happening
    at 500ms). Takes optional arguments cells, t_min and t_max that can be
    used to restrict the cell indices (defaults to all cells) and time range
    (default t_min = minimum of all spike times in spike_times, default
    t_max = maximum of all spike times in spike_times).

    Parameters
    ----------
    spike_times : 2d numpy array
        2d array of spike times of cells, cells as rows
    bin_size : float
        bin size to be used for binning (1ms = 0.001)
    cells : array_like, optional
        indices of cells to process (default None, i.e. all cells)
    t_min : float, optional
        time of leftmost bin (default None)
    t_max : float, optional
        time of rightmost bin (default None)

    Returns
    -------
    spikes : :class:`.Spikes`
        Spikes class containing binned spikes.
    """
    t_min_dat = np.inf
    t_max_dat = -np.inf
    spike_times = np.atleast_1d(spike_times)
    if cells is None:
        cells = np.array(range(len(spike_times)))

    spike_times_nonempty = [x for x in spike_times[cells] if len(x) > 0]
    if len(spike_times_nonempty) > 0:
        # list comprehensions instead of map(): map() returns a lazy
        # iterator on Python 3, which cannot be concatenated to a list
        t_min_dat = min([t_min_dat] + [min(x) for x in spike_times_nonempty])
        t_max_dat = max([t_max_dat] + [max(x) for x in spike_times_nonempty])

    if t_min is None:
        t_min = t_min_dat
    if t_max is None:
        t_max = t_max_dat

    if t_min == np.inf or t_max == -np.inf:
        hdlog.info('No spikes!')
        # wrap in Spikes for a consistent return type (previously returned
        # a bare ndarray, contradicting the documented contract)
        return Spikes(spikes=np.zeros((len(spike_times), 1)))

    bins = np.arange(t_min, t_max + bin_size, bin_size)
    binned = np.zeros((len(spike_times[cells]), len(bins)), dtype=int)
    hdlog.info(
        'Binning {c} cells between t_min={m} and t_max={M}, {bins} bins'.
        format(c=binned.shape[0], m=t_min, M=t_max, bins=len(bins)))

    # iterate only the selected cells: `binned` has one row per entry of
    # spike_times[cells], so iterating all of spike_times binned the wrong
    # rows (and could overflow) whenever `cells` was a proper subset
    for pos, st in enumerate(spike_times[cells]):
        if len(st) > 0:
            indices = np.digitize(st, bins) - 1
            binned[pos, indices] = 1
    return Spikes(spikes=binned)
def bin_spike_times_trials(spike_times, bin_size, cells=None, t_min=None, t_max=None):
    """
    Bins given spike_times into bins of size bin_size. Spike times expected
    in seconds (i.e. 1.0 for a spike at second 1, 0.5 for a spike happening
    at 500ms). Takes optional arguments cells, t_min and t_max that can be
    used to restrict the cell indices (defaults to all cells) and time range
    (default t_min = minimum of all spike times in spike_times, default
    t_max = maximum of all spike times in spike_times).

    Parameters
    ----------
    spike_times : 3d numpy array
        3d array of spike times of cells, dimensions: trials, cells, spike times
    bin_size : float
        bin size to be used for binning (1ms = 0.001)
    cells : array_like, optional
        indices of cells to process (default None, i.e. all cells)
    t_min : float, optional
        time of leftmost bin (default None)
    t_max : float, optional
        time of rightmost bin (default None)

    Returns
    -------
    spikes : :class:`.Spikes`
        Spikes class containing binned spikes.
    """
    t_min_dat = np.inf
    t_max_dat = -np.inf
    if cells is None:
        cells = np.array(range(spike_times.shape[1]))

    # establish a common time range across all trials
    for t in range(spike_times.shape[0]):
        spike_times_nonempty = [
            x for x in spike_times[t][cells] if len(x) > 0
        ]
        if len(spike_times_nonempty) > 0:
            # list comprehensions instead of map(): map() returns a lazy
            # iterator on Python 3, which cannot be concatenated to a list
            t_min_dat = min([t_min_dat] + [min(x) for x in spike_times_nonempty])
            t_max_dat = max([t_max_dat] + [max(x) for x in spike_times_nonempty])
    if t_min is None:
        t_min = t_min_dat
    if t_max is None:
        t_max = t_max_dat

    # bin each trial over the shared [t_min, t_max] range and stack
    alls = []
    for t in range(spike_times.shape[0]):
        s = Binner.bin_spike_times(spike_times[t], cells=cells,
                                   bin_size=bin_size, t_min=t_min,
                                   t_max=t_max)
        alls.append(s.spikes[0])
    return Spikes(np.array(alls))