def test_isi_empty():
    """ISI distance/profile for edge cases: empty and single-spike trains."""
    # Case 1: two empty spike trains -> distance and profile are all zero.
    train_a = SpikeTrain([], edges=(0.0, 1.0))
    train_b = SpikeTrain([], edges=(0.0, 1.0))
    dist = spk.isi_distance(train_a, train_b)
    assert_allclose(dist, 0.0)
    profile = spk.isi_profile(train_a, train_b)
    assert_allclose(dist, profile.avrg())
    assert_array_equal(profile.x, [0.0, 1.0])
    assert_array_equal(profile.y, [0.0, ])

    # Case 2: empty train vs a single spike at t = 0.4.
    train_a = SpikeTrain([], edges=(0.0, 1.0))
    train_b = SpikeTrain([0.4, ], edges=(0.0, 1.0))
    dist = spk.isi_distance(train_a, train_b)
    assert_allclose(dist, 0.6 * 0.4 + 0.4 * 0.6)
    profile = spk.isi_profile(train_a, train_b)
    assert_allclose(dist, profile.avrg())
    assert_array_equal(profile.x, [0.0, 0.4, 1.0])
    assert_array_equal(profile.y, [0.6, 0.4])

    # Case 3: two single-spike trains at different times.
    train_a = SpikeTrain([0.6, ], edges=(0.0, 1.0))
    train_b = SpikeTrain([0.4, ], edges=(0.0, 1.0))
    dist = spk.isi_distance(train_a, train_b)
    assert_almost_equal(dist, 0.2 / 0.6 * 0.4 + 0.0 + 0.2 / 0.6 * 0.4,
                        decimal=15)
    profile = spk.isi_profile(train_a, train_b)
    assert_allclose(dist, profile.avrg())
    assert_array_almost_equal(profile.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
    assert_array_almost_equal(profile.y, [0.2 / 0.6, 0.0, 0.2 / 0.6],
                              decimal=15)
def test_isi_empty():
    """ISI distance/profile for edge cases: empty and single-spike trains.

    NOTE: the computed distances are floats, so scalar comparisons use
    assert_almost_equal instead of exact equality (assert_equal), which
    is brittle for floating-point results.
    """
    # Case 1: two empty spike trains -> distance and profile are all zero.
    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([], edges=(0.0, 1.0))
    d = spk.isi_distance(st1, st2)
    assert_almost_equal(d, 0.0, decimal=15)
    prof = spk.isi_profile(st1, st2)
    assert_almost_equal(d, prof.avrg(), decimal=15)
    assert_array_equal(prof.x, [0.0, 1.0])
    assert_array_equal(prof.y, [0.0, ])
    # Case 2: empty train vs a single spike at t = 0.4.
    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
    d = spk.isi_distance(st1, st2)
    assert_almost_equal(d, 0.6*0.4+0.4*0.6, decimal=15)
    prof = spk.isi_profile(st1, st2)
    assert_almost_equal(d, prof.avrg(), decimal=15)
    assert_array_equal(prof.x, [0.0, 0.4, 1.0])
    assert_array_equal(prof.y, [0.6, 0.4])
    # Case 3: two single-spike trains at different times.
    st1 = SpikeTrain([0.6, ], edges=(0.0, 1.0))
    st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
    d = spk.isi_distance(st1, st2)
    assert_almost_equal(d, 0.2/0.6*0.4 + 0.0 + 0.2/0.6*0.4, decimal=15)
    prof = spk.isi_profile(st1, st2)
    assert_almost_equal(d, prof.avrg(), decimal=15)
    assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
    assert_array_almost_equal(prof.y, [0.2/0.6, 0.0, 0.2/0.6], decimal=15)
def test_isi():
    """ISI profile and distance against pen-and-paper reference values.

    Fixes: removed leftover debug print statements, and replaced exact
    float equality on the averaged distance with a decimal=15 comparison
    (exact equality of independently computed floats is brittle).
    """
    # generate two spike trains:
    t1 = SpikeTrain([0.2, 0.4, 0.6, 0.7], 1.0)
    t2 = SpikeTrain([0.3, 0.45, 0.8, 0.9, 0.95], 1.0)

    # pen&paper calculation of the isi distance
    expected_times = [0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0]
    expected_isi = [
        0.1 / 0.3, 0.1 / 0.3, 0.05 / 0.2, 0.05 / 0.2, 0.15 / 0.35,
        0.25 / 0.35, 0.05 / 0.35, 0.2 / 0.3, 0.25 / 0.3, 0.25 / 0.3
    ]
    expected_times = np.array(expected_times)
    expected_isi = np.array(expected_isi)
    # time-weighted average of the piecewise-constant profile
    expected_isi_val = sum(
        (expected_times[1:] - expected_times[:-1]) *
        expected_isi) / (expected_times[-1] - expected_times[0])

    f = spk.isi_profile(t1, t2)
    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y, expected_isi, decimal=15)
    assert_array_almost_equal(f.avrg(), expected_isi_val, decimal=15)
    assert_array_almost_equal(spk.isi_distance(t1, t2), expected_isi_val,
                              decimal=15)

    # check with some equal spike times
    t1 = SpikeTrain([0.2, 0.4, 0.6], [0.0, 1.0])
    t2 = SpikeTrain([0.1, 0.4, 0.5, 0.6], [0.0, 1.0])

    expected_times = [0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0]
    expected_isi = [
        0.1 / 0.3, 0.1 / 0.3, 0.1 / 0.3, 0.1 / 0.2, 0.1 / 0.2, 0.0 / 0.5
    ]
    expected_times = np.array(expected_times)
    expected_isi = np.array(expected_isi)
    expected_isi_val = sum(
        (expected_times[1:] - expected_times[:-1]) *
        expected_isi) / (expected_times[-1] - expected_times[0])

    f = spk.isi_profile(t1, t2)
    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y, expected_isi, decimal=15)
    assert_array_almost_equal(f.avrg(), expected_isi_val, decimal=15)
    assert_array_almost_equal(spk.isi_distance(t1, t2), expected_isi_val,
                              decimal=15)
def distance_isi(spike_train_a, spike_train_b, interval):
    """Kreutz ISI-distance between two spike trains, computed with PySpike.

    The raw spike-time sequences are wrapped in ``pyspike.SpikeTrain``
    objects over the given observation interval before the call.
    """
    train_1 = pyspike.SpikeTrain(spike_train_a, interval)
    train_2 = pyspike.SpikeTrain(spike_train_b, interval)
    return pyspike.isi_distance(train_1, train_2, interval)
def test_regression_spiky():
    """Regression test against reference values obtained from SPIKY.

    Fix: computed float distances were compared with assert_equal
    (exact equality); they are now compared with assert_almost_equal
    at decimal=15, matching the style already used for isi_dist.
    """
    # standard example
    st1 = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st2 = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st1, st2)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
    isi_profile = spk.isi_profile(st1, st2)
    # profile is constant 0.1/1.1 everywhere for this pair
    assert_almost_equal(isi_profile.y, 0.1/1.1 * np.ones_like(isi_profile.y),
                        decimal=15)

    spike_dist = spk.spike_distance(st1, st2)
    assert_almost_equal(spike_dist, 2.1105878248735391e-01, decimal=15)

    spike_sync = spk.spike_sync(st1, st2)
    assert_almost_equal(spike_sync, 8.6956521739130432e-01, decimal=15)

    # multivariate check
    spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
                                                  (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1)+len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_dist, 2.4432433330596512e-01, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_sync, 0.7183531505298066, decimal=15)
def test_regression_spiky():
    """Regression test against reference values obtained from SPIKY.

    Fix: computed float distances were compared with assert_equal
    (exact equality); they are now compared with assert_almost_equal
    at decimal=15, matching the style already used elsewhere in this
    test.
    """
    # standard example
    st1 = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st2 = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st1, st2)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
    isi_profile = spk.isi_profile(st1, st2)
    # profile is constant 0.1/1.1 everywhere for this pair
    assert_almost_equal(isi_profile.y,
                        0.1 / 1.1 * np.ones_like(isi_profile.y), decimal=15)

    spike_dist = spk.spike_distance(st1, st2)
    assert_almost_equal(spike_dist, 0.211058782487353908, decimal=15)

    spike_sync = spk.spike_sync(st1, st2)
    assert_almost_equal(spike_sync, 8.6956521739130432e-01, decimal=15)

    # multivariate check
    spike_trains = spk.load_spike_trains_from_txt(
        os.path.join(TEST_PATH, "PySpike_testdata.txt"), (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1) + len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_dist, 0.25188056475463755, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_sync, 0.7183531505298066, decimal=15)

    # Eero's edge correction example
    st1 = SpikeTrain([0.5, 1.5, 2.5], 6.0)
    st2 = SpikeTrain([3.5, 4.5, 5.5], 6.0)

    f = spk.spike_profile(st1, st2)

    expected_times = np.array([0.0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.0])
    # interleaved left/right limits of the piecewise-linear SPIKE profile
    y_all = np.array([
        0.271604938271605, 0.271604938271605, 0.271604938271605,
        0.617283950617284, 0.617283950617284, 0.444444444444444,
        0.285714285714286, 0.285714285714286, 0.444444444444444,
        0.617283950617284, 0.617283950617284, 0.271604938271605,
        0.271604938271605, 0.271604938271605
    ])
    expected_y1 = y_all[::2]
    expected_y2 = y_all[1::2]

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=14)
    assert_array_almost_equal(f.y2, expected_y2, decimal=14)
def test_isi():
    """ISI profile and distance against pen-and-paper reference values."""
    # generate two spike trains:
    train1 = SpikeTrain([0.2, 0.4, 0.6, 0.7], 1.0)
    train2 = SpikeTrain([0.3, 0.45, 0.8, 0.9, 0.95], 1.0)

    # hand-computed piecewise-constant ISI profile
    times = np.array([0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7,
                      0.8, 0.9, 0.95, 1.0])
    isi_values = np.array([0.1/0.3, 0.1/0.3, 0.05/0.2, 0.05/0.2,
                           0.15/0.35, 0.25/0.35, 0.05/0.35,
                           0.2/0.3, 0.25/0.3, 0.25/0.3])
    # time-weighted average of the profile
    avrg_expected = sum((times[1:] - times[:-1]) *
                        isi_values)/(times[-1]-times[0])

    prof = spk.isi_profile(train1, train2)
    assert_equal(prof.x, times)
    assert_array_almost_equal(prof.y, isi_values, decimal=15)
    assert_equal(prof.avrg(), avrg_expected)
    assert_equal(spk.isi_distance(train1, train2), avrg_expected)

    # check with some equal spike times
    train1 = SpikeTrain([0.2, 0.4, 0.6], [0.0, 1.0])
    train2 = SpikeTrain([0.1, 0.4, 0.5, 0.6], [0.0, 1.0])

    times = np.array([0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0])
    isi_values = np.array([0.1/0.3, 0.1/0.3, 0.1/0.3,
                           0.1/0.2, 0.1/0.2, 0.0/0.5])
    avrg_expected = sum((times[1:] - times[:-1]) *
                        isi_values)/(times[-1]-times[0])

    prof = spk.isi_profile(train1, train2)
    assert_equal(prof.x, times)
    assert_array_almost_equal(prof.y, isi_values, decimal=15)
    assert_equal(prof.avrg(), avrg_expected)
    assert_equal(spk.isi_distance(train1, train2), avrg_expected)
def _similarity(self, v1, v2, method='isi', **kwargs):
    """Return a scalar dissimilarity between two equal-length 1-D traces.

    method='isi'  -- threshold-crossing spike detection (kwargs may carry
                     'thresh', default 10) followed by the PySpike
                     ISI-distance between the two detected spike trains.
    method='efel' -- absolute difference of the eFEL 'mean_frequency'
                     feature of the two traces.

    Raises ValueError for an unknown method.

    Fix: the 'efel' branch previously carried an unreachable try/except
    block (with a bare except and matplotlib plotting) after its return
    statement; that dead code has been removed.
    """
    assert v1.shape == v2.shape
    assert v1.ndim == 1
    if method == 'isi':
        thresh = kwargs.pop('thresh', 10)
        # Rising edges of the thresholded trace mark spike onsets.
        spikes1 = np.diff((v1 > thresh).astype('int'))
        spikes2 = np.diff((v2 > thresh).astype('int'))
        spike_times_1 = np.where(spikes1 > 0)[0]
        spike_times_2 = np.where(spikes2 > 0)[0]
        # TODO: duration is hard-coded as 9000 * .02 -- should be derived
        # from the actual number of time bins of the trace.
        spike_train_1 = pyspike.SpikeTrain(spike_times_1, 9000 * .02)
        spike_train_2 = pyspike.SpikeTrain(spike_times_2, 9000 * .02)
        return np.abs(pyspike.isi_distance(spike_train_1, spike_train_2))
    elif method == 'efel':
        trace1 = self._make_efel_trace(v1)
        trace2 = self._make_efel_trace(v2)
        efel1, efel2 = efel.getFeatureValues([trace1, trace2],
                                             ['mean_frequency'])
        return np.abs(efel1['mean_frequency'] - efel2['mean_frequency'])[0]
    else:
        raise ValueError("unknown similarity metric")
def analyze_result(name, stim, result, fs=10000, save=True):
    """Compute summary statistics of a simulated spiking run.

    name   -- basename for the optional CSV output ('<name>_analysis.csv')
    stim   -- stimulus onset time (seconds); splits the analysis into a
              pre-stimulus and a post-stimulus window
    result -- dict holding spike-monitor objects under the keys
              'spikes_e', 'spikes_i', 'spikes_stim' (each exposing .i and
              a unit-carrying .t, Brian-style)
    fs     -- NOTE(review): declared but never used in this function
    save   -- if True, also write the analysis dict as 'key,value' CSV rows

    Returns a dict of named statistics (kappa, fano, isi, sync, lev).
    """
    analysis = {}
    spikes_e = result['spikes_e']
    spikes_stim = result['spikes_stim']
    spikes_i = result['spikes_i']
    # Get Ns and ts -- dividing by `second` strips Brian units, leaving
    # plain float spike times in seconds.
    ns_e, ts_e = spikes_e.i, spikes_e.t / second
    ns_i, ts_i = spikes_i.i, spikes_i.t / second
    ns_stim, ts_stim = spikes_stim.i, spikes_stim.t / second
    # Keep only neurons 0-199
    mask = ns_e < 200
    ns_e, ts_e = ns_e[mask], ts_e[mask]
    mask = ns_i < 200
    ns_i, ts_i = ns_i[mask], ts_i[mask]
    # -------------------------------------------------------------
    # Drop first 200 ms (discard the initial transient)
    mask = ts_e > 0.2
    ns_e, ts_e = ns_e[mask], ts_e[mask]
    mask = ts_i > 0.2
    ns_i, ts_i = ns_i[mask], ts_i[mask]
    # -------------------------------------------------------------
    # Look at pre-stim first (spikes before the stimulus onset)
    mask = ts_e < stim
    ns_pre_e, ts_pre_e = ns_e[mask], ts_e[mask]
    mask = ts_i < stim
    ns_pre_i, ts_pre_i = ns_i[mask], ts_i[mask]
    # kappa: population synchrony of each population with itself
    # (window (0, 1) -- presumably the full 1 s run; TODO confirm)
    r_e = futil.kappa(
        ns_pre_e, ts_pre_e, ns_pre_e, ts_pre_e, (0, 1), 1.0 / 1000)  # 1 ms bins
    analysis['kappa_pre_e'] = r_e
    r_i = futil.kappa(
        ns_pre_i, ts_pre_i, ns_pre_i, ts_pre_i, (0, 1), 1.0 / 1000)  # 1 ms bins
    analysis['kappa_pre_i'] = r_i
    # fano: mean Fano factor across neurons (NaNs from silent
    # neurons are ignored by nanmean)
    fanos_e = futil.fano(ns_pre_e, ts_pre_e)
    mfano_e = np.nanmean([x for x in fanos_e.values()])
    analysis['fano_pre_e'] = mfano_e
    # -------------------------------------------------------------
    # Drop times before stim time; the same statistics are recomputed
    # on the post-stimulus window only.
    mask = ts_e > stim
    ns_e, ts_e = ns_e[mask], ts_e[mask]
    mask = ts_i > stim
    ns_i, ts_i = ns_i[mask], ts_i[mask]
    # kappa
    r_e = futil.kappa(ns_e, ts_e, ns_e, ts_e, (0, 1), 1.0 / 1000)  # 1 ms bins
    analysis['kappa_e'] = r_e
    r_i = futil.kappa(ns_i, ts_i, ns_i, ts_i, (0, 1), 1.0 / 1000)  # 1 ms bins
    analysis['kappa_i'] = r_i
    # fano
    fanos_e = futil.fano(ns_e, ts_e)
    mfano_e = np.nanmean([x for x in fanos_e.values()])
    analysis['fano_e'] = mfano_e
    # ISI and SPIKE distances between the stimulus train and the
    # excitatory population, restricted to the (0.5, 1) interval
    # (presumably the post-stimulus half of the run -- TODO confirm).
    sto_e = spk.SpikeTrain(ts_e, (0.5, 1))
    sto_stim = spk.SpikeTrain(ts_stim, (0.5, 1))
    # PySpike requires sorted spike times.
    sto_e.sort()
    sto_stim.sort()
    isi = spk.isi_distance(sto_stim, sto_e)
    sync = spk.spike_sync(sto_stim, sto_e)
    analysis['isi_e'] = isi
    analysis['sync_e'] = sync
    # l distance and spike: Levenshtein distance between the
    # time-ordered neuron-index sequences of stimulus and response.
    ordered_e, _ = futil.ts_sort(ns_e, ts_e)
    ordered_stim, _ = futil.ts_sort(ns_stim, ts_stim)
    lev = futil.levenshtein(list(ordered_stim), list(ordered_e))
    analysis['lev_e'] = lev
    if save:
        # One 'key,value' row per statistic.
        with open(name + '_analysis.csv', 'w') as f:
            [f.write('{0},{1}\n'.format(k, v)) for k, v in analysis.items()]
    return analysis
# Run the simulation, then compute and store the PySpike metrics for
# the current parameter combination (tc, delay, psyn, synw).
run(runTime*ms)
# code below calculates and stores the pyspike metrics
# Fix: dict .values() returns a non-indexable view on Python 3, so it is
# materialized as a list first (also valid on Python 2).
firingValuesWithUnits = list(Sp1.spike_trains().values())
firingValues = []
for train in firingValuesWithUnits:
    firingValues.append(array(train))
# Dump one space-separated line of spike times per neuron so PySpike can
# re-load the trains from disk; 'with' guarantees the file is closed.
with open('fv.txt', 'w') as fV:
    for item in firingValues:
        item = (" ".join(map(str, item)))
        fV.write("%s\n" % item)
# runTime is in ms; PySpike edges are given in seconds.
spikeTrains = psp.load_spike_trains_from_txt("fv.txt",
                                             edges=(0, runTime/1000.0))
# Record the parameters and the multivariate distance metrics.
qvalues.iloc[currentLine, 0] = tc
qvalues.iloc[currentLine, 1] = delay
qvalues.iloc[currentLine, 2] = psyn
qvalues.iloc[currentLine, 3] = synw
qvalues.iloc[currentLine, 4] = psp.spike_distance(spikeTrains)
qvalues.iloc[currentLine, 5] = psp.isi_distance(spikeTrains)
qvalues.iloc[currentLine, 6] = psp.spike_sync(spikeTrains)
currentLine += 1
# Release the simulation objects before the next parameter combination.
del G1
del S1
del Sp1
del firingValuesWithUnits
del firingValues
del spikeTrains
qvalues.to_excel('qvalues.xlsx', sheet_name='Sheet1')