def test_spike_empty():
    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    assert_allclose(d, 0.0)
    prof = spk.spike_profile(st1, st2)
    assert_allclose(d, prof.avrg())
    assert_array_equal(prof.x, [0.0, 1.0])
    assert_array_equal(prof.y1, [0.0, ])
    assert_array_equal(prof.y2, [0.0, ])

    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    d_expect = 2*0.4*0.4*1.0/(0.4+1.0)**2 + 2*0.6*0.4*1.0/(0.6+1.0)**2
    assert_almost_equal(d, d_expect, decimal=15)
    prof = spk.spike_profile(st1, st2)
    assert_allclose(d, prof.avrg())
    assert_array_equal(prof.x, [0.0, 0.4, 1.0])
    assert_array_almost_equal(prof.y1,
                              [2*0.4*1.0/(0.4+1.0)**2,
                               2*0.4*1.0/(0.6+1.0)**2],
                              decimal=15)
    assert_array_almost_equal(prof.y2,
                              [2*0.4*1.0/(0.4+1.0)**2,
                               2*0.4*1.0/(0.6+1.0)**2],
                              decimal=15)

    st1 = SpikeTrain([0.6, ], edges=(0.0, 1.0))
    st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    s1 = np.array([0.2, 0.2, 0.2, 0.2])
    s2 = np.array([0.2, 0.2, 0.2, 0.2])
    isi1 = np.array([0.6, 0.6, 0.4])
    isi2 = np.array([0.4, 0.6, 0.6])
    expected_y1 = (s1[:-1]*isi2 + s2[:-1]*isi1) / (0.5*(isi1+isi2)**2)
    expected_y2 = (s1[1:]*isi2 + s2[1:]*isi1) / (0.5*(isi1+isi2)**2)
    expected_times = np.array([0.0, 0.4, 0.6, 1.0])
    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1 + expected_y2) / 2)
    expected_spike_val /= (expected_times[-1] - expected_times[0])
    assert_almost_equal(d, expected_spike_val, decimal=15)
    prof = spk.spike_profile(st1, st2)
    assert_allclose(d, prof.avrg())
    assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
    assert_array_almost_equal(prof.y1, expected_y1, decimal=15)
    assert_array_almost_equal(prof.y2, expected_y2, decimal=15)
def test_spike_empty():
    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    assert_equal(d, 0.0)
    prof = spk.spike_profile(st1, st2)
    assert_equal(d, prof.avrg())
    assert_array_equal(prof.x, [0.0, 1.0])
    assert_array_equal(prof.y1, [0.0, ])
    assert_array_equal(prof.y2, [0.0, ])

    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    assert_almost_equal(d, 0.4*0.4*1.0/(0.4+1.0)**2 +
                        0.6*0.4*1.0/(0.6+1.0)**2, decimal=15)
    prof = spk.spike_profile(st1, st2)
    assert_equal(d, prof.avrg())
    assert_array_equal(prof.x, [0.0, 0.4, 1.0])
    assert_array_almost_equal(prof.y1, [0.0, 2*0.4*1.0/(0.6+1.0)**2],
                              decimal=15)
    assert_array_almost_equal(prof.y2, [2*0.4*1.0/(0.4+1.0)**2, 0.0],
                              decimal=15)

    st1 = SpikeTrain([0.6, ], edges=(0.0, 1.0))
    st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    s1 = np.array([0.0, 0.4*0.2/0.6, 0.2, 0.0])
    s2 = np.array([0.0, 0.2, 0.2*0.4/0.6, 0.0])
    isi1 = np.array([0.6, 0.6, 0.4])
    isi2 = np.array([0.4, 0.6, 0.6])
    expected_y1 = (s1[:-1]*isi2 + s2[:-1]*isi1) / (0.5*(isi1+isi2)**2)
    expected_y2 = (s1[1:]*isi2 + s2[1:]*isi1) / (0.5*(isi1+isi2)**2)
    expected_times = np.array([0.0, 0.4, 0.6, 1.0])
    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1 + expected_y2) / 2)
    expected_spike_val /= (expected_times[-1] - expected_times[0])
    assert_almost_equal(d, expected_spike_val, decimal=15)
    prof = spk.spike_profile(st1, st2)
    assert_equal(d, prof.avrg())
    assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
    assert_array_almost_equal(prof.y1, expected_y1, decimal=15)
    assert_array_almost_equal(prof.y2, expected_y2, decimal=15)
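# Note: the tests above (and the test_spike variants below) all hand-compute
# the same quantity: the SPIKE-distance dissimilarity profile
#     S(t) = (S1*isi2 + S2*isi1) / (0.5 * (isi1 + isi2)**2),
# where S1, S2 are the local spike-time differences and isi1, isi2 the local
# interspike intervals, averaged over time with the trapezoidal rule. A
# minimal sketch of that shared arithmetic as helpers -- `profile_values` and
# `profile_average` are hypothetical names, not part of the PySpike API:
import numpy as np

def profile_values(s1, s2, isi1, isi2):
    # corner values of the piecewise-linear profile on each interval
    y1 = (s1[:-1] * isi2 + s2[:-1] * isi1) / (0.5 * (isi1 + isi2)**2)
    y2 = (s1[1:] * isi2 + s2[1:] * isi1) / (0.5 * (isi1 + isi2)**2)
    return y1, y2

def profile_average(times, y1, y2):
    # time-weighted trapezoidal average, as in expected_spike_val above
    times = np.asarray(times)
    return (np.sum((times[1:] - times[:-1]) * (y1 + y2) / 2) /
            (times[-1] - times[0]))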
def test_regression_spiky():
    # standard example
    st1 = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st2 = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st1, st2)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
    isi_profile = spk.isi_profile(st1, st2)
    assert_equal(isi_profile.y, 0.1/1.1*np.ones_like(isi_profile.y))

    spike_dist = spk.spike_distance(st1, st2)
    assert_equal(spike_dist, 0.211058782487353908)

    spike_sync = spk.spike_sync(st1, st2)
    assert_equal(spike_sync, 8.6956521739130432e-01)

    # multivariate check
    spike_trains = spk.load_spike_trains_from_txt(
        os.path.join(TEST_PATH, "PySpike_testdata.txt"), (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1) + len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_dist, 0.25188056475463755, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # get the full precision from SPIKY
    assert_equal(spike_sync, 0.7183531505298066)

    # Eero's edge correction example
    st1 = SpikeTrain([0.5, 1.5, 2.5], 6.0)
    st2 = SpikeTrain([3.5, 4.5, 5.5], 6.0)

    f = spk.spike_profile(st1, st2)

    expected_times = np.array([0.0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.0])
    y_all = np.array([0.271604938271605, 0.271604938271605,
                      0.271604938271605, 0.617283950617284,
                      0.617283950617284, 0.444444444444444,
                      0.285714285714286, 0.285714285714286,
                      0.444444444444444, 0.617283950617284,
                      0.617283950617284, 0.271604938271605,
                      0.271604938271605, 0.271604938271605])
    expected_y1 = y_all[::2]
    expected_y2 = y_all[1::2]

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=14)
    assert_array_almost_equal(f.y2, expected_y2, decimal=14)
def get_data():
    print('Starting analysis of spike times per cell: ' + str(label_network))
    sync_data = [[[[] for i in range(2)] for x in range(len(gids))]
                 for p in range(len(stats))]
    spktsRange = [spkt for spkt in spkts
                  if timeRange[0] <= spkt <= timeRange[1]]
    for i, stat in enumerate(stats):
        for ii, subset in enumerate(gids):
            # one PySpike SpikeTrain per cell, restricted to timeRange
            spkmat = [
                pyspike.SpikeTrain(
                    [spkt for spkind, spkt in zip(spkinds, spkts)
                     if (spkind == gid and spkt in spktsRange)], timeRange)
                for gid in set(subset)
            ]
            if stat == 'spike_sync_profile':
                print(str(stat) + ", subset: " + str(ii) +
                      ", number of trains: " + str(len(spkmat)))
                syncMat1 = pyspike.spike_sync_profile(spkmat)
                x, y = syncMat1.get_plottable_data()
                sync_data[i][ii][0] = x
                sync_data[i][ii][1] = y
            elif stat == 'spike_profile':
                print(str(stat) + ", subset: " + str(ii) +
                      ", number of trains: " + str(len(spkmat)))
                syncMat2 = pyspike.spike_profile(spkmat)
                x, y = syncMat2.get_plottable_data()
                sync_data[i][ii][0] = x
                sync_data[i][ii][1] = y
            elif stat == 'isi_profile':
                print(str(stat) + ", subset: " + str(ii) +
                      ", number of trains: " + str(len(spkmat)))
                syncMat3 = pyspike.isi_profile(spkmat)
                x, y = syncMat3.get_plottable_data()
                sync_data[i][ii][0] = x
                sync_data[i][ii][1] = y
f = spk.isi_profile(spike_trains[0], spike_trains[1])
print("ISI-distance: %.8f" % f.avrg())

isi1 = f.avrg(interval=(0, 1000))
isi2 = f.avrg(interval=(1000, 2000))
isi3 = f.avrg(interval=[(0, 1000), (2000, 3000)])
isi4 = f.avrg(interval=[(1000, 2000), (3000, 4000)])
print("ISI-distance (0-1000):                %.8f" % isi1)
print("ISI-distance (1000-2000):             %.8f" % isi2)
print("ISI-distance (0-1000) and (2000-3000): %.8f" % isi3)
print("ISI-distance (1000-2000) and (3000-4000): %.8f" % isi4)
print()

f = spk.spike_profile(spike_trains[0], spike_trains[1])
print("SPIKE-distance: %.8f" % f.avrg())

spike1 = f.avrg(interval=(0, 1000))
spike2 = f.avrg(interval=(1000, 2000))
spike3 = f.avrg(interval=[(0, 1000), (2000, 3000)])
spike4 = f.avrg(interval=[(1000, 2000), (3000, 4000)])
print("SPIKE-distance (0-1000):                %.8f" % spike1)
print("SPIKE-distance (1000-2000):             %.8f" % spike2)
print("SPIKE-distance (0-1000) and (2000-3000): %.8f" % spike3)
print("SPIKE-distance (1000-2000) and (3000-4000): %.8f" % spike4)
print()

f = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
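# The interval averages above can also be obtained without building the full
# profile: PySpike's distance functions accept the same `interval` argument.
# A short sketch, assuming the `spike_trains` loaded earlier in this script:
isi12 = spk.isi_distance(spike_trains[0], spike_trains[1],
                         interval=(1000, 2000))
spike12 = spk.spike_distance(spike_trains[0], spike_trains[1],
                             interval=(1000, 2000))
print("ISI-distance (1000-2000), direct:   %.8f" % isi12)
print("SPIKE-distance (1000-2000), direct: %.8f" % spike12)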
import time

import pyspike as spk

# use time.perf_counter(); time.clock() was removed in Python 3.8
t_start = time.perf_counter()

# load the data
time_loading = time.perf_counter()
spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                              edges=(0, 4000))
t_loading = time.perf_counter()

print("Number of spike trains: %d" % len(spike_trains))
num_of_spikes = sum([len(spike_trains[i])
                     for i in range(len(spike_trains))])
print("Number of spikes: %d" % num_of_spikes)

# calculate the multivariate spike distance
f = spk.spike_profile(spike_trains)
t_spike = time.perf_counter()

# print the average
avrg = f.avrg()
print("Spike distance from average: %.8f" % avrg)
t_avrg = time.perf_counter()

# compute the average distance directly, should give the same result as above
spike_dist = spk.spike_distance(spike_trains)
print("Spike distance directly: %.8f" % spike_dist)
t_dist = time.perf_counter()
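# A sketch of reporting the timings collected above; the original script's
# output format is not shown here, so this layout is an assumption.
print("Loading spike trains: %9.1f ms" % ((t_loading - time_loading) * 1000))
print("Computing profile:    %9.1f ms" % ((t_spike - t_loading) * 1000))
print("Averaging profile:    %9.1f ms" % ((t_avrg - t_spike) * 1000))
print("Direct distance:      %9.1f ms" % ((t_dist - t_avrg) * 1000))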
spike_trains = spk.load_spike_trains_from_txt("../test/SPIKE_Sync_Test.txt", edges=(0, 4000)) plt.figure() f = spk.spike_sync_profile(spike_trains[0], spike_trains[1]) x, y = f.get_plottable_data() plt.plot(x, y, '--ok', label="SPIKE-SYNC profile") print(f.x) print(f.y) print(f.mp) print("Average:", f.avrg()) f = spk.spike_profile(spike_trains[0], spike_trains[1]) x, y = f.get_plottable_data() plt.plot(x, y, '-b', label="SPIKE-profile") plt.axis([0, 4000, -0.1, 1.1]) plt.legend(loc="center right") plt.figure() plt.subplot(211) f = spk.spike_sync_profile_multi(spike_trains) x, y = f.get_plottable_data() plt.plot(x, y, '-b', alpha=0.7, label="SPIKE-Sync profile")
import pyspike as spk

spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                              edges=(0, 4000))

# plot the spike times
for (i, spike_train) in enumerate(spike_trains):
    # print(np.asarray(spike_train))
    plt.scatter(spike_train, i*np.ones_like(spike_train), marker='|')
plt.savefig('fig/plot0.png')

# profile of the first two spike trains
f = spk.isi_profile(spike_trains, indices=[0, 1])
x, y = f.get_plottable_data()

plt.figure()
plt.plot(x, np.abs(y), '--k', label="ISI-profile")
print("ISI-distance: %.8f" % f.avrg())

f = spk.spike_profile(spike_trains, indices=[0, 1])
x, y = f.get_plottable_data()

plt.plot(x, y, '-b', label="SPIKE-profile")
print("SPIKE-distance: %.8f" % f.avrg())

plt.legend(loc="upper left")
plt.savefig('fig/plot1.png')
# plt.show()
def test_spike():
    # generate two spike trains:
    t1 = SpikeTrain([0.0, 2.0, 5.0, 8.0], 10.0)
    t2 = SpikeTrain([0.0, 1.0, 5.0, 9.0], 10.0)

    expected_times = np.array([0.0, 1.0, 2.0, 5.0, 8.0, 9.0, 10.0])

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)

    # from SPIKY:
    y_all = np.array([0.000000000000000000, 0.555555555555555580,
                      0.222222222222222210, 0.305555555555555580,
                      0.255102040816326536, 0.000000000000000000,
                      0.000000000000000000, 0.255102040816326536,
                      0.255102040816326536, 0.285714285714285698,
                      0.285714285714285698, 0.285714285714285698])
    # assert_array_almost_equal(f.y1, y_all[::2])
    assert_array_almost_equal(f.y2, y_all[1::2])

    assert_almost_equal(f.avrg(), 0.186309523809523814, decimal=15)
    assert_equal(spk.spike_distance(t1, t2), f.avrg())

    t1 = SpikeTrain([0.2, 0.4, 0.6, 0.7], 1.0)
    t2 = SpikeTrain([0.3, 0.45, 0.8, 0.9, 0.95], 1.0)

    # pen&paper calculation of the spike distance
    expected_times = [0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0]
    s1 = np.array([0.1, 0.1, (0.1*0.1+0.05*0.1)/0.2, 0.05,
                   (0.05*0.15*2)/0.2, 0.15, 0.1,
                   (0.1*0.1+0.1*0.2)/0.3, (0.1*0.2+0.1*0.1)/0.3,
                   (0.1*0.05+0.1*0.25)/0.3, 0.1])
    s2 = np.array([0.1, (0.1*0.2+0.1*0.1)/0.3, 0.1, (0.1*0.05*2)/.15,
                   0.05, (0.05*0.2+0.1*0.15)/0.35,
                   (0.05*0.1+0.1*0.25)/0.35, 0.1, 0.1, 0.05, 0.05])
    isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.1, 0.3, 0.3, 0.3, 0.3])
    isi2 = np.array([0.3, 0.3, 0.15, 0.15, 0.35, 0.35, 0.35, 0.1, 0.05, 0.05])
    expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2)
    expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2)

    expected_times = np.array(expected_times)
    expected_y1 = np.array(expected_y1)
    expected_y2 = np.array(expected_y2)

    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1+expected_y2)/2)
    expected_spike_val /= (expected_times[-1]-expected_times[0])
    print("SPIKE value:", expected_spike_val)

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=15)
    assert_array_almost_equal(f.y2, expected_y2, decimal=15)
    assert_almost_equal(f.avrg(), expected_spike_val, decimal=15)
    assert_almost_equal(spk.spike_distance(t1, t2), expected_spike_val,
                        decimal=15)

    # check with some equal spike times
    t1 = SpikeTrain([0.2, 0.4, 0.6], [0.0, 1.0])
    t2 = SpikeTrain([0.1, 0.4, 0.5, 0.6], [0.0, 1.0])

    expected_times = [0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0]
    # due to the edge correction in the beginning, s1 and s2 are different
    # for left and right values
    s1_r = np.array([0.1, (0.1*0.1+0.1*0.1)/0.2, 0.1, 0.0, 0.0, 0.0, 0.0])
    s1_l = np.array([0.1, (0.1*0.1+0.1*0.1)/0.2, 0.1, 0.0, 0.0, 0.0, 0.0])
    # s2_r = np.array([0.1*0.1/0.3, 0.1*0.3/0.3, 0.1*0.2/0.3,
    #                  0.0, 0.1, 0.0, 0.0])
    # s2_l = np.array([0.1*0.1/0.3, 0.1*0.1/0.3, 0.1*0.2/0.3, 0.0,
    #                  0.1, 0.0, 0.0])
    # eero's edge correction:
    s2_r = np.array([0.1, 0.1*0.3/0.3, 0.1*0.2/0.3, 0.0, 0.1, 0.0, 0.0])
    s2_l = np.array([0.1, 0.1*0.3/0.3, 0.1*0.2/0.3, 0.0, 0.1, 0.0, 0.0])
    isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.4])
    isi2 = np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.4])
    expected_y1 = (s1_r[:-1]*isi2+s2_r[:-1]*isi1) / (0.5*(isi1+isi2)**2)
    expected_y2 = (s1_l[1:]*isi2+s2_l[1:]*isi1) / (0.5*(isi1+isi2)**2)

    expected_times = np.array(expected_times)
    expected_y1 = np.array(expected_y1)
    expected_y2 = np.array(expected_y2)

    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1+expected_y2)/2)
    expected_spike_val /= (expected_times[-1]-expected_times[0])

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=14)
    assert_array_almost_equal(f.y2, expected_y2, decimal=14)
    assert_almost_equal(f.avrg(), expected_spike_val, decimal=16)
    assert_almost_equal(spk.spike_distance(t1, t2), expected_spike_val,
                        decimal=16)
def iter_plot0(md):
    import seaborn as sns
    import pickle
    with open('cell_indexs.p', 'rb') as f:
        returned_list = pickle.load(f)
    index_exc = returned_list[0]
    index_inh = returned_list[1]
    index, mdf1 = md
    #wgf = {0.025:None,0.05:None,0.125:None,0.25:None,0.3:None,0.4:None,
    #       0.5:None,1.0:None,1.5:None,2.0:None,2.5:None,3.0:None}
    wgf = {
        0.0025: None, 0.0125: None, 0.025: None, 0.05: None,
        0.125: None, 0.25: None, 0.3: None, 0.4: None, 0.5: None,
        1.0: None, 1.5: None, 2.0: None, 2.5: None, 3.0: None
    }
    weight_gain_factors = {k: v for k, v in enumerate(wgf.keys())}
    print(len(weight_gain_factors))
    print(weight_gain_factors.keys())
    #weight_gain_factors = {0:0.5,1:1.0,2:1.5,3:2.0,4:2.5,5:3}
    k = weight_gain_factors[index]
    #print(len(mdf1.segments),'length of block')

    ass = mdf1.analogsignals[0]
    time_points = ass.times
    avg = np.mean(ass, axis=0)  # average over the signals of the segment
    #maxx = np.max(ass, axis=0)
    std = np.std(ass, axis=0)   # standard deviation over the signals

    plt.figure()
    plt.plot([i for i in range(0, len(avg))], avg)
    plt.plot([i for i in range(0, len(std))], std)
    plt.title("Mean and Standard Dev of $V_{m}$ amplitude per neuron")
    plt.xlabel('time $(ms)$')
    plt.ylabel('Voltage $(mV)$')
    plt.savefig(str(index) + 'prs.png')

    vm_spiking = []
    vm_not_spiking = []
    spike_trains = []
    binary_trains = []
    max_spikes = 0

    vms = np.array(mdf1.analogsignals[0].as_array().T)

    cnt = 0
    for spiketrain in mdf1.spiketrains:
        y = np.ones_like(spiketrain) * spiketrain.annotations['source_id']
        # the argument edges is the time interval you want to be considered
        pspikes = pyspike.SpikeTrain(spiketrain, edges=(0, len(ass)))
        spike_trains.append(pspikes)
        if len(spiketrain) > max_spikes:
            max_spikes = len(spiketrain)

        if np.max(ass[spiketrain.annotations['source_id']]) > 0.0:
            vm_spiking.append(vms[spiketrain.annotations['source_id']])
        else:
            vm_not_spiking.append(vms[spiketrain.annotations['source_id']])
        cnt += 1

    for spiketrain in mdf1.spiketrains:
        x = conv.BinnedSpikeTrain(spiketrain, binsize=1 * pq.ms,
                                  t_start=0 * pq.s)
        binary_trains.append(x)

    end_floor = np.floor(float(mdf1.t_stop))
    dt = float(mdf1.t_stop) % end_floor
    #v = mdf1.take_slice_of_analogsignalarray_by_unit()
    t_axis = np.arange(float(mdf1.t_start), float(mdf1.t_stop), dt)

    plt.figure()
    plt.clf()
    cleaned = []
    data = np.array(mdf1.analogsignals[0].as_array().T)
    for i, vm in enumerate(data):
        # drop traces with unphysiological amplitudes
        if np.max(vm) > 900.0 or np.min(vm) < -900.0:
            pass
        else:
            plt.plot(ass.times, vm)  #, label='neuron identifier '+str(i))
            cleaned.append(vm)

    assert len(cleaned) < len(ass)
    print(len(cleaned))
    plt.title('neuron $V_{m}$')
    #plt.legend(loc="upper left")
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Voltage $(mV)$')
    plt.savefig(str('weight_') + str(k) + 'analogsignals' + '.png')
    plt.close()

    plt.figure()
    plt.clf()
    plt.title('Single Neuron $V_{m}$ trace')
    plt.plot(ass.times[0:int(len(ass.times) / 10)],
             vm_not_spiking[index_exc[0]][0:int(len(ass.times) / 10)])
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Voltage $(mV)$')
    plt.savefig(str('weight_') + str(k) + 'eespecific_analogsignals' + '.png')
    plt.close()

    plt.figure()
    plt.clf()
    plt.title('Single Neuron $V_{m}$ trace')
    plt.plot(ass.times[0:int(len(ass.times) / 10)],
             vm_not_spiking[index_inh[0]][0:int(len(ass.times) / 10)])
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Voltage $(mV)$')
    plt.savefig(str('weight_') + str(k) + 'inhibitory_analogsignals' + '.png')
    plt.close()

    cvsd = {}
    cvs = []
    cvsi = []
    rates = []  # firing rates per cell, in spikes per second
    for i, j in enumerate(spike_trains):
        rates.append(float(len(j) / 2.0))  # spikes per second over the 2 s run
        cva = cv(j)
        if np.isnan(cva) or cva == 0:
            pass
            #cvs[i] = 0
            #cvsd[i] = 0
        else:
            pass
            #cvs[i] = cva
            #cvsd[i] = cva
        cvs.append(cva)
    #import pickle
    #with open(str('weight_')+str(k)+'coefficients_of_variation.p','wb') as f:
    #    pickle.dump([cvs,cvsd], f)

    import numpy
    a = numpy.asarray(cvs)
    numpy.savetxt('pickles/' + str('weight_') + str(k) +
                  'coefficients_of_variation.csv', a, delimiter=",")
    a = numpy.asarray(rates)
    numpy.savetxt('pickles/' + str('weight_') + str(k) + 'firing_of_rate.csv',
                  a, delimiter=",")

    cvs = [i for i in cvs if i != 0]
    cells = [i for i in range(0, len(cvs))]

    plt.clf()
    fig, axes = plt.subplots()
    axes.set_title('Coefficient of Variation Versus Neuron')
    axes.set_xlabel('Neuron number')
    axes.set_ylabel('CV estimate')
    mcv = np.mean(cvs)
    cvs = np.array(cvs)
    plt.scatter(index_inh, cvs[index_inh], label="inhibitory cells")
    plt.scatter(index_exc, cvs[index_exc], label="excitatory cells")
    plt.legend(loc="upper left")
    fig.tight_layout()
    plt.savefig(str('weight_') + str(k) + 'cvs_mean_' + str(mcv) + '.png')
    plt.close()

    plt.clf()
    #frequencies, power = elephant.spectral.welch_psd(ass)
    #mfreq = frequencies[np.where(power==np.max(power))[0][0]]
    fig, axes = plt.subplots()
    axes.set_title('Firing Rate Versus Neuron Number at mean f=' +
                   str(np.mean(rates)) + ' (spikes per second)')
    axes.set_xlabel('Neuron number')
    axes.set_ylabel('Spikes per second')
    rates = np.array(rates)
    plt.scatter(index_inh, rates[index_inh], label="inhibitory cells")
    plt.scatter(index_exc, rates[index_exc], label="excitatory cells")
    plt.legend(loc="upper left")
    fig.tight_layout()
    plt.savefig(str('firing_rates_per_cell_') + str(k) + str(mcv) + '.png')
    plt.close()

    '''
    import pandas as pd
    d = {'coefficent_of_variation': cvs, 'cells': cells}
    df = pd.DataFrame(data=d)
    ax = sns.regplot(x='cells', y='coefficent_of_variation', data=df)
    plt.savefig(str('weight_')+str(k)+'cvs_regexp_'+str(mcv)+'.png')
    plt.close()
    '''

    spike_trains = []
    ass = mdf1.analogsignals[0]
    tstop = mdf1.t_stop
    #assert np.max(ass.times) == mdf1.t_stop
    tstop = 2000
    vm_spiking = []
    for spiketrain in mdf1.spiketrains:
        vm_spiking.append(
            mdf1.analogsignals[0][spiketrain.annotations['source_id']])
        y = np.ones_like(spiketrain) * spiketrain.annotations['source_id']
        # the argument edges is the time interval you want to be considered
        pspikes = pyspike.SpikeTrain(spiketrain, edges=(0, tstop))
        spike_trains.append(pspikes)

    # plot the spike times
    plt.clf()
    for (i, spike_train) in enumerate(spike_trains):
        plt.scatter(spike_train, i * np.ones_like(spike_train), marker='.')
    plt.xlabel('Time (ms)')
    plt.ylabel('Cell identifier')
    plt.title('Raster Plot for weight strength:' + str(k))
    plt.savefig(str('weight_') + str(k) + 'raster_plot' + '.png')
    plt.close()

    f = spk.isi_profile(spike_trains, indices=[0, 1])
    x, y = f.get_plottable_data()
    text_file = open(str('weight_') + str(index) + 'net_out.txt', 'w')
    plt.figure()
    plt.plot(x, np.abs(y), '--k', label="ISI-profile")
    print("ISI-distance: %.8f" % f.avrg())

    f = spk.spike_profile(spike_trains, indices=[0, 1])
    x, y = f.get_plottable_data()
    plt.plot(x, y, '-b', label="SPIKE-profile")
    #print("SPIKE-distance: %.8f" % f.avrg())
    string_to_write = str("ISI-distance:") + str(f.avrg()) + str("\n\n")
    plt.title(string_to_write)
    plt.xlabel('Time $(ms)$')
    plt.ylabel('ISI distance')
    plt.legend(loc="upper left")
    plt.savefig(str('weight_') + str(k) + 'ISI_distance_bivariate' + '.png')
    plt.close()
    text_file.write(string_to_write)
    #text_file.write("SPIKE-distance: %.8f" % f.avrg())

    plt.figure()
    f = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
    x, y = f.get_plottable_data()
    plt.plot(x, y, '--ok', label="SPIKE-SYNC profile")
    print(f, f.avrg())
    print("Average:" + str(f.avrg()))
    string_to_write = (str("instantaneous synchrony:") + str(f.avrg()) +
                       ' weight: ' + str(index))
    plt.title(string_to_write)
    plt.xlabel('Time $(ms)$')
    plt.ylabel('instantaneous synchrony')
    text_file.write(string_to_write)

    f = spk.spike_profile(spike_trains[0], spike_trains[1])
    x, y = f.get_plottable_data()
    plt.plot(x, y, '-b', label="SPIKE-profile")
    plt.axis([0, 4000, -0.1, 1.1])
    plt.legend(loc="center right")
    plt.clf()

    plt.figure()
    plt.subplot(211)
    f = spk.spike_sync_profile(spike_trains)
    x, y = f.get_plottable_data()
    plt.plot(x, y, '-b', alpha=0.7, label="SPIKE-Sync profile")
    x1, y1 = f.get_plottable_data(averaging_window_size=50)
    plt.plot(x1, y1, '-k', lw=2.5, label="averaged SPIKE-Sync profile")

    plt.subplot(212)
    f_psth = spk.psth(spike_trains, bin_size=50.0)
    x, y = f_psth.get_plottable_data()
    plt.plot(x, y, '-k', alpha=1.0, label="PSTH")
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Spikes per bin')
    plt.savefig(str('weight_') + str(k) + 'multivariate_PSTH' + '.png')
    plt.close()

    plt.clf()
    plt.figure()
    f_psth = spk.psth(spike_trains, bin_size=50.0)
    x, y = f_psth.get_plottable_data()
    plt.plot(x, y, '-k', alpha=1.0, label="PSTH")
    plt.savefig(str('weight_') + str(k) + 'exclusively_PSTH' + '.png')
    plt.close()

    plt.figure()
    isi_distance = spk.isi_distance_matrix(spike_trains)
    plt.imshow(isi_distance, interpolation='none')
    plt.title('Pairwise ISI-distance, T=0-2000')
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')
    plt.savefig(str('weight_') + str(k) + 'ISI_distance' + '.png')
    plt.close()

    plt.figure()
    plt.clf()
    import seaborn as sns
    sns.set()
    sns.clustermap(isi_distance)  #, vmin=-1, vmax=1)
    plt.savefig(str('weight_') + str(k) + 'cluster_isi_distance' + '.png')
    plt.close()

    plt.figure()
    spike_distance = spk.spike_distance_matrix(spike_trains,
                                               interval=(0, float(tstop)))
    import pickle
    with open('spike_distance_matrix.p', 'wb') as f:
        pickle.dump(spike_distance, f)
    plt.imshow(spike_distance, interpolation='none')
    plt.title("Pairwise SPIKE-distance, T=0-2000")
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')
    plt.savefig(str('weight_') + str(k) + 'spike_distance_matrix' + '.png')
    plt.close()

    plt.figure()
    plt.clf()
    sns.set()
    sns.clustermap(spike_distance)
    plt.savefig(str('weight_') + str(k) + 'cluster_spike_distance' + '.png')
    plt.close()

    plt.figure()
    spike_sync = spk.spike_sync_matrix(spike_trains,
                                       interval=(0, float(tstop)))
    plt.imshow(spike_sync, interpolation='none')
    plt.title('Pairwise SPIKE-synchrony, T=0-2000')
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')
    a = numpy.asarray(spike_sync)
    numpy.savetxt("spike_sync_matrix.csv", a, delimiter=",")

    plt.figure()
    plt.clf()
    sns.clustermap(spike_sync)
    plt.savefig(str('weight_') + str(k) + 'cluster_spike_sync_distance' +
                '.png')
    plt.close()
# Grab all the spike times and neuron indices from the "SpikeMonitor" objects
poiSpikeTimes = numpy.asarray(PoiMonitor.t)
poiSpikeNeuro = numpy.asarray(PoiMonitor.i)
neuSpikeTimes = numpy.asarray(NeuMonitor1.t)

# Make a list where we will put the set of "SpikeTrain" objects
spikiTimesObjList = []

# Fill the list with a spike train for each neuron in the simulation
for i in numpy.unique(poiSpikeNeuro):
    # find the spike times that correspond to neuron i
    inds = numpy.argwhere(poiSpikeNeuro == i)
    # "inds" is a 2D array; the "SpikeTrain" class needs a 1D array
    inds = inds[:, 0]
    # append a "SpikeTrain" object to spikiTimesObjList
    spikiTimesObjList.append(
        spk.SpikeTrain(poiSpikeTimes[inds], edges=(0., 1.), is_sorted=True))

# Make a "spike_profile", which compares all the spike trains simultaneously
spike_profile = spk.spike_profile(spikiTimesObjList)
# Likewise make an "isi_profile" over all the spike trains together
isi_profile = spk.isi_profile(spikiTimesObjList)
# ... and a "spike_sync_profile"
sync_profile = spk.spike_sync_profile(spikiTimesObjList)

# Now plot what these profiles look like.
plt.figure(1)
x, y = spike_profile.get_plottable_data()
plt.plot(x, y, '--k')
plt.figure(2)
x, y = isi_profile.get_plottable_data()
plt.plot(x, y, '--k')
plt.figure(3)
import pyspike as spk

spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                              edges=(0, 4000))

# plot the spike times
for (i, spike_train) in enumerate(spike_trains):
    plt.scatter(spike_train, i*np.ones_like(spike_train), marker='|')

# profile of the first two spike trains
f = spk.isi_profile(spike_trains, indices=[0, 1])
x, y = f.get_plottable_data()

plt.figure()
plt.plot(x, np.abs(y), '--k', label="ISI-profile")
print("ISI-distance: %.8f" % f.avrg())

f = spk.spike_profile(spike_trains, indices=[0, 1])
x, y = f.get_plottable_data()

plt.plot(x, y, '-b', label="SPIKE-profile")
print("SPIKE-distance: %.8f" % f.avrg())

plt.legend(loc="upper left")
plt.show()
def get_spike_distance(spike_train_1, spike_train_2):
    return spk.spike_profile(spike_train_1, spike_train_2).avrg()
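# Usage sketch for the helper above, with made-up spike times; assumes
# `import pyspike as spk` as elsewhere in these snippets. Note that
# spk.spike_distance(t1, t2) yields the same value directly, as the tests
# in this section assert via f.avrg().
import pyspike as spk

t1 = spk.SpikeTrain([0.1, 0.3, 0.7], edges=(0.0, 1.0))
t2 = spk.SpikeTrain([0.2, 0.35, 0.8], edges=(0.0, 1.0))
print(get_spike_distance(t1, t2))   # via the averaged SPIKE-profile
print(spk.spike_distance(t1, t2))   # direct computation, should match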
def test_spike():
    # generate two spike trains:
    t1 = SpikeTrain([0.0, 2.0, 5.0, 8.0], 10.0)
    t2 = SpikeTrain([0.0, 1.0, 5.0, 9.0], 10.0)

    expected_times = np.array([0.0, 1.0, 2.0, 5.0, 8.0, 9.0, 10.0])

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)

    assert_almost_equal(f.avrg(), 1.6624149659863946e-01, decimal=15)
    assert_almost_equal(f.y2[-1], 0.1394558, decimal=6)

    t1 = SpikeTrain([0.2, 0.4, 0.6, 0.7], 1.0)
    t2 = SpikeTrain([0.3, 0.45, 0.8, 0.9, 0.95], 1.0)

    # pen&paper calculation of the spike distance
    expected_times = [0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0]
    s1 = np.array([0.1, 0.1, (0.1*0.1+0.05*0.1)/0.2, 0.05,
                   (0.05*0.15*2)/0.2, 0.15, 0.1,
                   (0.1*0.1+0.1*0.2)/0.3, (0.1*0.2+0.1*0.1)/0.3,
                   (0.1*0.05+0.1*0.25)/0.3, 0.1])
    s2 = np.array([0.1, (0.1*0.2+0.1*0.1)/0.3, 0.1, (0.1*0.05*2)/.15,
                   0.05, (0.05*0.2+0.1*0.15)/0.35,
                   (0.05*0.1+0.1*0.25)/0.35, 0.1, 0.1, 0.05, 0.05])
    isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.1, 0.3, 0.3, 0.3, 0.3])
    isi2 = np.array([0.3, 0.3, 0.15, 0.15, 0.35, 0.35, 0.35, 0.1, 0.05, 0.05])
    expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2)
    expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2)

    expected_times = np.array(expected_times)
    expected_y1 = np.array(expected_y1)
    expected_y2 = np.array(expected_y2)

    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1+expected_y2)/2)
    expected_spike_val /= (expected_times[-1]-expected_times[0])

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=15)
    assert_array_almost_equal(f.y2, expected_y2, decimal=15)
    assert_almost_equal(f.avrg(), expected_spike_val, decimal=15)
    assert_almost_equal(spk.spike_distance(t1, t2), expected_spike_val,
                        decimal=15)

    # check with some equal spike times
    t1 = SpikeTrain([0.2, 0.4, 0.6], [0.0, 1.0])
    t2 = SpikeTrain([0.1, 0.4, 0.5, 0.6], [0.0, 1.0])

    expected_times = [0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0]
    # due to the edge correction in the beginning, s1 and s2 are different
    # for left and right values
    s1_r = np.array([0.1, (0.1*0.1+0.1*0.1)/0.2, 0.1, 0.0, 0.0, 0.0, 0.0])
    s1_l = np.array([0.1, (0.1*0.1+0.1*0.1)/0.2, 0.1, 0.0, 0.0, 0.0, 0.0])
    s2_r = np.array([0.1*0.1/0.3, 0.1*0.3/0.3, 0.1*0.2/0.3,
                     0.0, 0.1, 0.0, 0.0])
    s2_l = np.array([0.1*0.1/0.3, 0.1*0.1/0.3, 0.1*0.2/0.3, 0.0,
                     0.1, 0.0, 0.0])
    isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.4])
    isi2 = np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.4])
    expected_y1 = (s1_r[:-1]*isi2+s2_r[:-1]*isi1) / (0.5*(isi1+isi2)**2)
    expected_y2 = (s1_l[1:]*isi2+s2_l[1:]*isi1) / (0.5*(isi1+isi2)**2)

    expected_times = np.array(expected_times)
    expected_y1 = np.array(expected_y1)
    expected_y2 = np.array(expected_y2)

    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1+expected_y2)/2)
    expected_spike_val /= (expected_times[-1]-expected_times[0])

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=14)
    assert_array_almost_equal(f.y2, expected_y2, decimal=14)
    assert_almost_equal(f.avrg(), expected_spike_val, decimal=16)
    assert_almost_equal(spk.spike_distance(t1, t2), expected_spike_val,
                        decimal=16)
b = spk.SpikeTrain(a, [0, 10], is_sorted=False)
print(b.spikes)

spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                              edges=(0, 4000))

# compute the bivariate and the multivariate ISI profile
f = spk.isi_profile(spike_trains[0], spike_trains[1])
f = spk.isi_profile(spike_trains)

# t = [900, 1100, 2000, 3100]
# print("ISI value at t =", t, ":", f(t))
# print("Average ISI distance:", f.avrg())

# compute the bivariate and the multivariate SPIKE profile
f = spk.spike_profile(spike_trains[0], spike_trains[1])
f = spk.spike_profile(spike_trains)

# t = [900, 1100, 2000, 3100]
# print("Multivariate SPIKE value at t =", t, ":", f(t))
# print("Average multivariate SPIKE distance:", f.avrg())

# plot the spike times
for (i, spike_train) in enumerate(spike_trains):
    plt.scatter(spike_train, i * np.ones_like(spike_train), marker='|')
    # print(np.asarray(spike_train))

# profile of the first two spike trains
f = spk.isi_profile(spike_trains, indices=[0, 1])
x, y = f.get_plottable_data()
# x = f.x