Example #1
0
def test_spike_sync_empty():
    """SPIKE-synchronization edge cases: empty and single-spike trains."""
    # Two empty trains: synchronization is defined as 1 ("perfectly in sync").
    st_a = SpikeTrain([], edges=(0.0, 1.0))
    st_b = SpikeTrain([], edges=(0.0, 1.0))
    dist = spk.spike_sync(st_a, st_b)
    assert_allclose(dist, 1.0)
    profile = spk.spike_sync_profile(st_a, st_b)
    assert_allclose(dist, profile.avrg())
    assert_array_equal(profile.x, [0.0, 1.0])
    assert_array_equal(profile.y, [1.0, 1.0])

    # Empty train vs a single spike: no coincidence is possible.
    st_a = SpikeTrain([], edges=(0.0, 1.0))
    st_b = SpikeTrain([0.4], edges=(0.0, 1.0))
    dist = spk.spike_sync(st_a, st_b)
    assert_allclose(dist, 0.0)
    profile = spk.spike_sync_profile(st_a, st_b)
    assert_allclose(dist, profile.avrg())
    assert_array_equal(profile.x, [0.0, 0.4, 1.0])
    assert_array_equal(profile.y, [0.0, 0.0, 0.0])

    # One spike each, close together: fully synchronous.
    st_a = SpikeTrain([0.6], edges=(0.0, 1.0))
    st_b = SpikeTrain([0.4], edges=(0.0, 1.0))
    dist = spk.spike_sync(st_a, st_b)
    assert_almost_equal(dist, 1.0, decimal=15)
    profile = spk.spike_sync_profile(st_a, st_b)
    assert_allclose(dist, profile.avrg())
    assert_array_almost_equal(profile.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
    assert_array_almost_equal(profile.y, [1.0, 1.0, 1.0, 1.0], decimal=15)

    # One spike each, far apart: no coincidence.
    st_a = SpikeTrain([0.2], edges=(0.0, 1.0))
    st_b = SpikeTrain([0.8], edges=(0.0, 1.0))
    dist = spk.spike_sync(st_a, st_b)
    assert_almost_equal(dist, 0.0, decimal=15)
    profile = spk.spike_sync_profile(st_a, st_b)
    assert_allclose(dist, profile.avrg())
    assert_array_almost_equal(profile.x, [0.0, 0.2, 0.8, 1.0], decimal=15)
    assert_array_almost_equal(profile.y, [0.0, 0.0, 0.0, 0.0], decimal=15)

    # Averaging over an interval that contains no spikes at all.
    st_a = SpikeTrain([2.0, 5.0], [0, 10.0])
    st_b = SpikeTrain([2.1, 7.0], [0, 10.0])
    st_c = SpikeTrain([5.1, 6.0], [0, 10.0])
    res = spk.spike_sync_profile(st_a, st_b).avrg(interval=[3.0, 4.0])
    assert_allclose(res, 1.0)
    res = spk.spike_sync(st_a, st_b, interval=[3.0, 4.0])
    assert_allclose(res, 1.0)

    # Pairwise matrix over the spike-free interval: ones off the diagonal.
    sync_matrix = spk.spike_sync_matrix([st_a, st_b, st_c],
                                        interval=[3.0, 4.0])
    assert_array_equal(sync_matrix, np.ones((3, 3)) - np.diag(np.ones(3)))
Example #2
0
def test_multi_spike_sync():
    """Multivariate SPIKE-synchronization: basic checks plus regression values."""
    trains = [
        SpikeTrain([100, 300, 400, 405, 410, 500, 700, 800,
                    805, 810, 815, 900], 1000),
        SpikeTrain([100, 200, 205, 210, 295, 350, 400, 510,
                    600, 605, 700, 910], 1000),
        SpikeTrain([100, 180, 198, 295, 412, 420, 510, 640,
                    695, 795, 820, 920], 1000),
    ]
    # Every pair of these trains is exactly half-synchronous.
    for a, b in [(0, 1), (0, 2), (1, 2)]:
        assert_almost_equal(spk.spike_sync(trains[a], trains[b]),
                            0.5, decimal=15)

    profile = spk.spike_sync_profile_multi(trains)
    # hands on definition of the average multivariate spike synchronization
    # expected = (f1.integral() + f2.integral() + f3.integral()) /
    #            (np.sum(f1.mp[1:-1])+np.sum(f2.mp[1:-1])+np.sum(f3.mp[1:-1]))
    expected = 0.5
    assert_almost_equal(profile.avrg(), expected, decimal=15)
    assert_almost_equal(spk.spike_sync_multi(trains), expected, decimal=15)

    # Multivariate regression test against bundled reference data.
    spike_trains = spk.load_spike_trains_from_txt(
        os.path.join(TEST_PATH, "SPIKE_Sync_Test.txt"), edges=[0, 4000])
    # Union of all spike times over all trains; the leading empty float
    # array keeps the original float dtype, and np.unique also sorts.
    spike_times = np.unique(
        np.concatenate([np.array([])] + [st.spikes for st in spike_trains]))

    profile = spk.spike_sync_profile_multi(spike_trains)

    # Interior profile support points are exactly the merged spike times.
    assert_equal(spike_times, profile.x[1:-1])
    assert_equal(len(profile.x), len(profile.y))

    assert_equal(np.sum(profile.y[1:-1]), 39932)
    assert_equal(np.sum(profile.mp[1:-1]), 85554)

    # Example with two empty spike trains in the set.
    sts = [
        SpikeTrain([1, 9], [0, 10]),
        SpikeTrain([1, 3], [0, 10]),
        SpikeTrain([], [0, 10]),
        SpikeTrain([], [0, 10]),
    ]
    assert_almost_equal(spk.spike_sync_multi(sts), 1.0 / 6.0, decimal=15)
    assert_almost_equal(spk.spike_sync_profile_multi(sts).avrg(),
                        1.0 / 6.0, decimal=15)
def test_regression_spiky():
    """Regression values cross-checked against the original SPIKY tool."""
    # Standard example: slightly detuned regular spike trains.
    st_a = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st_b = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st_a, st_b)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
    isi_profile = spk.isi_profile(st_a, st_b)
    # The ISI profile is constant at 0.1/1.1 for these trains.
    assert_equal(isi_profile.y, 0.1 / 1.1 * np.ones_like(isi_profile.y))

    spike_dist = spk.spike_distance(st_a, st_b)
    assert_equal(spike_dist, 2.1105878248735391e-01)

    spike_sync = spk.spike_sync(st_a, st_b)
    assert_equal(spike_sync, 8.6956521739130432e-01)

    # Multivariate check on the bundled test data.
    spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
                                                  (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # full precision value taken from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1) + len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # full precision value taken from SPIKY
    assert_almost_equal(spike_dist, 2.4432433330596512e-01, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # full precision value taken from SPIKY
    assert_equal(spike_sync, 0.7183531505298066)
def Synchro(spikes1, spikes2):
    """Per-trial SPIKE-synchronization between two sets of binary rasters.

    Each element of ``spikes1``/``spikes2`` is treated as a binary raster
    (assumes one sample per time bin, spike where the value is > 0 — TODO
    confirm against callers); the indices of the positive bins become the
    spike times and the raster length the train's end edge.

    Returns a numpy array with one synchronization value per trial.
    """
    synchs = []
    # zip() pairs the trials directly instead of indexing via range(len(...)).
    for raster1, raster2 in zip(spikes1, spikes2):
        train1 = pyspike.SpikeTrain(np.where(raster1 > 0)[0], len(raster1))
        train2 = pyspike.SpikeTrain(np.where(raster2 > 0)[0], len(raster2))
        synchs.append(pyspike.spike_sync(train1, train2))
    return np.array(synchs)
Example #5
0
def test_regression_spiky():
    """Regression values cross-checked against the original SPIKY tool."""
    # Standard example: slightly detuned regular spike trains.
    st_a = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st_b = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st_a, st_b)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
    isi_profile = spk.isi_profile(st_a, st_b)
    # The ISI profile is constant at 0.1/1.1 for these trains.
    assert_equal(isi_profile.y, 0.1 / 1.1 * np.ones_like(isi_profile.y))

    spike_dist = spk.spike_distance(st_a, st_b)
    assert_equal(spike_dist, 0.211058782487353908)

    spike_sync = spk.spike_sync(st_a, st_b)
    assert_equal(spike_sync, 8.6956521739130432e-01)

    # Multivariate check on the bundled test data.
    spike_trains = spk.load_spike_trains_from_txt(
        os.path.join(TEST_PATH, "PySpike_testdata.txt"), (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # full precision value taken from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1) + len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # full precision value taken from SPIKY
    assert_almost_equal(spike_dist, 0.25188056475463755, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # full precision value taken from SPIKY
    assert_equal(spike_sync, 0.7183531505298066)

    # Eero's edge-correction example: trains occupying opposite halves.
    st_a = SpikeTrain([0.5, 1.5, 2.5], 6.0)
    st_b = SpikeTrain([3.5, 4.5, 5.5], 6.0)

    profile = spk.spike_profile(st_a, st_b)

    expected_times = np.array([0.0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.0])
    # Left/right limit values interleave in y_all (even/odd positions).
    y_all = np.array([
        0.271604938271605, 0.271604938271605, 0.271604938271605,
        0.617283950617284, 0.617283950617284, 0.444444444444444,
        0.285714285714286, 0.285714285714286, 0.444444444444444,
        0.617283950617284, 0.617283950617284, 0.271604938271605,
        0.271604938271605, 0.271604938271605
    ])

    assert_equal(profile.x, expected_times)
    assert_array_almost_equal(profile.y1, y_all[::2], decimal=14)
    assert_array_almost_equal(profile.y2, y_all[1::2], decimal=14)
def test_spike_sync():
    """Bivariate SPIKE-synchronization for a few hand-checked spike pairs."""
    train_a = SpikeTrain([1.0, 2.0, 3.0], 4.0)
    train_b = SpikeTrain([2.1], 4.0)

    # The lone spike at 2.1 coincides only with the spike at 2.0.
    profile = spk.spike_sync_profile(train_a, train_b)
    assert_array_almost_equal(profile.x,
                              np.array([0.0, 1.0, 2.0, 2.1, 3.0, 4.0]),
                              decimal=16)
    assert_array_almost_equal(profile.y,
                              np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0]),
                              decimal=16)

    assert_almost_equal(spk.spike_sync(train_a, train_b), 0.5, decimal=16)

    # With a very small coincidence window nothing matches.
    assert_almost_equal(spk.spike_sync(train_a, train_b, max_tau=0.05),
                        0.0, decimal=16)

    assert_almost_equal(spk.spike_sync(train_a, SpikeTrain([3.1], 4.0)),
                        0.5, decimal=16)

    # A spike at 1.1 pairs with the spike at 1.0.
    train_b = SpikeTrain([1.1], 4.0)
    profile = spk.spike_sync_profile(train_a, train_b)
    assert_array_almost_equal(profile.x,
                              np.array([0.0, 1.0, 1.1, 2.0, 3.0, 4.0]),
                              decimal=16)
    assert_array_almost_equal(profile.y,
                              np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0]),
                              decimal=16)
    assert_almost_equal(spk.spike_sync(train_a, train_b), 0.5, decimal=16)

    # Each of these single-spike partners matches exactly one of the three.
    for partner in ([0.9], [3.0], [1.0]):
        assert_almost_equal(spk.spike_sync(train_a, SpikeTrain(partner, 4.0)),
                            0.5, decimal=16)

    # Two spikes against three: 2 coincidences out of 5 spikes -> 0.4.
    assert_almost_equal(spk.spike_sync(train_a, SpikeTrain([1.5, 3.0], 4.0)),
                        0.4, decimal=16)
Example #7
0
def spike_analysis(spikes, value):
    """Score each spike pattern against its prototype and print the best match.

    For every pattern in ``spikes`` a SpikeTrain over [0.0, 300.0] is built and
    its SPIKE-synchronization with the corresponding entry of the module-level
    ``prototype_trains`` is computed (presumably one prototype per pattern —
    TODO confirm lengths match). The highest synchronization value and its
    index are printed; ties resolve toward the larger index, matching the
    tuple comparison used below.

    ``value`` is accepted for interface compatibility but is not used here.
    """
    distances = []
    # enumerate() replaces the original hand-maintained `i += 1` counter.
    for i, spike in enumerate(spikes):
        spike_train = SpikeTrain(spike, [0.0, 300.0])
        # Note: spike_sync() yields a synchronization value, not an ISI
        # profile — the original variable name was misleading.
        distances.append(spk.spike_sync(prototype_trains[i], spike_train))

    # Decorate-with-index max preserves the largest-index tie-breaking.
    val, idx = max((val, idx) for (idx, val) in enumerate(distances))
    print("Distance: %.8f" % val)
    print("Index: %s" % idx)
def test_multi_spike_sync():
    """Multivariate SPIKE-synchronization: pairwise and profile checks."""
    trains = [
        SpikeTrain([100, 300, 400, 405, 410, 500, 700, 800,
                    805, 810, 815, 900], 1000),
        SpikeTrain([100, 200, 205, 210, 295, 350, 400, 510,
                    600, 605, 700, 910], 1000),
        SpikeTrain([100, 180, 198, 295, 412, 420, 510, 640,
                    695, 795, 820, 920], 1000),
    ]
    # Every pair of these trains is exactly half-synchronous.
    for a, b in [(0, 1), (0, 2), (1, 2)]:
        assert_almost_equal(spk.spike_sync(trains[a], trains[b]),
                            0.5, decimal=15)

    profile = spk.spike_sync_profile_multi(trains)
    # hands on definition of the average multivariate spike synchronization
    # expected = (f1.integral() + f2.integral() + f3.integral()) /
    #            (np.sum(f1.mp[1:-1])+np.sum(f2.mp[1:-1])+np.sum(f3.mp[1:-1]))
    expected = 0.5
    assert_almost_equal(profile.avrg(), expected, decimal=15)
    assert_almost_equal(spk.spike_sync_multi(trains), expected, decimal=15)

    # Multivariate regression test against the stored reference data.
    spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt",
                                                  edges=[0, 4000])
    # Union of all spike times over all trains; the leading empty float
    # array keeps the original float dtype, and np.unique also sorts.
    spike_times = np.unique(
        np.concatenate([np.array([])] + [st.spikes for st in spike_trains]))

    profile = spk.spike_sync_profile_multi(spike_trains)

    # Interior profile support points are exactly the merged spike times.
    assert_equal(spike_times, profile.x[1:-1])
    assert_equal(len(profile.x), len(profile.y))

    assert_equal(np.sum(profile.y[1:-1]), 39932)
    assert_equal(np.sum(profile.mp[1:-1]), 85554)
Example #9
0
def calculate_spike_synchrony_pyspike(ts, gids, interval=None):
    """Average SPIKE-synchronization over a population of spike trains.

    ``ts`` are spike times and ``gids`` the ids of the neurons that fired
    them; ``spike_trains_list_of_list`` groups the times per neuron. When
    no ``interval`` is given, the full span of ``ts`` is used.
    """
    if interval is None:
        interval = [np.min(ts), np.max(ts)]

    # One PySpike train per neuron; the times may arrive unsorted.
    grouped = spike_trains_list_of_list(ts, gids)
    spike_trains = [spk.SpikeTrain(times, interval, is_sorted=False)
                    for times in grouped]

    return spk.spike_sync(spike_trains, interval=interval, max_tau=0.1)
Example #10
0
def test_spike_sync_empty():
    """SPIKE-synchronization edge cases with empty or single-spike trains."""
    # Both trains empty: synchronization is defined as 1.
    st_a = SpikeTrain([], edges=(0.0, 1.0))
    st_b = SpikeTrain([], edges=(0.0, 1.0))
    dist = spk.spike_sync(st_a, st_b)
    assert_equal(dist, 1.0)
    profile = spk.spike_sync_profile(st_a, st_b)
    assert_equal(dist, profile.avrg())
    assert_array_equal(profile.x, [0.0, 1.0])
    assert_array_equal(profile.y, [1.0, 1.0])

    # Empty train vs a single spike: no coincidence is possible.
    st_a = SpikeTrain([], edges=(0.0, 1.0))
    st_b = SpikeTrain([0.4], edges=(0.0, 1.0))
    dist = spk.spike_sync(st_a, st_b)
    assert_equal(dist, 0.0)
    profile = spk.spike_sync_profile(st_a, st_b)
    assert_equal(dist, profile.avrg())
    assert_array_equal(profile.x, [0.0, 0.4, 1.0])
    assert_array_equal(profile.y, [0.0, 0.0, 0.0])

    # One spike each, close together: fully synchronous.
    st_a = SpikeTrain([0.6], edges=(0.0, 1.0))
    st_b = SpikeTrain([0.4], edges=(0.0, 1.0))
    dist = spk.spike_sync(st_a, st_b)
    assert_almost_equal(dist, 1.0, decimal=15)
    profile = spk.spike_sync_profile(st_a, st_b)
    assert_equal(dist, profile.avrg())
    assert_array_almost_equal(profile.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
    assert_array_almost_equal(profile.y, [1.0, 1.0, 1.0, 1.0], decimal=15)

    # One spike each, far apart: no coincidence.
    st_a = SpikeTrain([0.2], edges=(0.0, 1.0))
    st_b = SpikeTrain([0.8], edges=(0.0, 1.0))
    dist = spk.spike_sync(st_a, st_b)
    assert_almost_equal(dist, 0.0, decimal=15)
    profile = spk.spike_sync_profile(st_a, st_b)
    assert_equal(dist, profile.avrg())
    assert_array_almost_equal(profile.x, [0.0, 0.2, 0.8, 1.0], decimal=15)
    assert_array_almost_equal(profile.y, [0.0, 0.0, 0.0, 0.0], decimal=15)
def test_spike_sync():
    """Bivariate SPIKE-synchronization values for hand-checked examples."""
    train_a = SpikeTrain([1.0, 2.0, 3.0], 4.0)
    train_b = SpikeTrain([2.1], 4.0)

    # The single spike at 2.1 coincides only with the spike at 2.0.
    profile = spk.spike_sync_profile(train_a, train_b)
    assert_array_almost_equal(profile.x,
                              np.array([0.0, 1.0, 2.0, 2.1, 3.0, 4.0]),
                              decimal=16)
    assert_array_almost_equal(profile.y,
                              np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0]),
                              decimal=16)

    assert_almost_equal(spk.spike_sync(train_a, train_b), 0.5, decimal=16)

    # With a very small coincidence window nothing matches.
    assert_almost_equal(spk.spike_sync(train_a, train_b, max_tau=0.05),
                        0.0, decimal=16)

    assert_almost_equal(spk.spike_sync(train_a, SpikeTrain([3.1], 4.0)),
                        0.5, decimal=16)

    # A spike at 1.1 pairs with the spike at 1.0.
    train_b = SpikeTrain([1.1], 4.0)
    profile = spk.spike_sync_profile(train_a, train_b)
    assert_array_almost_equal(profile.x,
                              np.array([0.0, 1.0, 1.1, 2.0, 3.0, 4.0]),
                              decimal=16)
    assert_array_almost_equal(profile.y,
                              np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0]),
                              decimal=16)
    assert_almost_equal(spk.spike_sync(train_a, train_b), 0.5, decimal=16)

    # Each of these single-spike partners matches exactly one of the three.
    for partner in ([0.9], [3.0], [1.0]):
        assert_almost_equal(spk.spike_sync(train_a, SpikeTrain(partner, 4.0)),
                            0.5, decimal=16)

    # Two spikes against three: 2 coincidences out of 5 spikes -> 0.4.
    assert_almost_equal(spk.spike_sync(train_a, SpikeTrain([1.5, 3.0], 4.0)),
                        0.4, decimal=16)
Example #12
0
                # Run the Brian simulation for the configured duration.
                run(runTime*ms)

                #code below calculates and stores the pyspike metrics
                # Per-neuron spike times from the Brian SpikeMonitor
                # (values still carry Brian time units here).
                firingValuesWithUnits = Sp1.spike_trains().values()
                firingValues = []
                for i in range(len(firingValuesWithUnits)):
                    firingValues.append(array(firingValuesWithUnits[i]))
                # Write one whitespace-separated line of spike times per
                # neuron so PySpike can re-read them as spike trains.
                fV = open('fv.txt','w')
                for item in firingValues:
                    item = (" ".join(map(str,item)))
                    fV.write("%s\n" % item)
                fV.close()
                # NOTE(review): runTime/1000.0 presumably converts ms to
                # seconds for the edge interval — confirm the units.
                spikeTrains = psp.load_spike_trains_from_txt("fv.txt",edges=(0,runTime/1000.0))
                # Record the sweep parameters and the three PySpike
                # distances for this parameter combination.
                qvalues.iloc[currentLine,0] = tc
                qvalues.iloc[currentLine,1] = delay
                qvalues.iloc[currentLine,2] = psyn
                qvalues.iloc[currentLine,3] = synw
                qvalues.iloc[currentLine,4] = psp.spike_distance(spikeTrains)
                qvalues.iloc[currentLine,5] = psp.isi_distance(spikeTrains)
                qvalues.iloc[currentLine,6] = psp.spike_sync(spikeTrains)
                currentLine += 1

                # Free the per-iteration Brian objects before the next sweep.
                del G1
                del S1
                del Sp1
                del firingValuesWithUnits
                del firingValues
                del spikeTrains

# Persist the full sweep results once all iterations are done.
qvalues.to_excel('qvalues.xlsx', sheet_name='Sheet1')
Example #13
0
# spike synchronization profile
f = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
# NOTE(review): the bivariate profile above is immediately replaced by the
# multivariate profile below, so its result is never used — confirm intended.
f = spk.spike_sync_profile(spike_trains)
x, y = f.get_plottable_data()
plt.plot(x, y, '-b', alpha=0.7, label="SPIKE-Sync profile")

x1, y1 = f.get_plottable_data(averaging_window_size=50)
plt.plot(x1, y1, '-k', lw=2.5, label="averaged SPIKE-Sync profile")
# The optional parameter averaging_window_size determines the size
# of an averaging window to smoothen the profile. If this value is
# 0, no averaging is performed.
print("Average:", f.avrg())

# For the direct computation of the overall spike synchronization
# value within some interval, the spike_sync() function can be used:
# NOTE(review): `ival` must be defined earlier in the script.
spike_sync = spk.spike_sync(spike_trains[0], spike_trains[1], interval=ival)

# Computes the peri-stimulus time histogram
# The PSTH is simply the histogram of merged spike events.
f_psth = spk.psth(spike_trains, bin_size=50.0)
x, y = f_psth.get_plottable_data()
plt.plot(x, y, '-k', alpha=1.0, label="PSTH")

# print("Number of spike trains: %d" % len(spike_trains))
# num_of_spikes = sum([len(spk) for spk in spike_trains])
# print("Number of spikes: %d" % num_of_spikes)

# spike_train_order.
# Generates a Poisson spike train with the given
# rate in the given time interval
st1 = spk.generate_poisson_spikes(1.0, [0, 20])
Example #14
0
# Plot simple relationship within errors within an hourly window.
# spike_sync_matrix: pairwise SPIKE-synchronization of the priority trains;
# coincidences are accepted within one hour (max_tau is in seconds here).
normal = pd.DataFrame(spk.spike_sync_matrix(spike_trains, max_tau=60 * 60),
                      index=labels,
                      columns=labels)
save_matrix_plot(normal, 'Priority errors, Norway',
                 '{}/Norway_errors.png'.format(sys.argv[2]))

# First simple experiment: shift every priority one hour. Somewhat arbitrarily.
shifted = pd.DataFrame(index=labels, columns=labels)
for P in shifted.index:
    # Shift this priority's events back one hour, then re-compare against
    # every unshifted priority train.
    P_shifted = df_to_spike_train(
        fhs[fhs.priority == P].shift(-1, pd.Timedelta(hours=1)), t_start,
        edges)
    shifted[P] = [
        spk.spike_sync(P_shifted, s_t, max_tau=60 * 60) for s_t in spike_trains
    ]
# NOTE(review): '\m' is not a recognized escape sequence — a raw string
# (r'$\mathregular{…') would be safer for this LaTeX label.
shifted.index = ['$\mathregular{' + x + '_{-1h}}$' for x in labels]
save_matrix_plot(shifted, 'Priority errors shifted, Norway',
                 '{}/Norway_errors_shifted.png'.format(sys.argv[2]))

P1 = fhs.query("priority == 'P1'")
P2 = fhs.query("priority == 'P2'")

# More complex experiment: find which time shift led to the best synchronous spike correlation.
# Rows: candidate delays in minutes (0..60); columns: one per county.
results = pd.DataFrame(index=np.linspace(0, 60, 13, dtype=int),
                       columns=np.unique(fhs.county),
                       dtype=float)

for delay in results.index:
    for county in results.columns:
Example #15
0
def analyze_result(name, stim, result, fs=10000, save=True):
    """Summarize one simulation run into a dict of synchrony/variability metrics.

    Pre- and post-stimulus excitatory/inhibitory activity is characterized by
    kappa (via ``futil.kappa``), the Fano factor, ISI- and SPIKE-synchronization
    distances against the stimulus train, and a Levenshtein distance between
    firing orders.

    Parameters
    ----------
    name : str
        Basename for the optional ``<name>_analysis.csv`` output file.
    stim : float
        Stimulus onset time in seconds; spikes are split into pre/post windows.
    result : dict
        Holds spike monitors under ``'spikes_e'``, ``'spikes_i'`` and
        ``'spikes_stim'``, each exposing ``.i`` (neuron indices) and ``.t``
        (spike times carrying Brian units).
    fs : int, optional
        Sampling rate; currently unused in this function.
    save : bool, optional
        If True, also write the metrics to ``<name>_analysis.csv``.

    Returns
    -------
    dict
        Mapping of metric name to value.
    """
    analysis = {}

    spikes_e = result['spikes_e']
    spikes_stim = result['spikes_stim']
    spikes_i = result['spikes_i']

    # Get Ns and ts (divide by `second` to strip Brian units)
    ns_e, ts_e = spikes_e.i, spikes_e.t / second
    ns_i, ts_i = spikes_i.i, spikes_i.t / second
    ns_stim, ts_stim = spikes_stim.i, spikes_stim.t / second

    # Keep only neurons 0-199
    mask = ns_e < 200
    ns_e, ts_e = ns_e[mask], ts_e[mask]
    mask = ns_i < 200
    ns_i, ts_i = ns_i[mask], ts_i[mask]

    # -------------------------------------------------------------
    # Drop first 200 ms (startup transient)
    mask = ts_e > 0.2
    ns_e, ts_e = ns_e[mask], ts_e[mask]
    mask = ts_i > 0.2
    ns_i, ts_i = ns_i[mask], ts_i[mask]

    # -------------------------------------------------------------
    # Look at pre-stim first
    mask = ts_e < stim
    ns_pre_e, ts_pre_e = ns_e[mask], ts_e[mask]
    mask = ts_i < stim
    ns_pre_i, ts_pre_i = ns_i[mask], ts_i[mask]

    # kappa (population synchrony; each population against itself)
    r_e = futil.kappa(ns_pre_e, ts_pre_e, ns_pre_e, ts_pre_e, (0, 1), 1.0 / 1000)  # 1 ms bins
    analysis['kappa_pre_e'] = r_e
    r_i = futil.kappa(ns_pre_i, ts_pre_i, ns_pre_i, ts_pre_i, (0, 1), 1.0 / 1000)  # 1 ms bins
    analysis['kappa_pre_i'] = r_i

    # fano (mean over neurons, ignoring NaNs for silent cells)
    fanos_e = futil.fano(ns_pre_e, ts_pre_e)
    mfano_e = np.nanmean(list(fanos_e.values()))
    analysis['fano_pre_e'] = mfano_e

    # -------------------------------------------------------------
    # Drop times before stim time
    mask = ts_e > stim
    ns_e, ts_e = ns_e[mask], ts_e[mask]
    mask = ts_i > stim
    ns_i, ts_i = ns_i[mask], ts_i[mask]

    # kappa
    r_e = futil.kappa(ns_e, ts_e, ns_e, ts_e, (0, 1), 1.0 / 1000)  # 1 ms bins
    analysis['kappa_e'] = r_e
    r_i = futil.kappa(ns_i, ts_i, ns_i, ts_i, (0, 1), 1.0 / 1000)  # 1 ms bins
    analysis['kappa_i'] = r_i

    # fano
    fanos_e = futil.fano(ns_e, ts_e)
    mfano_e = np.nanmean(list(fanos_e.values()))
    analysis['fano_e'] = mfano_e

    # ISI and SPIKE distances between stimulus and excitatory trains,
    # restricted to the (0.5, 1) window.
    sto_e = spk.SpikeTrain(ts_e, (0.5, 1))
    sto_stim = spk.SpikeTrain(ts_stim, (0.5, 1))
    sto_e.sort()
    sto_stim.sort()
    isi = spk.isi_distance(sto_stim, sto_e)
    sync = spk.spike_sync(sto_stim, sto_e)
    analysis['isi_e'] = isi
    analysis['sync_e'] = sync

    # Levenshtein distance between the neuron firing orders
    ordered_e, _ = futil.ts_sort(ns_e, ts_e)
    ordered_stim, _ = futil.ts_sort(ns_stim, ts_stim)
    lev = futil.levenshtein(list(ordered_stim), list(ordered_e))
    analysis['lev_e'] = lev

    if save:
        with open(name + '_analysis.csv', 'w') as f:
            [f.write('{0},{1}\n'.format(k, v)) for k, v in analysis.items()]

    return analysis
Example #16
0
plt.savefig('fig/spike_sync_2.png')
plt.close()

# multivariant---------------------------------------------#
plt.figure()
plt.subplot(211)

# Multivariate SPIKE-Sync profile over all loaded spike trains.
f = spk.spike_sync_profile(spike_trains)
x, y = f.get_plottable_data()
plt.plot(x, y, '-b', alpha=0.7, label="SPIKE-Sync profile")

# Smoothed version of the same profile (50-sample averaging window).
x1, y1 = f.get_plottable_data(averaging_window_size=50)
plt.plot(x1, y1, '-k', lw=2.5, label="averaged SPIKE-Sync profile")
plt.legend(loc='lower right')

f_direct = spk.spike_sync(spike_trains, interval=[0, 4000])

# print() calls: the originals were Python-2 print statements, which are
# syntax errors under Python 3.
print("Average from spike_sync_profile :", f.avrg())
print("directly from spike_sync        :", f_direct)

plt.subplot(212)
# Peri-stimulus time histogram of the merged spike events.
f_psth = spk.psth(spike_trains, bin_size=50.0)
x, y = f_psth.get_plottable_data()
plt.plot(x, y, '-k', alpha=1.0, label="PSTH")
plt.legend()

plt.savefig('fig/spike_sync_multi.png')
plt.close()

# plt.show()
# Analysis window: one hour + 1 s before the first event through 1 s after
# the last event; edges are expressed in seconds from t_start.
t_start = first - pd.Timedelta(seconds=60*60+1)
t_end = last + pd.Timedelta(seconds=1)

edges = (0, (t_end - t_start).total_seconds())

# One spike train per priority label.
spike_trains = [ df_to_spike_train(fhs.query('priority == @priority'), t_start, edges) for priority in labels ]

# Plot simple relationship within errors within an hourly window.
# max_tau is in seconds, so coincidences within one hour count.
normal = pd.DataFrame(spk.spike_sync_matrix(spike_trains, max_tau=60*60), index=labels, columns=labels)
save_matrix_plot(normal, 'Priority errors, Norway', '{}/Norway_errors.png'.format(sys.argv[2]))

# First simple experiment: shift every priority one hour. Somewhat arbitrarily.
shifted = pd.DataFrame(index=labels, columns=labels)
for P in shifted.index:
    # Shift this priority's events back one hour, then compare against
    # every unshifted priority train.
    P_shifted = df_to_spike_train(fhs[ fhs.priority == P ].shift(-1, pd.Timedelta(hours=1)), t_start, edges)
    shifted[P] = [ spk.spike_sync(P_shifted, s_t, max_tau=60*60) for s_t in spike_trains ]
# NOTE(review): '\m' is not a recognized escape sequence — a raw string
# literal would be safer for this LaTeX label.
shifted.index = [ '$\mathregular{'+x+'_{-1h}}$' for x in labels ]
save_matrix_plot(shifted, 'Priority errors shifted, Norway', '{}/Norway_errors_shifted.png'.format(sys.argv[2]))

P1 = fhs.query("priority == 'P1'")
P2 = fhs.query("priority == 'P2'")

# More complex experiment: find which time shift led to the best synchronous spike correlation.
# Rows: candidate delays in minutes (0..60); columns: one per county.
results = pd.DataFrame(index = np.linspace(0,60,13,dtype=int), columns = np.unique(fhs.county), dtype=float)

for delay in results.index:
    for county in results.columns:
        spike_trains = [ df_to_spike_train(fhs.query('county == @county and priority == @priority'), t_start, edges)
                         for priority in labels ]
        
        P1_shift = df_to_spike_train(P1.query('county == @county').shift(-1, pd.Timedelta(minutes=delay)), t_start, edges)
Example #18
0
def test_spike_sync():
    """Bivariate SPIKE-synchronization plus profile add/integral checks."""
    train_a = SpikeTrain([1.0, 2.0, 3.0], 4.0)
    train_b = SpikeTrain([2.1], 4.0)

    # The lone spike at 2.1 coincides only with the spike at 2.0.
    profile = spk.spike_sync_profile(train_a, train_b)
    assert_array_almost_equal(profile.x,
                              np.array([0.0, 1.0, 2.0, 2.1, 3.0, 4.0]),
                              decimal=16)
    assert_array_almost_equal(profile.y,
                              np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0]),
                              decimal=16)

    assert_almost_equal(spk.spike_sync(train_a, train_b), 0.5, decimal=16)

    # With a very small coincidence window nothing matches.
    assert_almost_equal(spk.spike_sync(train_a, train_b, max_tau=0.05),
                        0.0, decimal=16)

    assert_almost_equal(spk.spike_sync(train_a, SpikeTrain([3.1], 4.0)),
                        0.5, decimal=16)

    # A spike at 1.1 pairs with the spike at 1.0.
    train_b = SpikeTrain([1.1], 4.0)
    profile = spk.spike_sync_profile(train_a, train_b)
    assert_array_almost_equal(profile.x,
                              np.array([0.0, 1.0, 1.1, 2.0, 3.0, 4.0]),
                              decimal=16)
    assert_array_almost_equal(profile.y,
                              np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0]),
                              decimal=16)
    assert_almost_equal(spk.spike_sync(train_a, train_b), 0.5, decimal=16)

    # Each of these single-spike partners matches exactly one of the three.
    for partner in ([0.9], [3.0], [1.0]):
        assert_almost_equal(spk.spike_sync(train_a, SpikeTrain(partner, 4.0)),
                            0.5, decimal=16)

    # Two spikes against three: 2 coincidences out of 5 spikes -> 0.4.
    assert_almost_equal(spk.spike_sync(train_a, SpikeTrain([1.5, 3.0], 4.0)),
                        0.4, decimal=16)

    # Coincidence right at the end of the interval.
    train_a = SpikeTrain([1.0, 2.0, 4.0], 4.0)
    train_b = SpikeTrain([3.8], 4.0)
    train_c = SpikeTrain([3.9], 4.0)

    profile = spk.spike_sync_profile(train_a, train_b)
    assert_array_almost_equal(profile.x,
                              np.array([0.0, 1.0, 2.0, 3.8, 4.0, 4.0]),
                              decimal=16)
    assert_array_almost_equal(profile.y,
                              np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0]),
                              decimal=16)

    # Adding two profiles must add their integrals component-wise.
    other = spk.spike_sync_profile(train_b, train_c)
    int_a = profile.integral()
    int_b = other.integral()
    profile.add(other)
    int_sum = profile.integral()

    assert_equal(int_a[0] + int_b[0], int_sum[0])
    assert_equal(int_a[1] + int_b[1], int_sum[1])