Example #1

import os

import numpy as np
from numpy.testing import assert_equal, assert_almost_equal

import pyspike as spk
from pyspike import SpikeTrain

# TEST_PATH is assumed to point at the directory containing the test data files
TEST_PATH = os.path.dirname(os.path.realpath(__file__))


def test_multi_spike_sync():
    # some basic multivariate check
    spikes1 = SpikeTrain(
        [100, 300, 400, 405, 410, 500, 700, 800, 805, 810, 815, 900], 1000)
    spikes2 = SpikeTrain(
        [100, 200, 205, 210, 295, 350, 400, 510, 600, 605, 700, 910], 1000)
    spikes3 = SpikeTrain(
        [100, 180, 198, 295, 412, 420, 510, 640, 695, 795, 820, 920], 1000)
    assert_almost_equal(spk.spike_sync(spikes1, spikes2), 0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes1, spikes3), 0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes2, spikes3), 0.5, decimal=15)

    f = spk.spike_sync_profile_multi([spikes1, spikes2, spikes3])
    # hands on definition of the average multivariate spike synchronization
    # expected = (f1.integral() + f2.integral() + f3.integral()) / \
    #            (np.sum(f1.mp[1:-1])+np.sum(f2.mp[1:-1])+np.sum(f3.mp[1:-1]))
    expected = 0.5
    assert_almost_equal(f.avrg(), expected, decimal=15)
    assert_almost_equal(spk.spike_sync_multi([spikes1, spikes2, spikes3]),
                        expected,
                        decimal=15)

    # multivariate regression test
    spike_trains = spk.load_spike_trains_from_txt(os.path.join(
        TEST_PATH, "SPIKE_Sync_Test.txt"),
                                                  edges=[0, 4000])
    # extract all spike times
    spike_times = np.array([])
    for st in spike_trains:
        spike_times = np.append(spike_times, st.spikes)
    spike_times = np.unique(np.sort(spike_times))

    f = spk.spike_sync_profile_multi(spike_trains)

    assert_equal(spike_times, f.x[1:-1])
    assert_equal(len(f.x), len(f.y))

    assert_equal(np.sum(f.y[1:-1]), 39932)
    assert_equal(np.sum(f.mp[1:-1]), 85554)

    # example with 2 empty spike trains
    sts = []
    sts.append(SpikeTrain([1, 9], [0, 10]))
    sts.append(SpikeTrain([1, 3], [0, 10]))
    sts.append(SpikeTrain([], [0, 10]))
    sts.append(SpikeTrain([], [0, 10]))

    assert_almost_equal(spk.spike_sync_multi(sts), 1.0 / 6.0, decimal=15)
    assert_almost_equal(spk.spike_sync_profile_multi(sts).avrg(),
                        1.0 / 6.0,
                        decimal=15)
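
The commented-out "hands on definition" above can be spelled out. The sketch below is not part of the original test; it computes the same quantity from the three pairwise SPIKE-Sync profiles, assuming that f1, f2, f3 in the comment denote those pairwise profiles, and uses PySpike's spike_sync_profile together with the profile's integral() method and multiplicity array mp:

import numpy as np
import pyspike as spk

# pairwise SPIKE-Sync profiles of the three spike trains defined above
f12 = spk.spike_sync_profile(spikes1, spikes2)
f13 = spk.spike_sync_profile(spikes1, spikes3)
f23 = spk.spike_sync_profile(spikes2, spikes3)

# summed coincidence counts divided by summed multiplicities
expected = (f12.integral() + f13.integral() + f23.integral()) / \
           (np.sum(f12.mp[1:-1]) + np.sum(f13.mp[1:-1]) + np.sum(f23.mp[1:-1]))
# should reproduce spk.spike_sync_multi([spikes1, spikes2, spikes3]) == 0.5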
Example #2

import numpy as np
from scipy.io import loadmat

import pyspike as spk


def check_single_spike_train_set(index):
    """ Debuging function """
    np.set_printoptions(precision=15)
    spike_file = "regression_random_spikes.mat"
    spikes_name = "spikes"
    result_name = "Distances"
    result_file = "regression_random_results_cSPIKY.mat"

    spike_train_sets = loadmat(spike_file)[spikes_name][0]

    results_cSPIKY = loadmat(result_file)[result_name]

    spike_train_data = spike_train_sets[index]

    spike_trains = []
    N = 0
    for spikes in spike_train_data[0]:
        N += len(spikes.flatten())
        print("Spikes:", len(spikes.flatten()))
        spikes_array = spikes.flatten()
        # clamp a spike that lies beyond the interval edge back to 100.0
        if len(spikes_array) > 0 and (spikes_array[-1] > 100.0):
            spikes_array[-1] = 100.0
        spike_trains.append(spk.SpikeTrain(spikes_array, 100.0))
        print(spike_trains[-1].spikes)

    print(N)

    print(spk.spike_sync_multi(spike_trains))

    print(spk.spike_sync_profile_multi(spike_trains).integral())
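
A minimal, hypothetical invocation of the debugging helper above, assuming the files "regression_random_spikes.mat" and "regression_random_results_cSPIKY.mat" are available in the working directory:

if __name__ == "__main__":
    # inspect the first spike train set of the regression data
    check_single_spike_train_set(0)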
Example #3

import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from scipy.io import loadmat

import pyspike as spk
from pyspike import SpikeTrain


def test_multi_spike_sync():
    # some basic multivariate check
    spikes1 = SpikeTrain([100, 300, 400, 405, 410, 500, 700, 800,
                          805, 810, 815, 900], 1000)
    spikes2 = SpikeTrain([100, 200, 205, 210, 295, 350, 400, 510,
                          600, 605, 700, 910], 1000)
    spikes3 = SpikeTrain([100, 180, 198, 295, 412, 420, 510, 640,
                          695, 795, 820, 920], 1000)
    assert_almost_equal(spk.spike_sync(spikes1, spikes2),
                        0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes1, spikes3),
                        0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes2, spikes3),
                        0.5, decimal=15)

    f = spk.spike_sync_profile_multi([spikes1, spikes2, spikes3])
    # hands on definition of the average multivariate spike synchronization
    # expected = (f1.integral() + f2.integral() + f3.integral()) / \
    #            (np.sum(f1.mp[1:-1])+np.sum(f2.mp[1:-1])+np.sum(f3.mp[1:-1]))
    expected = 0.5
    assert_almost_equal(f.avrg(), expected, decimal=15)
    assert_almost_equal(spk.spike_sync_multi([spikes1, spikes2, spikes3]),
                        expected, decimal=15)

    # multivariate regression test
    spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt",
                                                  edges=[0, 4000])
    # extract all spike times
    spike_times = np.array([])
    for st in spike_trains:
        spike_times = np.append(spike_times, st.spikes)
    spike_times = np.unique(np.sort(spike_times))

    f = spk.spike_sync_profile_multi(spike_trains)

    assert_equal(spike_times, f.x[1:-1])
    assert_equal(len(f.x), len(f.y))

    assert_equal(np.sum(f.y[1:-1]), 39932)
    assert_equal(np.sum(f.mp[1:-1]), 85554)


def test_regression_random():

    spike_file = "test/numeric/regression_random_spikes.mat"
    spikes_name = "spikes"
    result_name = "Distances"
    result_file = "test/numeric/regression_random_results_cSPIKY.mat"

    spike_train_sets = loadmat(spike_file)[spikes_name][0]
    results_cSPIKY = loadmat(result_file)[result_name]

    for i, spike_train_data in enumerate(spike_train_sets):
        spike_trains = []
        for spikes in spike_train_data[0]:
            spike_trains.append(spk.SpikeTrain(spikes.flatten(), 100.0))

        isi = spk.isi_distance_multi(spike_trains)
        isi_prof = spk.isi_profile_multi(spike_trains).avrg()

        spike = spk.spike_distance_multi(spike_trains)
        spike_prof = spk.spike_profile_multi(spike_trains).avrg()

        spike_sync = spk.spike_sync_multi(spike_trains)
        spike_sync_prof = spk.spike_sync_profile_multi(spike_trains).avrg()

        assert_almost_equal(isi,
                            results_cSPIKY[i][0],
                            decimal=14,
                            err_msg="Index: %d, ISI" % i)
        assert_almost_equal(isi_prof,
                            results_cSPIKY[i][0],
                            decimal=14,
                            err_msg="Index: %d, ISI" % i)

        assert_almost_equal(spike,
                            results_cSPIKY[i][1],
                            decimal=14,
                            err_msg="Index: %d, SPIKE" % i)
        assert_almost_equal(spike_prof,
                            results_cSPIKY[i][1],
                            decimal=14,
                            err_msg="Index: %d, SPIKE" % i)

        assert_almost_equal(spike_sync,
                            spike_sync_prof,
                            decimal=14,
                            err_msg="Index: %d, SPIKE-Sync" % i)
Example #5
print("Average:", f.avrg())


f = spk.spike_profile(spike_trains[0], spike_trains[1])
x, y = f.get_plottable_data()

plt.plot(x, y, '-b', label="SPIKE-profile")

plt.axis([0, 4000, -0.1, 1.1])
plt.legend(loc="center right")

plt.figure()

plt.subplot(211)

f = spk.spike_sync_profile_multi(spike_trains)
x, y = f.get_plottable_data()
plt.plot(x, y, '-b', alpha=0.7, label="SPIKE-Sync profile")

x1, y1 = f.get_plottable_data(averaging_window_size=50)
plt.plot(x1, y1, '-k', lw=2.5, label="averaged SPIKE-Sync profile")

plt.subplot(212)

f_psth = spk.psth(spike_trains, bin_size=50.0)
x, y = f_psth.get_plottable_data()
plt.plot(x, y, '-k', alpha=1.0, label="PSTH")


print("Average:", f.avrg())