Example no. 1
def test_multi_spike_sync():
    # some basic multivariate check
    spikes1 = SpikeTrain(
        [100, 300, 400, 405, 410, 500, 700, 800, 805, 810, 815, 900], 1000)
    spikes2 = SpikeTrain(
        [100, 200, 205, 210, 295, 350, 400, 510, 600, 605, 700, 910], 1000)
    spikes3 = SpikeTrain(
        [100, 180, 198, 295, 412, 420, 510, 640, 695, 795, 820, 920], 1000)
    assert_almost_equal(spk.spike_sync(spikes1, spikes2), 0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes1, spikes3), 0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes2, spikes3), 0.5, decimal=15)

    f = spk.spike_sync_profile_multi([spikes1, spikes2, spikes3])
    # hands-on definition of the average multivariate spike synchronization
    # expected = (f1.integral() + f2.integral() + f3.integral()) / \
    #            (np.sum(f1.mp[1:-1])+np.sum(f2.mp[1:-1])+np.sum(f3.mp[1:-1]))
    expected = 0.5
    assert_almost_equal(f.avrg(), expected, decimal=15)
    assert_almost_equal(spk.spike_sync_multi([spikes1, spikes2, spikes3]),
                        expected,
                        decimal=15)

    # multivariate regression test
    spike_trains = spk.load_spike_trains_from_txt(os.path.join(
        TEST_PATH, "SPIKE_Sync_Test.txt"),
                                                  edges=[0, 4000])
    # extract all spike times
    spike_times = np.array([])
    for st in spike_trains:
        spike_times = np.append(spike_times, st.spikes)
    spike_times = np.unique(np.sort(spike_times))

    f = spk.spike_sync_profile_multi(spike_trains)

    assert_equal(spike_times, f.x[1:-1])
    assert_equal(len(f.x), len(f.y))

    assert_equal(np.sum(f.y[1:-1]), 39932)
    assert_equal(np.sum(f.mp[1:-1]), 85554)

    # example with 2 empty spike trains
    sts = []
    sts.append(SpikeTrain([1, 9], [0, 10]))
    sts.append(SpikeTrain([1, 3], [0, 10]))
    sts.append(SpikeTrain([], [0, 10]))
    sts.append(SpikeTrain([], [0, 10]))

    assert_almost_equal(spk.spike_sync_multi(sts), 1.0 / 6.0, decimal=15)
    assert_almost_equal(spk.spike_sync_profile_multi(sts).avrg(),
                        1.0 / 6.0,
                        decimal=15)
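For reference, the commented-out "hands-on definition" above can be spelled out with the pairwise profiles. This is only a sketch, assuming the same spikes1/spikes2/spikes3 trains and the integral()/mp members that the comment itself refers to:

# Sketch of the "hands-on definition": the average multivariate
# SPIKE-synchronization is the summed coincidence counts of all pairwise
# profiles divided by their summed multiplicities (edge points excluded).
f1 = spk.spike_sync_profile(spikes1, spikes2)
f2 = spk.spike_sync_profile(spikes1, spikes3)
f3 = spk.spike_sync_profile(spikes2, spikes3)
expected_by_hand = (f1.integral() + f2.integral() + f3.integral()) / \
                   (np.sum(f1.mp[1:-1]) + np.sum(f2.mp[1:-1]) + np.sum(f3.mp[1:-1]))
# expected_by_hand should reproduce the hard-coded value 0.5 used above.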
Example no. 2
def check_single_spike_train_set(index):
    """ Debuging function """
    np.set_printoptions(precision=15)
    spike_file = "regression_random_spikes.mat"
    spikes_name = "spikes"
    result_name = "Distances"
    result_file = "regression_random_results_cSPIKY.mat"

    spike_train_sets = loadmat(spike_file)[spikes_name][0]

    results_cSPIKY = loadmat(result_file)[result_name]

    spike_train_data = spike_train_sets[index]

    spike_trains = []
    N = 0
    for spikes in spike_train_data[0]:
        N += len(spikes.flatten())
        print("Spikes:", len(spikes.flatten()))
        spikes_array = spikes.flatten()
        # clamp the last spike to the end of the recording interval (100.0)
        if len(spikes_array) > 0 and spikes_array[-1] > 100.0:
            spikes_array[-1] = 100.0
        spike_trains.append(spk.SpikeTrain(spikes_array, 100.0))
        print(spike_trains[-1].spikes)

    print(N)

    print(spk.spike_sync_multi(spike_trains))

    print(spk.spike_sync_profile_multi(spike_trains).integral())
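A rough usage sketch for the helper above; the imports are assumptions (loadmat from scipy.io, PySpike as spk), and the two hard-coded .mat files must sit in the working directory:

# Assumed imports for check_single_spike_train_set (not part of the original snippet).
import numpy as np
from scipy.io import loadmat
import pyspike as spk

check_single_spike_train_set(0)   # print diagnostics for the first spike train set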
Example no. 3
def test_regression_spiky():
    # standard example
    st1 = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st2 = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st1, st2)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
    isi_profile = spk.isi_profile(st1, st2)
    assert_equal(isi_profile.y, 0.1/1.1 * np.ones_like(isi_profile.y))

    spike_dist = spk.spike_distance(st1, st2)
    assert_equal(spike_dist, 2.1105878248735391e-01)

    spike_sync = spk.spike_sync(st1, st2)
    assert_equal(spike_sync, 8.6956521739130432e-01)

    # multivariate check

    spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
                                                  (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1)+len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_dist, 2.4432433330596512e-01, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # get the full precision from SPIKY
    assert_equal(spike_sync, 0.7183531505298066)
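The flat ISI profile asserted above follows directly from the definition of the ISI-distance for these two periodic trains; a minimal worked sketch:

# st1 and st2 have constant inter-spike intervals of 100 and 110, so the local
# ISI ratio |I1 - I2| / max(I1, I2) is the same everywhere, which also matches
# the averaged isi_dist asserted above (~9.0909e-02).
isi1, isi2 = 100.0, 110.0
local_isi_dist = abs(isi1 - isi2) / max(isi1, isi2)   # = 10/110 = 0.1/1.1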
Example no. 4
def test_multi_variate_subsets():
    spike_trains = spk.load_spike_trains_from_txt(
        os.path.join(TEST_PATH, "PySpike_testdata.txt"), (0.0, 4000.0))
    sub_set = [1, 3, 5, 7]
    spike_trains_sub_set = [spike_trains[i] for i in sub_set]

    v1 = spk.isi_distance_multi(spike_trains_sub_set)
    v2 = spk.isi_distance_multi(spike_trains, sub_set)
    assert_equal(v1, v2)

    v1 = spk.spike_distance_multi(spike_trains_sub_set)
    v2 = spk.spike_distance_multi(spike_trains, sub_set)
    assert_equal(v1, v2)

    v1 = spk.spike_sync_multi(spike_trains_sub_set)
    v2 = spk.spike_sync_multi(spike_trains, sub_set)
    assert_equal(v1, v2)
Example no. 5
def test_multi_variate_subsets():
    spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
                                                  (0.0, 4000.0))
    sub_set = [1, 3, 5, 7]
    spike_trains_sub_set = [spike_trains[i] for i in sub_set]

    v1 = spk.isi_distance_multi(spike_trains_sub_set)
    v2 = spk.isi_distance_multi(spike_trains, sub_set)
    assert_equal(v1, v2)

    v1 = spk.spike_distance_multi(spike_trains_sub_set)
    v2 = spk.spike_distance_multi(spike_trains, sub_set)
    assert_equal(v1, v2)

    v1 = spk.spike_sync_multi(spike_trains_sub_set)
    v2 = spk.spike_sync_multi(spike_trains, sub_set)
    assert_equal(v1, v2)
Example no. 6
def test_regression_spiky():
    # standard example
    st1 = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st2 = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st1, st2)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
    isi_profile = spk.isi_profile(st1, st2)
    assert_equal(isi_profile.y, 0.1 / 1.1 * np.ones_like(isi_profile.y))

    spike_dist = spk.spike_distance(st1, st2)
    assert_equal(spike_dist, 0.211058782487353908)

    spike_sync = spk.spike_sync(st1, st2)
    assert_equal(spike_sync, 8.6956521739130432e-01)

    # multivariate check

    spike_trains = spk.load_spike_trains_from_txt(
        os.path.join(TEST_PATH, "PySpike_testdata.txt"), (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1) + len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_dist, 0.25188056475463755, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # get the full precision from SPIKY
    assert_equal(spike_sync, 0.7183531505298066)

    # Eero's edge correction example
    st1 = SpikeTrain([0.5, 1.5, 2.5], 6.0)
    st2 = SpikeTrain([3.5, 4.5, 5.5], 6.0)

    f = spk.spike_profile(st1, st2)

    expected_times = np.array([0.0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.0])
    y_all = np.array([
        0.271604938271605, 0.271604938271605, 0.271604938271605,
        0.617283950617284, 0.617283950617284, 0.444444444444444,
        0.285714285714286, 0.285714285714286, 0.444444444444444,
        0.617283950617284, 0.617283950617284, 0.271604938271605,
        0.271604938271605, 0.271604938271605
    ])
    expected_y1 = y_all[::2]
    expected_y2 = y_all[1::2]

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=14)
    assert_array_almost_equal(f.y2, expected_y2, decimal=14)
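A minimal sketch of how the y1/y2 split above relates to the profile, assuming the same f, st1 and st2 as in the edge-correction example:

# The SPIKE profile is piecewise linear: each of the len(f.x) - 1 intervals has
# a value at its left end (y1) and its right end (y2), so y_all interleaves
# 2 * (len(f.x) - 1) = 14 numbers and the [::2] / [1::2] slices recover the two
# branches. The time average of the profile is the corresponding distance.
edge_corrected_dist = f.avrg()   # should equal spk.spike_distance(st1, st2)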

Example no. 7
def test_regression_random():

    spike_file = "test/numeric/regression_random_spikes.mat"
    spikes_name = "spikes"
    result_name = "Distances"
    result_file = "test/numeric/regression_random_results_cSPIKY.mat"

    spike_train_sets = loadmat(spike_file)[spikes_name][0]
    results_cSPIKY = loadmat(result_file)[result_name]

    for i, spike_train_data in enumerate(spike_train_sets):
        spike_trains = []
        for spikes in spike_train_data[0]:
            spike_trains.append(spk.SpikeTrain(spikes.flatten(), 100.0))

        isi = spk.isi_distance_multi(spike_trains)
        isi_prof = spk.isi_profile_multi(spike_trains).avrg()

        spike = spk.spike_distance_multi(spike_trains)
        spike_prof = spk.spike_profile_multi(spike_trains).avrg()

        spike_sync = spk.spike_sync_multi(spike_trains)
        spike_sync_prof = spk.spike_sync_profile_multi(spike_trains).avrg()

        assert_almost_equal(isi,
                            results_cSPIKY[i][0],
                            decimal=14,
                            err_msg="Index: %d, ISI" % i)
        assert_almost_equal(isi_prof,
                            results_cSPIKY[i][0],
                            decimal=14,
                            err_msg="Index: %d, ISI" % i)

        assert_almost_equal(spike,
                            results_cSPIKY[i][1],
                            decimal=14,
                            err_msg="Index: %d, SPIKE" % i)
        assert_almost_equal(spike_prof,
                            results_cSPIKY[i][1],
                            decimal=14,
                            err_msg="Index: %d, SPIKE" % i)

        assert_almost_equal(spike_sync,
                            spike_sync_prof,
                            decimal=14,
                            err_msg="Index: %d, SPIKE-Sync" % i)
Example no. 8
def test_multi_spike_sync():
    # some basic multivariate check
    spikes1 = SpikeTrain([100, 300, 400, 405, 410, 500, 700, 800,
                          805, 810, 815, 900], 1000)
    spikes2 = SpikeTrain([100, 200, 205, 210, 295, 350, 400, 510,
                          600, 605, 700, 910], 1000)
    spikes3 = SpikeTrain([100, 180, 198, 295, 412, 420, 510, 640,
                          695, 795, 820, 920], 1000)
    assert_almost_equal(spk.spike_sync(spikes1, spikes2),
                        0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes1, spikes3),
                        0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes2, spikes3),
                        0.5, decimal=15)

    f = spk.spike_sync_profile_multi([spikes1, spikes2, spikes3])
    # hands-on definition of the average multivariate spike synchronization
    # expected = (f1.integral() + f2.integral() + f3.integral()) / \
    #            (np.sum(f1.mp[1:-1])+np.sum(f2.mp[1:-1])+np.sum(f3.mp[1:-1]))
    expected = 0.5
    assert_almost_equal(f.avrg(), expected, decimal=15)
    assert_almost_equal(spk.spike_sync_multi([spikes1, spikes2, spikes3]),
                        expected, decimal=15)

    # multivariate regression test
    spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt",
                                                  edges=[0, 4000])
    # extract all spike times
    spike_times = np.array([])
    for st in spike_trains:
        spike_times = np.append(spike_times, st.spikes)
    spike_times = np.unique(np.sort(spike_times))

    f = spk.spike_sync_profile_multi(spike_trains)

    assert_equal(spike_times, f.x[1:-1])
    assert_equal(len(f.x), len(f.y))

    assert_equal(np.sum(f.y[1:-1]), 39932)
    assert_equal(np.sum(f.mp[1:-1]), 85554)
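The two regression sums above already determine the average synchronization; a minimal sketch, assuming avrg() normalizes the summed coincidence counts by the summed multiplicities as in the "hands-on definition" comment of Example no. 1:

# Relate the asserted regression sums to the average SPIKE-synchronization.
sync_avrg = np.sum(f.y[1:-1]) / np.sum(f.mp[1:-1])   # = 39932 / 85554 ~ 0.4667
# sync_avrg should coincide with f.avrg() and spk.spike_sync_multi(spike_trains).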