Example 1
def test_regression_spiky():
    # standard example
    st1 = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st2 = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st1, st2)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
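    # the expected value follows directly: st1 has a constant ISI of 100,
    # st2 of 110, so the ISI-profile is |100 - 110| / max(100, 110)
    # = 10/110 = 0.1/1.1 ~ 0.0909 everywhere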
    isi_profile = spk.isi_profile(st1, st2)
    assert_equal(isi_profile.y, 0.1/1.1 * np.ones_like(isi_profile.y))

    spike_dist = spk.spike_distance(st1, st2)
    assert_almost_equal(spike_dist, 2.1105878248735391e-01, decimal=15)

    spike_sync = spk.spike_sync(st1, st2)
    assert_almost_equal(spike_sync, 8.6956521739130432e-01, decimal=15)

    # multivariate check

    spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
                                                  (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1)+len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_dist, 2.4432433330596512e-01, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_sync, 0.7183531505298066, decimal=15)
Example 2
def test_merge_empty_spike_trains():
    # first load the data
    spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=(0, 4000))
    # take two non-empty trains, and one empty one
    empty = spk.SpikeTrain([], [spike_trains[0].t_start,
                                spike_trains[0].t_end])
    merged_spikes = spk.merge_spike_trains([spike_trains[0], empty,
                                            spike_trains[1]])
    # test if result is sorted
    assert((merged_spikes.spikes == np.sort(merged_spikes.spikes)).all())
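    # a natural extra check (a sketch, not part of the original test):
    # the empty train must contribute nothing, so merging all three gives
    # the same spikes as merging the two non-empty trains alone
    merged_pair = spk.merge_spike_trains([spike_trains[0], spike_trains[1]])
    assert (merged_spikes.spikes == merged_pair.spikes).all()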
Example 3
def test_regression_spiky():
    # standard example
    st1 = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st2 = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st1, st2)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
    isi_profile = spk.isi_profile(st1, st2)
    assert_equal(isi_profile.y, 0.1 / 1.1 * np.ones_like(isi_profile.y))

    spike_dist = spk.spike_distance(st1, st2)
    assert_almost_equal(spike_dist, 0.211058782487353908, decimal=15)

    spike_sync = spk.spike_sync(st1, st2)
    assert_almost_equal(spike_sync, 8.6956521739130432e-01, decimal=15)

    # multivariate check

    spike_trains = spk.load_spike_trains_from_txt(
        os.path.join(TEST_PATH, "PySpike_testdata.txt"), (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1) + len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_dist, 0.25188056475463755, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_sync, 0.7183531505298066, decimal=15)

    # Eero's edge correction example
    st1 = SpikeTrain([0.5, 1.5, 2.5], 6.0)
    st2 = SpikeTrain([3.5, 4.5, 5.5], 6.0)

    f = spk.spike_profile(st1, st2)

    expected_times = np.array([0.0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.0])
    y_all = np.array([
        0.271604938271605, 0.271604938271605, 0.271604938271605,
        0.617283950617284, 0.617283950617284, 0.444444444444444,
        0.285714285714286, 0.285714285714286, 0.444444444444444,
        0.617283950617284, 0.617283950617284, 0.271604938271605,
        0.271604938271605, 0.271604938271605
    ])
    expected_y1 = y_all[::2]
    expected_y2 = y_all[1::2]
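    # the SPIKE-profile is piecewise linear: for each interval between two
    # sample times, y1 holds the value at its left edge, y2 at its right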

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=14)
    assert_array_almost_equal(f.y2, expected_y2, decimal=14)
Example 4
def test_load_time_series():
    spike_trains = spk.import_spike_trains_from_time_series(TIME_SERIES_DATA,
                                                            start_time=0,
                                                            time_bin=1)
    assert len(spike_trains) == 40
    spike_trains_check = spk.load_spike_trains_from_txt(TIME_SERIES_SPIKES,
                                                        edges=(0, 4000))

    # check spike trains
    for n in range(len(spike_trains)):
        assert_equal(spike_trains[n].spikes, spike_trains_check[n].spikes)
        assert_equal(spike_trains[n].t_start, 0)
        assert_equal(spike_trains[n].t_end, 4000)
Example 5
def raster_plot(file, show_plot=False):

    spike_trains = spk.load_spike_trains_from_txt(file)
    f = plt.figure()
    for (i, spikes) in enumerate(spike_trains):
        plt.plot(spikes, (i + 1) * np.ones_like(spikes), 'b|')
    plt.xlabel("Time (ms)")
    plt.ylabel("Neuron ID")

    if show_plot:
        plt.show()

    return f
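# example usage (a sketch; the file name is an assumption, reusing the test
# data from the other examples):
fig = raster_plot("PySpike_testdata.txt", show_plot=False)
fig.savefig("raster.png")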
Example 6
def test_multi_spike_sync():
    # some basic multivariate check
    spikes1 = SpikeTrain(
        [100, 300, 400, 405, 410, 500, 700, 800, 805, 810, 815, 900], 1000)
    spikes2 = SpikeTrain(
        [100, 200, 205, 210, 295, 350, 400, 510, 600, 605, 700, 910], 1000)
    spikes3 = SpikeTrain(
        [100, 180, 198, 295, 412, 420, 510, 640, 695, 795, 820, 920], 1000)
    assert_almost_equal(spk.spike_sync(spikes1, spikes2), 0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes1, spikes3), 0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes2, spikes3), 0.5, decimal=15)

    f = spk.spike_sync_profile_multi([spikes1, spikes2, spikes3])
    # hands on definition of the average multivariate spike synchronization
    # expected = (f1.integral() + f2.integral() + f3.integral()) / \
    #            (np.sum(f1.mp[1:-1])+np.sum(f2.mp[1:-1])+np.sum(f3.mp[1:-1]))
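    #            where f1, f2, f3 would be the three bivariate profiles,
    #            e.g. f1 = spk.spike_sync_profile(spikes1, spikes2)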
    expected = 0.5
    assert_almost_equal(f.avrg(), expected, decimal=15)
    assert_almost_equal(spk.spike_sync_multi([spikes1, spikes2, spikes3]),
                        expected,
                        decimal=15)

    # multivariate regression test
    spike_trains = spk.load_spike_trains_from_txt(
        os.path.join(TEST_PATH, "SPIKE_Sync_Test.txt"), edges=[0, 4000])
    # extract all spike times
    spike_times = np.array([])
    for st in spike_trains:
        spike_times = np.append(spike_times, st.spikes)
    spike_times = np.unique(np.sort(spike_times))

    f = spk.spike_sync_profile_multi(spike_trains)

    assert_equal(spike_times, f.x[1:-1])
    assert_equal(len(f.x), len(f.y))

    assert_equal(np.sum(f.y[1:-1]), 39932)
    assert_equal(np.sum(f.mp[1:-1]), 85554)

    # example with 2 empty spike trains
    sts = []
    sts.append(SpikeTrain([1, 9], [0, 10]))
    sts.append(SpikeTrain([1, 3], [0, 10]))
    sts.append(SpikeTrain([], [0, 10]))
    sts.append(SpikeTrain([], [0, 10]))

    assert_almost_equal(spk.spike_sync_multi(sts), 1.0 / 6.0, decimal=15)
    assert_almost_equal(spk.spike_sync_profile_multi(sts).avrg(),
                        1.0 / 6.0,
                        decimal=15)
Example 7
def test_load_from_txt():
    spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=(0, 4000))
    assert len(spike_trains) == 40

    # check the first spike train
    spike_times = [64.886, 305.81, 696, 937.77, 1059.7, 1322.2, 1576.1,
                   1808.1, 2121.5, 2381.1, 2728.6, 2966.9, 3223.7, 3473.7,
                   3644.3, 3936.3]
    assert_allclose(spike_times, spike_trains[0].spikes)

    # check auxiliary spikes
    for spike_train in spike_trains:
        assert spike_train.t_start == 0.0
        assert spike_train.t_end == 4000
Example 8
def test_load_from_txt():
    spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
                                                  edges=(0, 4000))
    assert len(spike_trains) == 40

    # check the first spike train
    spike_times = [64.886, 305.81, 696, 937.77, 1059.7, 1322.2, 1576.1,
                   1808.1, 2121.5, 2381.1, 2728.6, 2966.9, 3223.7, 3473.7,
                   3644.3, 3936.3]
    assert_equal(spike_times, spike_trains[0].spikes)

    # check auxiliary spikes
    for spike_train in spike_trains:
        assert spike_train.t_start == 0.0
        assert spike_train.t_end == 4000
Example 9
def test_regression_15_isi():
    # load spike trains
    spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=[0, 4000])

    N = len(spike_trains)

    dist_mat = spk.isi_distance_matrix(spike_trains)
    assert_allclose(dist_mat.shape, (N, N))

    ind = np.arange(N // 2)
    dist_mat = spk.isi_distance_matrix(spike_trains, ind)
    assert_allclose(dist_mat.shape, (N // 2, N // 2))

    ind = np.arange(N // 2, N)
    dist_mat = spk.isi_distance_matrix(spike_trains, ind)
    assert_allclose(dist_mat.shape, (N // 2, N // 2))
Example 10
def test_regression_15_sync():
    # load spike trains
    spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=[0, 4000])

    N = len(spike_trains)

    dist_mat = spk.spike_sync_matrix(spike_trains)
    assert_equal(dist_mat.shape, (N, N))

    ind = np.arange(N // 2)
    dist_mat = spk.spike_sync_matrix(spike_trains, ind)
    assert_equal(dist_mat.shape, (N // 2, N // 2))

    ind = np.arange(N // 2, N)
    dist_mat = spk.spike_sync_matrix(spike_trains, ind)
    assert_equal(dist_mat.shape, (N // 2, N // 2))
Example 11
def test_multi_variate_subsets():
    spike_trains = spk.load_spike_trains_from_txt(
        os.path.join(TEST_PATH, "PySpike_testdata.txt"), (0.0, 4000.0))
    sub_set = [1, 3, 5, 7]
    spike_trains_sub_set = [spike_trains[i] for i in sub_set]

    v1 = spk.isi_distance_multi(spike_trains_sub_set)
    v2 = spk.isi_distance_multi(spike_trains, sub_set)
    assert_equal(v1, v2)

    v1 = spk.spike_distance_multi(spike_trains_sub_set)
    v2 = spk.spike_distance_multi(spike_trains, sub_set)
    assert_equal(v1, v2)

    v1 = spk.spike_sync_multi(spike_trains_sub_set)
    v2 = spk.spike_sync_multi(spike_trains, sub_set)
    assert_equal(v1, v2)
Example 12
def test_multi_variate_subsets():
    spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
                                                  (0.0, 4000.0))
    sub_set = [1, 3, 5, 7]
    spike_trains_sub_set = [spike_trains[i] for i in sub_set]

    v1 = spk.isi_distance_multi(spike_trains_sub_set)
    v2 = spk.isi_distance_multi(spike_trains, sub_set)
    assert_equal(v1, v2)

    v1 = spk.spike_distance_multi(spike_trains_sub_set)
    v2 = spk.spike_distance_multi(spike_trains, sub_set)
    assert_equal(v1, v2)

    v1 = spk.spike_sync_multi(spike_trains_sub_set)
    v2 = spk.spike_sync_multi(spike_trains, sub_set)
    assert_equal(v1, v2)
Example 13
def test_merge_spike_trains():
    # first load the data
    spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=(0, 4000))

    merged_spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]])
    # test if result is sorted
    assert ((merged_spikes.spikes == np.sort(merged_spikes.spikes)).all())
    # check merging
    check_merged_spikes(merged_spikes.spikes,
                        [spike_trains[0].spikes, spike_trains[1].spikes])

    merged_spikes = spk.merge_spike_trains(spike_trains)
    # test if result is sorted
    assert ((merged_spikes.spikes == np.sort(merged_spikes.spikes)).all())
    # check merging
    check_merged_spikes(merged_spikes.spikes,
                        [st.spikes for st in spike_trains])
Example 14
def test_merge_spike_trains():
    # first load the data
    spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
                                                  edges=(0, 4000))

    merged_spikes = spk.merge_spike_trains([spike_trains[0], spike_trains[1]])
    # test if result is sorted
    assert((merged_spikes.spikes == np.sort(merged_spikes.spikes)).all())
    # check merging
    check_merged_spikes(merged_spikes.spikes, [spike_trains[0].spikes,
                                               spike_trains[1].spikes])

    merged_spikes = spk.merge_spike_trains(spike_trains)
    # test if result is sorted
    assert((merged_spikes.spikes == np.sort(merged_spikes.spikes)).all())
    # check merging
    check_merged_spikes(merged_spikes.spikes,
                        [st.spikes for st in spike_trains])
Example 15
def test_save_load():
    file_name = os.path.join(tempfile.mkdtemp(prefix='pyspike_'),
                             "save_load.txt")

    N = 10
    # generate some spike trains
    spike_trains = []
    for n in range(N):
        spike_trains.append(spk.generate_poisson_spikes(1.0, [0, 100]))

    # save them into txt file
    spk.save_spike_trains_to_txt(spike_trains, file_name, precision=17)
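    # precision=17 significant digits is enough to round-trip a double
    # exactly, so the loaded spikes can be compared with assert_array_equal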

    # load again
    spike_trains_loaded = spk.load_spike_trains_from_txt(file_name, [0, 100])

    for n in range(N):
        assert_array_equal(spike_trains[n].spikes,
                           spike_trains_loaded[n].spikes)
Example 16
def test_multi_spike_sync():
    # some basic multivariate check
    spikes1 = SpikeTrain([100, 300, 400, 405, 410, 500, 700, 800,
                          805, 810, 815, 900], 1000)
    spikes2 = SpikeTrain([100, 200, 205, 210, 295, 350, 400, 510,
                          600, 605, 700, 910], 1000)
    spikes3 = SpikeTrain([100, 180, 198, 295, 412, 420, 510, 640,
                          695, 795, 820, 920], 1000)
    assert_almost_equal(spk.spike_sync(spikes1, spikes2),
                        0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes1, spikes3),
                        0.5, decimal=15)
    assert_almost_equal(spk.spike_sync(spikes2, spikes3),
                        0.5, decimal=15)

    f = spk.spike_sync_profile_multi([spikes1, spikes2, spikes3])
    # hands on definition of the average multivariate spike synchronization
    # expected = (f1.integral() + f2.integral() + f3.integral()) / \
    #            (np.sum(f1.mp[1:-1])+np.sum(f2.mp[1:-1])+np.sum(f3.mp[1:-1]))
    expected = 0.5
    assert_almost_equal(f.avrg(), expected, decimal=15)
    assert_almost_equal(spk.spike_sync_multi([spikes1, spikes2, spikes3]),
                        expected, decimal=15)

    # multivariate regression test
    spike_trains = spk.load_spike_trains_from_txt("test/SPIKE_Sync_Test.txt",
                                                  edges=[0, 4000])
    # extract all spike times
    spike_times = np.array([])
    for st in spike_trains:
        spike_times = np.append(spike_times, st.spikes)
    spike_times = np.unique(np.sort(spike_times))

    f = spk.spike_sync_profile_multi(spike_trains)

    assert_equal(spike_times, f.x[1:-1])
    assert_equal(len(f.x), len(f.y))

    assert_equal(np.sum(f.y[1:-1]), 39932)
    assert_equal(np.sum(f.mp[1:-1]), 85554)
Example 17
#       Author:      Robin van Emden, http://pavlov.tech
#       Affiliation: Jheronimus Academy of Data Science, http://jads.nl
#
##############################################################################################################

from __future__ import print_function
import matplotlib.pyplot as plt
import pyspike as spk
import csv


def t(l):
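    # transpose a list of rows into a list of columns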
    return [list(i) for i in zip(*l)]


spike_trains = spk.load_spike_trains_from_txt("../export/spikes.txt",
                                              edges=(0, 420))

plt.figure()

f = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
x, y = f.get_plottable_data()
plt.plot(x, y, '--ok', label="SPIKE-SYNC profile")
# print(f.x)
# print(f.y)
# print(f.mp)

print("Average:", f.avrg())

with open('../export/spike_sync.csv', 'w') as fw:
    writer = csv.writer(fw)
    writer.writerows(zip(x, y))
Example 18
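    # fragment: precision, TP and FN are defined earlier in the source file
    # this excerpt was taken from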
    recall = TP / (TP + FN)
    F1 = 2 * (precision * recall) / (precision + recall) 





#%%

import sys  # needed for the path manipulation below
sys.path.insert(0, "/home/nel/Code/VolPy/PySpike/pyspike")
sys.path.insert(0, "/home/nel/Code/VolPy/PySpike")
sys.path.insert(0, "/home/nel/Code/VolPy/PySpike/examples")
import matplotlib.pyplot as plt
import pyspike as spk

spike_trains = spk.load_spike_trains_from_txt('/home/nel/Code/VolPy/PySpike/examples/PySpike_testdata.txt',
                                              edges=(0, 4000))
isi_profile = spk.isi_profile(spike_trains[0], spike_trains[1])
x, y = isi_profile.get_plottable_data()
plt.plot(x, y, '--k')
print("ISI distance: %.8f" % isi_profile.avrg())
plt.show()

spike_profile = spk.spike_profile(spike_trains[0], spike_trains[1])
x, y = spike_profile.get_plottable_data()
plt.plot(x, y, '--k')
print("SPIKE distance: %.8f" % spike_profile.avrg())
plt.show()

Example 19
from __future__ import print_function

import matplotlib.pyplot as plt

import pyspike as spk

spike_trains = spk.load_spike_trains_from_txt("../test/SPIKE_Sync_Test.txt",
                                              edges=(0, 4000))

plt.figure()

f = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
x, y = f.get_plottable_data()
plt.plot(x, y, '--ok', label="SPIKE-SYNC profile")
print(f.x)
print(f.y)
print(f.mp)
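# f.mp holds the multiplicity of each sample point, i.e. the weight with
# which the coincidence values in f.y enter the average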

print("Average:", f.avrg())


f = spk.spike_profile(spike_trains[0], spike_trains[1])
x, y = f.get_plottable_data()

plt.plot(x, y, '-b', label="SPIKE-profile")

plt.axis([0, 4000, -0.1, 1.1])
plt.legend(loc="center right")

plt.figure()
Example 20
""" averages.py

Simple example showing how to compute averages of distance profiles

Copyright 2014, Mario Mulansky <*****@*****.**>

Distributed under the BSD License
"""

from __future__ import print_function

import pyspike as spk

spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                              time_interval=(0, 4000))

f = spk.isi_profile(spike_trains[0], spike_trains[1])

print("ISI-distance: %.8f" % f.avrg())

isi1 = f.avrg(interval=(0, 1000))
isi2 = f.avrg(interval=(1000, 2000))
isi3 = f.avrg(interval=[(0, 1000), (2000, 3000)])
isi4 = f.avrg(interval=[(1000, 2000), (3000, 4000)])

print("ISI-distance (0-1000):                    %.8f" % isi1)
print("ISI-distance (1000-2000):                 %.8f" % isi2)
print("ISI-distance (0-1000) and (2000-3000):    %.8f" % isi3)
print("ISI-distance (1000-2000) and (3000-4000): %.8f" % isi4)
print()
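# equivalent one-call form (a sketch, assuming a PySpike version whose
# isi_distance accepts the same interval argument):
isi1_direct = spk.isi_distance(spike_trains[0], spike_trains[1],
                               interval=(0, 1000))
print("ISI-distance (0-1000), direct:            %.8f" % isi1_direct)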
Example 21
from __future__ import print_function

import matplotlib.pyplot as plt

import pyspike as spk

spike_trains = spk.load_spike_trains_from_txt("../test/SPIKE_Sync_Test.txt",
                                              edges=(0, 4000))

plt.figure()

f = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
x, y = f.get_plottable_data()
plt.plot(x, y, '--ok', label="SPIKE-SYNC profile")
print(f.x)
print(f.y)
print(f.mp)

print("Average:", f.avrg())

f = spk.spike_profile(spike_trains[0], spike_trains[1])
x, y = f.get_plottable_data()

plt.plot(x, y, '-b', label="SPIKE-profile")

plt.axis([0, 4000, -0.1, 1.1])
plt.legend(loc="center right")

plt.figure()

plt.subplot(211)
Example 22
""" averages.py

Simple example showing how to compute averages of distance profiles

Copyright 2014, Mario Mulansky <*****@*****.**>

Distributed under the BSD License
"""

from __future__ import print_function

import pyspike as spk

spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                              edges=(0, 4000))

f = spk.isi_profile(spike_trains[0], spike_trains[1])

print("ISI-distance: %.8f" % f.avrg())

isi1 = f.avrg(interval=(0, 1000))
isi2 = f.avrg(interval=(1000, 2000))
isi3 = f.avrg(interval=[(0, 1000), (2000, 3000)])
isi4 = f.avrg(interval=[(1000, 2000), (3000, 4000)])

print("ISI-distance (0-1000):                    %.8f" % isi1)
print("ISI-distance (1000-2000):                 %.8f" % isi2)
print("ISI-distance (0-1000) and (2000-3000):    %.8f" % isi3)
print("ISI-distance (1000-2000) and (3000-4000): %.8f" % isi4)
print()
Example 23
                S1 = createSynapses(G1,populationSize,synw,psyn,delay)
                Sp1 = SpikeMonitor(G1)

                run(runTime*ms)

                #code below calculates and stores the pyspike metrics
                firingValuesWithUnits = Sp1.spike_trains().values()
                firingValues = []
                for i in range(len(firingValuesWithUnits)):
                    firingValues.append(array(firingValuesWithUnits[i]))
                fV = open('fv.txt', 'w')
                for item in firingValues:
                    item = " ".join(map(str, item))
                    fV.write("%s\n" % item)
                fV.close()
                spikeTrains = psp.load_spike_trains_from_txt(
                    "fv.txt", edges=(0, runTime/1000.0))
                qvalues.iloc[currentLine,0] = tc
                qvalues.iloc[currentLine,1] = delay
                qvalues.iloc[currentLine,2] = psyn
                qvalues.iloc[currentLine,3] = synw
                qvalues.iloc[currentLine,4] = psp.spike_distance(spikeTrains)
                qvalues.iloc[currentLine,5] = psp.isi_distance(spikeTrains)
                qvalues.iloc[currentLine,6] = psp.spike_sync(spikeTrains)
                currentLine += 1 

                del G1
                del S1
                del Sp1
                del firingValuesWithUnits
                del firingValues
                del spikeTrains
Example 24
from __future__ import print_function
import time
import pyspike as spk


def time_diff_in_ms(start, end):
    """ Returns the time difference end-start in ms.
    """
    return (end-start)*1000


t_start = time.perf_counter()

# load the data
time_loading = time.perf_counter()
spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                              edges=(0, 4000))
t_loading = time.perf_counter()

print("Number of spike trains: %d" % len(spike_trains))
num_of_spikes = sum([len(spike_trains[i].spikes)
                     for i in range(len(spike_trains))])
print("Number of spikes: %d" % num_of_spikes)

# calculate the multivariate spike distance
f = spk.spike_profile_multi(spike_trains)

t_spike = time.perf_counter()

# print the average
avrg = f.avrg()
print("Spike distance from average: %.8f" % avrg)
Example 25
""" Distributed under the BSD License
"""


from __future__ import print_function

import numpy as np
import matplotlib.pyplot as plt

import pyspike as spk

import os
FOLDER_PATH = "C:\\Users\\jonat\\PycharmProjects\\PySpike\\examples\\data from brewer's lab"
DATA_PATH = os.path.join(FOLDER_PATH, "PySpike_testdata - Copy.txt")

spike_trains = spk.load_spike_trains_from_txt(DATA_PATH,
                                              edges=(0, 4000))
print(spike_trains)

# plot the spike times (disabled)
# for (i, spike_train) in enumerate(spike_trains):
#     plt.scatter(spike_train, i*np.ones_like(spike_train), marker='|')

# profile of two of the spike trains (indices 0 and 2)
f = spk.isi_profile(spike_trains, indices=[0, 2])
x, y = f.get_plottable_data()

plt.figure()
plt.plot(x, np.abs(y), '-k')

print(x, y)
Example 26
    we want to limit the bi-variate analysis only to neighboring
    electrodes of each electrode.

    The electrodes are laid out in a 64x64 grid with coordinates
        (0,0), (0,1), (0,2), ..., (0,63)
        (1,0), (1,1), (1,2), ..., (1,63)
        ...
    and the coordinate-to-number mapping is row-major:
        0,  1,  2,  ..., 63,
        64, 65,     ..., 127
        ...

    Please see the excel sheet in the folder to get a better sense of how
    the neighbors are selected.
    '''

    start_time = time.time()
    spike_trains = spk.load_spike_trains_from_txt("final_interpolated.txt",
                                                  5000,
                                                  separator=" ",
                                                  is_sorted=True,
                                                  ignore_empty_lines=False)
    #spike_trains = spk.load_spike_trains_from_txt("final_interpolated.txt", edges = (0,5000))
    #spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", 4000)

    #print (spike_trains)

    # let's load the neighbor matrix; for each channel we will only perform
    # the bi-variate isi_distance against its neighboring channels
    neighbor_biocam_location = "NeighborListNew_5.csv"
    row_counter = 0
    with open(neighbor_biocam_location, "r") as f:
        reader = csv.reader(f, delimiter=",")
        data = list(reader)
        #print(row_counter)
        row_counter = row_counter + 1
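# the row-major coordinate-to-number mapping described in the docstring,
# made explicit as a small helper (a sketch; the names are assumptions):
def electrode_index(row, col, n_cols=64):
    # (0,0) -> 0, (0,63) -> 63, (1,0) -> 64, ...
    return row * n_cols + col

def electrode_coords(index, n_cols=64):
    # inverse mapping, e.g. 64 -> (1, 0)
    return divmod(index, n_cols)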
Example 27
def raster_plot(file):
    spike_trains = spk.load_spike_trains_from_txt(file)
    plt.figure()
    for (i, spikes) in enumerate(spike_trains):
        plt.plot(spikes, i * np.ones_like(spikes), 'b|')
    plt.show()