Example #1
0
def test_spike_empty():
    """SPIKE-distance and SPIKE-profile behaviour for empty / minimal trains.

    Three cases are checked:
      1. both trains empty      -> distance 0, flat zero profile
      2. empty vs. one spike    -> closed-form expected distance
      3. one spike vs. one spike -> hand-computed piecewise-linear profile
    """
    # Case 1: both trains empty -> distance is exactly 0, profile is flat.
    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    assert_allclose(d, 0.0)
    prof = spk.spike_profile(st1, st2)
    # the distance must equal the average of the profile
    assert_allclose(d, prof.avrg())
    assert_array_equal(prof.x, [0.0, 1.0])
    assert_array_equal(prof.y1, [
        0.0,
    ])
    assert_array_equal(prof.y2, [
        0.0,
    ])

    # Case 2: empty train vs. a single spike at t=0.4 -> closed-form value.
    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([
        0.4,
    ], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    d_expect = 2 * 0.4 * 0.4 * 1.0 / (0.4 + 1.0)**2 + 2 * 0.6 * 0.4 * 1.0 / (
        0.6 + 1.0)**2
    assert_almost_equal(d, d_expect, decimal=15)
    prof = spk.spike_profile(st1, st2)
    assert_allclose(d, prof.avrg())
    assert_array_equal(prof.x, [0.0, 0.4, 1.0])
    assert_array_almost_equal(
        prof.y1,
        [2 * 0.4 * 1.0 / (0.4 + 1.0)**2, 2 * 0.4 * 1.0 / (0.6 + 1.0)**2],
        decimal=15)
    assert_array_almost_equal(
        prof.y2,
        [2 * 0.4 * 1.0 / (0.4 + 1.0)**2, 2 * 0.4 * 1.0 / (0.6 + 1.0)**2],
        decimal=15)

    # Case 3: one spike each (0.6 vs. 0.4).  s1/s2 hold the auxiliary
    # spike-time terms at the profile support points, isi1/isi2 the
    # interspike intervals per segment.
    st1 = SpikeTrain([
        0.6,
    ], edges=(0.0, 1.0))
    st2 = SpikeTrain([
        0.4,
    ], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    s1 = np.array([0.2, 0.2, 0.2, 0.2])
    s2 = np.array([0.2, 0.2, 0.2, 0.2])
    isi1 = np.array([0.6, 0.6, 0.4])
    isi2 = np.array([0.4, 0.6, 0.6])
    expected_y1 = (s1[:-1] * isi2 + s2[:-1] * isi1) / (0.5 * (isi1 + isi2)**2)
    expected_y2 = (s1[1:] * isi2 + s2[1:] * isi1) / (0.5 * (isi1 + isi2)**2)
    expected_times = np.array([0.0, 0.4, 0.6, 1.0])
    # trapezoidal average of the piecewise-linear profile over the window
    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1 + expected_y2) / 2)
    expected_spike_val /= (expected_times[-1] - expected_times[0])

    assert_almost_equal(d, expected_spike_val, decimal=15)
    prof = spk.spike_profile(st1, st2)
    assert_allclose(d, prof.avrg())
    assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
    assert_array_almost_equal(prof.y1, expected_y1, decimal=15)
    assert_array_almost_equal(prof.y2, expected_y2, decimal=15)
Example #2
0
def distance_spike(spike_train_a, spike_train_b, interval):
    """Compute the SPIKE-distance (Kreutz et al.) between two spike trains.

    Both spike-time sequences are wrapped in ``pyspike.SpikeTrain`` objects
    over *interval*, and the distance is evaluated on that same interval.
    """
    trains = [
        pyspike.SpikeTrain(spikes, interval)
        for spikes in (spike_train_a, spike_train_b)
    ]
    return pyspike.spike_distance(trains[0], trains[1], interval)
Example #3
0
    def synchronyCalculation(t0, t1):
        """Compute pairwise SPIKE-distances between each channel and its
        neighbors over the interval (t0, t1) and write the thresholded
        result to ``SynchronyDataframes/<t0>.csv``.

        Relies on the enclosing scope for ``data`` (per-channel neighbor
        lists; the trailing element of each list is dropped before use)
        and ``spike_trains`` (one pyspike SpikeTrain per channel).
        """
        # Collect result rows directly in a list; the original seeded the
        # list with an empty placeholder row and deleted it afterwards,
        # and also built an unused empty DataFrame.
        rows = []
        for channel in range(4096):
            # Drop the trailing (non-neighbor-id) element of each entry.
            data[channel] = data[channel][:-1]
            for neighbor in data[channel]:
                sync_val = spk.spike_distance(spike_trains[channel],
                                              spike_trains[int(neighbor)],
                                              interval=(t0, t1))
                rows.append([channel, int(neighbor), sync_val])

        df = pd.DataFrame(rows, columns=['Ch_A', 'Ch_B', 'SyncVal'])
        # Keep only moderately synchronous, non-identical pairs.
        df = df.loc[(df['SyncVal'] > 0.2) & (df['SyncVal'] < 1)]
        df.to_csv('SynchronyDataframes/{}.csv'.format(t0))
Example #4
0
def calc_stimuli_distance(stimulus_a: np.ndarray, stimulus_b: np.ndarray,
                          stimulus_duration: float) -> float:
    """
    This function computes the average distance between neurons in two stimuli
    using the spike-distance metric  (see: http://www.scholarpedia.org/article/SPIKE-distance)

    :param stimulus_a: numpy array where each element is a single neurons spike times, specified in milliseconds
    :param stimulus_b: numpy array where each element is a single neurons spike times, specified in milliseconds
    :param stimulus_duration: Maximal stimulus_duration of the stimulus, units: Sec
    :return: mean SPIKE-distance over all corresponding neuron pairs
    :raises Exception: if the two stimuli contain different numbers of neurons
    """
    # Verify stimuli are comparable
    if stimulus_a.size != stimulus_b.size:
        raise Exception('Stimuli must consist of same number of neurons')

    distances = []  # Placeholder for distances between each pair of neurons
    for neuron_a, neuron_b in zip(stimulus_a, stimulus_b):
        # Converting to pyspike SpikeTrain object for calculation;
        # edges are in milliseconds, hence the * 1000 on the duration.
        neuron_a = spk.SpikeTrain(neuron_a,
                                  edges=[0, stimulus_duration * 1000])
        neuron_b = spk.SpikeTrain(neuron_b,
                                  edges=[0, stimulus_duration * 1000])
        # Compute distance
        distance = spk.spike_distance(neuron_a, neuron_b)
        distances.append(distance)
    # BUG FIX: average over all collected distances; the original called
    # np.mean(distance), which only used the last neuron pair's distance.
    mean_distance = np.mean(distances)
    return mean_distance
def test_regression_spiky():
    """Regression test pinning PySpike results to reference values obtained
    from the SPIKY tool (see the "full precision from SPIKY" notes below).

    Covers bivariate isi/spike/spike-sync measures on two regular trains,
    then the multivariate variants on the bundled test data file.
    """
    # standard example
    st1 = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st2 = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st1, st2)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
    isi_profile = spk.isi_profile(st1, st2)
    # the ISI profile is the constant 0.1/1.1 for these two regular trains
    assert_equal(isi_profile.y, 0.1/1.1 * np.ones_like(isi_profile.y))

    spike_dist = spk.spike_distance(st1, st2)
    # NOTE(review): exact float equality — pins bit-identical results.
    assert_equal(spike_dist, 2.1105878248735391e-01)

    spike_sync = spk.spike_sync(st1, st2)
    assert_equal(spike_sync, 8.6956521739130432e-01)

    # multivariate check

    # test data path is relative to the repository root here
    spike_trains = spk.load_spike_trains_from_txt("test/PySpike_testdata.txt",
                                                  (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1)+len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_dist, 2.4432433330596512e-01, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # get the full precision from SPIKY
    assert_equal(spike_sync, 0.7183531505298066)
Example #6
0
def test_spike_empty():
    """SPIKE-distance and SPIKE-profile behaviour for empty / minimal trains.

    Three cases are checked:
      1. both trains empty       -> distance 0, flat zero profile
      2. empty vs. one spike     -> closed-form expected distance
      3. one spike vs. one spike -> hand-computed piecewise-linear profile
    """
    # Case 1: both trains empty -> distance is exactly 0, profile is flat.
    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    assert_equal(d, 0.0)
    prof = spk.spike_profile(st1, st2)
    # the distance must equal the average of the profile
    assert_equal(d, prof.avrg())
    assert_array_equal(prof.x, [0.0, 1.0])
    assert_array_equal(prof.y1, [0.0, ])
    assert_array_equal(prof.y2, [0.0, ])

    # Case 2: empty train vs. a single spike at t=0.4 -> closed-form value.
    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    assert_almost_equal(d, 0.4*0.4*1.0/(0.4+1.0)**2 + 0.6*0.4*1.0/(0.6+1.0)**2,
                        decimal=15)
    prof = spk.spike_profile(st1, st2)
    assert_equal(d, prof.avrg())
    assert_array_equal(prof.x, [0.0, 0.4, 1.0])
    assert_array_almost_equal(prof.y1, [0.0, 2*0.4*1.0/(0.6+1.0)**2],
                              decimal=15)
    assert_array_almost_equal(prof.y2, [2*0.4*1.0/(0.4+1.0)**2, 0.0],
                              decimal=15)

    # Case 3: one spike each (0.6 vs. 0.4).  s1/s2 hold the auxiliary
    # spike-time terms at the profile support points (with edge
    # correction), isi1/isi2 the interspike intervals per segment.
    st1 = SpikeTrain([0.6, ], edges=(0.0, 1.0))
    st2 = SpikeTrain([0.4, ], edges=(0.0, 1.0))
    d = spk.spike_distance(st1, st2)
    s1 = np.array([0.0, 0.4*0.2/0.6, 0.2, 0.0])
    s2 = np.array([0.0, 0.2, 0.2*0.4/0.6, 0.0])
    isi1 = np.array([0.6, 0.6, 0.4])
    isi2 = np.array([0.4, 0.6, 0.6])
    expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2)
    expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2)
    expected_times = np.array([0.0, 0.4, 0.6, 1.0])
    # trapezoidal average of the piecewise-linear profile over the window
    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1+expected_y2)/2)
    expected_spike_val /= (expected_times[-1]-expected_times[0])

    assert_almost_equal(d, expected_spike_val, decimal=15)
    prof = spk.spike_profile(st1, st2)
    assert_equal(d, prof.avrg())
    assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
    assert_array_almost_equal(prof.y1, expected_y1, decimal=15)
    assert_array_almost_equal(prof.y2, expected_y2, decimal=15)
Example #7
0
def test_regression_spiky():
    """Regression test pinning PySpike results to reference values obtained
    from the SPIKY tool, plus Eero's edge-correction example.

    Covers bivariate isi/spike/spike-sync measures on two regular trains,
    the multivariate variants on the bundled test data file, and the
    expected piecewise profile for the edge-correction case.
    """
    # standard example
    st1 = SpikeTrain(np.arange(100, 1201, 100), 1300)
    st2 = SpikeTrain(np.arange(100, 1201, 110), 1300)

    isi_dist = spk.isi_distance(st1, st2)
    assert_almost_equal(isi_dist, 9.0909090909090939e-02, decimal=15)
    isi_profile = spk.isi_profile(st1, st2)
    # the ISI profile is the constant 0.1/1.1 for these two regular trains
    assert_equal(isi_profile.y, 0.1 / 1.1 * np.ones_like(isi_profile.y))

    spike_dist = spk.spike_distance(st1, st2)
    # NOTE(review): exact float equality — pins bit-identical results.
    assert_equal(spike_dist, 0.211058782487353908)

    spike_sync = spk.spike_sync(st1, st2)
    assert_equal(spike_sync, 8.6956521739130432e-01)

    # multivariate check

    # TEST_PATH is expected to be defined at module level
    spike_trains = spk.load_spike_trains_from_txt(
        os.path.join(TEST_PATH, "PySpike_testdata.txt"), (0.0, 4000.0))
    isi_dist = spk.isi_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(isi_dist, 0.17051816816999129656, decimal=15)

    spike_profile = spk.spike_profile_multi(spike_trains)
    assert_equal(len(spike_profile.y1) + len(spike_profile.y2), 1252)

    spike_dist = spk.spike_distance_multi(spike_trains)
    # get the full precision from SPIKY
    assert_almost_equal(spike_dist, 0.25188056475463755, decimal=15)

    spike_sync = spk.spike_sync_multi(spike_trains)
    # get the full precision from SPIKY
    assert_equal(spike_sync, 0.7183531505298066)

    # Eero's edge correction example
    st1 = SpikeTrain([0.5, 1.5, 2.5], 6.0)
    st2 = SpikeTrain([3.5, 4.5, 5.5], 6.0)

    f = spk.spike_profile(st1, st2)

    expected_times = np.array([0.0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.0])
    # interleaved left/right profile values; split into y1/y2 below
    y_all = np.array([
        0.271604938271605, 0.271604938271605, 0.271604938271605,
        0.617283950617284, 0.617283950617284, 0.444444444444444,
        0.285714285714286, 0.285714285714286, 0.444444444444444,
        0.617283950617284, 0.617283950617284, 0.271604938271605,
        0.271604938271605, 0.271604938271605
    ])
    expected_y1 = y_all[::2]
    expected_y2 = y_all[1::2]

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=14)
    assert_array_almost_equal(f.y2, expected_y2, decimal=14)
Example #8
0
def spikey(train, song, condition):
    """Per-stimulus pairwise SPIKE-distance matrices, summarized by compute_dist.

    For every unique stimulus in *song*, collects the trials whose spike
    train is present (i.e. not the np.nan placeholder), builds the full
    symmetric matrix of pairwise SPIKE-distances labelled by condition,
    and appends the summary row from ``compute_dist``.  Stimuli with no
    usable trials contribute a fixed placeholder row instead.
    """
    per_stim = []
    for stim in np.unique(song):
        indices = np.where(np.asarray(song) == stim)[0]
        kept = [idx for idx in indices if train[idx] is not np.nan]
        trains = [train[idx] for idx in kept]
        labels = [condition[idx] for idx in kept]
        n = len(trains)
        matrix = np.zeros((n, n))
        for row in range(n):
            for col in range(n):
                matrix[row, col] = spk.spike_distance(trains[row], trains[col])
        frame = pd.DataFrame(matrix, columns=labels, index=labels)
        if trains:
            per_stim.append(compute_dist(frame))
        else:
            # 3 zero counts followed by 10 NaN summary statistics
            per_stim.append([0, 0, 0] + [np.nan] * 10)
    return per_stim
Example #9
0
# Load the test data set: one spike train per line, observation window [0, 4000).
spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt",
                                              edges=(0, 4000))
# NOTE(review): time.clock() was removed in Python 3.8; on modern interpreters
# these timing calls need time.perf_counter().  t_start is defined earlier in
# the script, outside this excerpt.
t_loading = time.clock()

print("Number of spike trains: %d" % len(spike_trains))
num_of_spikes = sum([len(spike_trains[i]) for i in range(len(spike_trains))])
print("Number of spikes: %d" % num_of_spikes)

# calculate the multivariate spike distance
f = spk.spike_profile(spike_trains)

t_spike = time.clock()

# print the average
avrg = f.avrg()
print("Spike distance from average: %.8f" % avrg)

t_avrg = time.clock()

# compute average distance directly, should give the same result as above
spike_dist = spk.spike_distance(spike_trains)
print("Spike distance directly:     %.8f" % spike_dist)

t_dist = time.clock()

# report wall-clock timings for each phase of the computation
print("Loading:            %9.1f ms" % time_diff_in_ms(t_start, t_loading))
print("Computing profile:  %9.1f ms" % time_diff_in_ms(t_loading, t_spike))
print("Averaging:          %9.1f ms" % time_diff_in_ms(t_spike, t_avrg))
print("Computing distance: %9.1f ms" % time_diff_in_ms(t_avrg, t_dist))
print("Total:              %9.1f ms" % time_diff_in_ms(t_start, t_dist))
Example #10
0
def test_spike():
    """Pen-and-paper validation of the SPIKE profile and distance.

    Three scenarios: values cross-checked against SPIKY, a fully
    hand-computed piecewise-linear profile, and a case with coincident
    spike times exercising the edge correction.
    """
    # generate two spike trains:
    t1 = SpikeTrain([0.0, 2.0, 5.0, 8.0], 10.0)
    t2 = SpikeTrain([0.0, 1.0, 5.0, 9.0], 10.0)

    expected_times = np.array([0.0, 1.0, 2.0, 5.0, 8.0, 9.0, 10.0])

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)

    # from SPIKY:
    # interleaved left/right profile values at the support points
    y_all = np.array([
        0.000000000000000000, 0.555555555555555580, 0.222222222222222210,
        0.305555555555555580, 0.255102040816326536, 0.000000000000000000,
        0.000000000000000000, 0.255102040816326536, 0.255102040816326536,
        0.285714285714285698, 0.285714285714285698, 0.285714285714285698
    ])

    #assert_array_almost_equal(f.y1, y_all[::2])
    assert_array_almost_equal(f.y2, y_all[1::2])

    assert_almost_equal(f.avrg(), 0.186309523809523814, decimal=15)
    # the distance must equal the average of the profile
    assert_equal(spk.spike_distance(t1, t2), f.avrg())

    t1 = SpikeTrain([0.2, 0.4, 0.6, 0.7], 1.0)
    t2 = SpikeTrain([0.3, 0.45, 0.8, 0.9, 0.95], 1.0)

    # pen&paper calculation of the spike distance
    # s1/s2: auxiliary spike-time terms at each support point;
    # isi1/isi2: interspike intervals per profile segment.
    expected_times = [0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0]
    s1 = np.array([
        0.1, 0.1, (0.1 * 0.1 + 0.05 * 0.1) / 0.2, 0.05,
        (0.05 * 0.15 * 2) / 0.2, 0.15, 0.1, (0.1 * 0.1 + 0.1 * 0.2) / 0.3,
        (0.1 * 0.2 + 0.1 * 0.1) / 0.3, (0.1 * 0.05 + 0.1 * 0.25) / 0.3, 0.1
    ])
    s2 = np.array([
        0.1, (0.1 * 0.2 + 0.1 * 0.1) / 0.3, 0.1, (0.1 * 0.05 * 2) / .15, 0.05,
        (0.05 * 0.2 + 0.1 * 0.15) / 0.35, (0.05 * 0.1 + 0.1 * 0.25) / 0.35,
        0.1, 0.1, 0.05, 0.05
    ])
    isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.1, 0.3, 0.3, 0.3, 0.3])
    isi2 = np.array([0.3, 0.3, 0.15, 0.15, 0.35, 0.35, 0.35, 0.1, 0.05, 0.05])
    expected_y1 = (s1[:-1] * isi2 + s2[:-1] * isi1) / (0.5 * (isi1 + isi2)**2)
    expected_y2 = (s1[1:] * isi2 + s2[1:] * isi1) / (0.5 * (isi1 + isi2)**2)

    expected_times = np.array(expected_times)
    expected_y1 = np.array(expected_y1)
    expected_y2 = np.array(expected_y2)
    # trapezoidal average of the piecewise-linear profile over the window
    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1 + expected_y2) / 2)
    expected_spike_val /= (expected_times[-1] - expected_times[0])

    print("SPIKE value:", expected_spike_val)

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=15)
    assert_array_almost_equal(f.y2, expected_y2, decimal=15)
    assert_almost_equal(f.avrg(), expected_spike_val, decimal=15)
    assert_almost_equal(spk.spike_distance(t1, t2),
                        expected_spike_val,
                        decimal=15)

    # check with some equal spike times
    t1 = SpikeTrain([0.2, 0.4, 0.6], [0.0, 1.0])
    t2 = SpikeTrain([0.1, 0.4, 0.5, 0.6], [0.0, 1.0])

    expected_times = [0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0]
    # due to the edge correction in the beginning, s1 and s2 are different
    # for left and right values
    s1_r = np.array(
        [0.1, (0.1 * 0.1 + 0.1 * 0.1) / 0.2, 0.1, 0.0, 0.0, 0.0, 0.0])
    s1_l = np.array(
        [0.1, (0.1 * 0.1 + 0.1 * 0.1) / 0.2, 0.1, 0.0, 0.0, 0.0, 0.0])
    # s2_r = np.array([0.1*0.1/0.3, 0.1*0.3/0.3, 0.1*0.2/0.3,
    #                  0.0, 0.1, 0.0, 0.0])
    # s2_l = np.array([0.1*0.1/0.3, 0.1*0.1/0.3, 0.1*0.2/0.3, 0.0,
    #                  0.1, 0.0, 0.0])
    # eero's edge correction:
    s2_r = np.array(
        [0.1, 0.1 * 0.3 / 0.3, 0.1 * 0.2 / 0.3, 0.0, 0.1, 0.0, 0.0])
    s2_l = np.array(
        [0.1, 0.1 * 0.3 / 0.3, 0.1 * 0.2 / 0.3, 0.0, 0.1, 0.0, 0.0])
    isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.4])
    isi2 = np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.4])
    expected_y1 = (s1_r[:-1] * isi2 + s2_r[:-1] * isi1) / (0.5 *
                                                           (isi1 + isi2)**2)
    expected_y2 = (s1_l[1:] * isi2 + s2_l[1:] * isi1) / (0.5 *
                                                         (isi1 + isi2)**2)

    expected_times = np.array(expected_times)
    expected_y1 = np.array(expected_y1)
    expected_y2 = np.array(expected_y2)
    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1 + expected_y2) / 2)
    expected_spike_val /= (expected_times[-1] - expected_times[0])

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=14)
    assert_array_almost_equal(f.y2, expected_y2, decimal=14)
    assert_almost_equal(f.avrg(), expected_spike_val, decimal=16)
    assert_almost_equal(spk.spike_distance(t1, t2),
                        expected_spike_val,
                        decimal=16)
Example #11
0
def spikey(fp):
    """For every *.pprox event file under *fp*, compute per-stimulus
    pairwise SPIKE-distance matrices over the syllable window around the
    gap, and write one CSV per (event file, stimulus) pair.

    Relies on module-level glob/os/json/pd/np/spk imports and on
    ``../restoration_syllables.csv`` providing syllable start/end times
    (in seconds) keyed by songid.
    """
    events = glob.glob(os.path.join(fp,'*.pprox'))
    events.sort()
    #For new recordings:
    syll = pd.read_csv('../restoration_syllables.csv')

    #For old recordings:
    #syll = pd.read_csv('../syllables.csv')

    for eventfile in events:

        with open(eventfile) as fl:
            data = json.load(fl)
        song = []
        condition = []
        train = []
        gapon = {}
        gapoff = {}
        spikes = []
        for t in range(len(data['pprox'])):
            #For new recordings:
            # 'continuous' trials are duplicated so they can be compared
            # against both gap conditions ('-1' and '-2').
            if data['pprox'][t]['condition'] == 'continuous':
                song.append(data['pprox'][t]['stimulus']+'-1')
                song.append(data['pprox'][t]['stimulus']+'-2')
                condition.append(data['pprox'][t]['condition']+'1')
                condition.append(data['pprox'][t]['condition']+'2')
                spikes.append(data['pprox'][t]['event'])
                spikes.append(data['pprox'][t]['event'])
            else:
                songid = data['pprox'][t]['stimulus']+'-'+data['pprox'][t]['condition'][-1]
                song.append(data['pprox'][t]['stimulus']+'-'+data['pprox'][t]['condition'][-1])
                condition.append(data['pprox'][t]['condition'])
                spikes.append(data['pprox'][t]['event'])

            # NOTE(review): songid is only assigned in the else-branch above;
            # if the first trial with 'gap_on' is 'continuous', this raises
            # NameError (or reuses a stale songid) — confirm input ordering.
            if 'gap_on' in data['pprox'][t].keys():
                gapon[songid] = data['pprox'][t]['gap_on']
                gapoff[songid] = data['pprox'][t]['gap_off']

            #For old recordings:
            #song.append(data['pprox'][t]['stimulus'])
            #condition.append(data['pprox'][t]['condition'])
            #spikes.append(data['pprox'][t]['event'])
            #if 'gap_on' in data['pprox'][t].keys():
                #gapon[song[t]] = data['pprox'][t]['gap_on'][0]/40
                #gapoff[song[t]] = data['pprox'][t]['gap_off'][0]/40

        songset = np.unique(song)
        # expand the per-songid gap dicts into per-trial lists aligned
        # with `song` (the names gapon/gapoff are reused for the lists)
        x = []
        y = []
        for s in song:
            x.append(gapon[s])
            y.append(gapoff[s])

        gapon = x
        gapoff = y

        # Restrict each spike train to [syllable-before-gap start,
        # syllable-after-gap end], in milliseconds.
        # NOTE(review): chained boolean indexing (df[mask1][mask2]) relies on
        # deprecated pandas alignment semantics — verify on current pandas.
        for t in range(len(spikes)):
            #For new recordings:
            syllstart = syll['start'][syll['songid'] == song[t][:-2]][syll['start'] <= gapon[t]/1000+0.001][syll['end'] >= gapoff[t]/1000-0.001].values[0] * 1000
            index = syll[syll['songid'] == song[t][:-2]][syll['start'] <= gapon[t]/1000+0.001][syll['end'] >= gapoff[t]/1000-0.001].index.values[0] + 1

            #For old recordings:
            #syllstart = syll['start'][syll['songid'] == song[t]][syll['start'] <= gapon[t]/1000+0.001][syll['end'] >= gapoff[t]/1000-0.001].values[0] * 1000
            #index = syll[syll['songid'] == song[t]][syll['start'] <= gapon[t]/1000+0.001][syll['end'] >= gapoff[t]/1000-0.001].index.values[0] + 1

            nextsyllend = syll['end'].at[index] * 1000
            spikes[t] = [spike for spike in spikes[t] if spike >= syllstart and spike <= nextsyllend]
            train.append(spk.SpikeTrain(spikes[t],[syllstart,nextsyllend]))

        # Full pairwise SPIKE-distance matrix per stimulus, one CSV each.
        for s,stim in enumerate(songset):
            pairs = np.zeros((len(train)//len(songset),len(train)//len(songset)))
            subset = np.where(np.asarray(song) == stim)
            trainsub = [train[x] for x in subset[0]]
            for i in range(len(trainsub)):
                for j in range(len(trainsub)):
                    pairs[i,j] = spk.spike_distance(trainsub[i], trainsub[j])
            labels = [condition[x] for x in range(len(condition)) if x in subset[0]]
            df = pd.DataFrame(pairs, columns = labels, index = labels)
            df.to_csv(os.path.splitext(eventfile)[0]+'_'+stim+'.csv')
def test_spike():
    """Pen-and-paper validation of the SPIKE profile and distance.

    Three scenarios: spot-checked reference values, a fully hand-computed
    piecewise-linear profile, and a case with coincident spike times
    exercising the edge correction.
    """
    # generate two spike trains:
    t1 = SpikeTrain([0.0, 2.0, 5.0, 8.0], 10.0)
    t2 = SpikeTrain([0.0, 1.0, 5.0, 9.0], 10.0)

    expected_times = np.array([0.0, 1.0, 2.0, 5.0, 8.0, 9.0, 10.0])

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)

    assert_almost_equal(f.avrg(), 1.6624149659863946e-01, decimal=15)
    assert_almost_equal(f.y2[-1], 0.1394558, decimal=6)

    t1 = SpikeTrain([0.2, 0.4, 0.6, 0.7], 1.0)
    t2 = SpikeTrain([0.3, 0.45, 0.8, 0.9, 0.95], 1.0)

    # pen&paper calculation of the spike distance
    # s1/s2: auxiliary spike-time terms at each support point;
    # isi1/isi2: interspike intervals per profile segment.
    expected_times = [0.0, 0.2, 0.3, 0.4, 0.45, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0]
    s1 = np.array([0.1, 0.1, (0.1*0.1+0.05*0.1)/0.2, 0.05, (0.05*0.15 * 2)/0.2,
                   0.15, 0.1, (0.1*0.1+0.1*0.2)/0.3, (0.1*0.2+0.1*0.1)/0.3,
                   (0.1*0.05+0.1*0.25)/0.3, 0.1])
    s2 = np.array([0.1, (0.1*0.2+0.1*0.1)/0.3, 0.1, (0.1*0.05 * 2)/.15, 0.05,
                   (0.05*0.2+0.1*0.15)/0.35, (0.05*0.1+0.1*0.25)/0.35,
                   0.1, 0.1, 0.05, 0.05])
    isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.1, 0.3, 0.3, 0.3, 0.3])
    isi2 = np.array([0.3, 0.3, 0.15, 0.15, 0.35, 0.35, 0.35, 0.1, 0.05, 0.05])
    expected_y1 = (s1[:-1]*isi2+s2[:-1]*isi1) / (0.5*(isi1+isi2)**2)
    expected_y2 = (s1[1:]*isi2+s2[1:]*isi1) / (0.5*(isi1+isi2)**2)

    expected_times = np.array(expected_times)
    expected_y1 = np.array(expected_y1)
    expected_y2 = np.array(expected_y2)
    # trapezoidal average of the piecewise-linear profile over the window
    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1+expected_y2)/2)
    expected_spike_val /= (expected_times[-1]-expected_times[0])

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=15)
    assert_array_almost_equal(f.y2, expected_y2, decimal=15)
    assert_almost_equal(f.avrg(), expected_spike_val, decimal=15)
    assert_almost_equal(spk.spike_distance(t1, t2), expected_spike_val,
                        decimal=15)

    # check with some equal spike times
    t1 = SpikeTrain([0.2, 0.4, 0.6], [0.0, 1.0])
    t2 = SpikeTrain([0.1, 0.4, 0.5, 0.6], [0.0, 1.0])

    expected_times = [0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 1.0]
    # due to the edge correction in the beginning, s1 and s2 are different
    # for left and right values
    s1_r = np.array([0.1, (0.1*0.1+0.1*0.1)/0.2, 0.1, 0.0, 0.0, 0.0, 0.0])
    s1_l = np.array([0.1, (0.1*0.1+0.1*0.1)/0.2, 0.1, 0.0, 0.0, 0.0, 0.0])
    s2_r = np.array([0.1*0.1/0.3, 0.1*0.3/0.3, 0.1*0.2/0.3,
                     0.0, 0.1, 0.0, 0.0])
    s2_l = np.array([0.1*0.1/0.3, 0.1*0.1/0.3, 0.1*0.2/0.3, 0.0,
                     0.1, 0.0, 0.0])
    isi1 = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.4])
    isi2 = np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.4])
    expected_y1 = (s1_r[:-1]*isi2+s2_r[:-1]*isi1) / (0.5*(isi1+isi2)**2)
    expected_y2 = (s1_l[1:]*isi2+s2_l[1:]*isi1) / (0.5*(isi1+isi2)**2)

    expected_times = np.array(expected_times)
    expected_y1 = np.array(expected_y1)
    expected_y2 = np.array(expected_y2)
    expected_spike_val = sum((expected_times[1:] - expected_times[:-1]) *
                             (expected_y1+expected_y2)/2)
    expected_spike_val /= (expected_times[-1]-expected_times[0])

    f = spk.spike_profile(t1, t2)

    assert_equal(f.x, expected_times)
    assert_array_almost_equal(f.y1, expected_y1, decimal=14)
    assert_array_almost_equal(f.y2, expected_y2, decimal=14)
    assert_almost_equal(f.avrg(), expected_spike_val, decimal=16)
    assert_almost_equal(spk.spike_distance(t1, t2), expected_spike_val,
                        decimal=16)
                                              edges=(0, 4000))
# NOTE(review): the line above is an orphaned continuation — the start of the
# spk.load_spike_trains_from_txt(...) call was lost when this excerpt was
# assembled; compare the intact copy of this benchmark script earlier in
# this file.
# NOTE(review): time.clock() was removed in Python 3.8; modern interpreters
# need time.perf_counter().  t_start is defined outside this excerpt.
t_loading = time.clock()

print("Number of spike trains: %d" % len(spike_trains))
num_of_spikes = sum([len(spike_trains[i])
                     for i in range(len(spike_trains))])
print("Number of spikes: %d" % num_of_spikes)

# calculate the multivariate spike distance
f = spk.spike_profile(spike_trains)

t_spike = time.clock()

# print the average
avrg = f.avrg()
print("Spike distance from average: %.8f" % avrg)

t_avrg = time.clock()

# compute average distance directly, should give the same result as above
spike_dist = spk.spike_distance(spike_trains)
print("Spike distance directly:     %.8f" % spike_dist)

t_dist = time.clock()

# report wall-clock timings for each phase of the computation
print("Loading:            %9.1f ms" % time_diff_in_ms(t_start, t_loading))
print("Computing profile:  %9.1f ms" % time_diff_in_ms(t_loading, t_spike))
print("Averaging:          %9.1f ms" % time_diff_in_ms(t_spike, t_avrg))
print("Computing distance: %9.1f ms" % time_diff_in_ms(t_avrg, t_dist))
print("Total:              %9.1f ms" % time_diff_in_ms(t_start, t_dist))
Example #14
0
                run(runTime*ms)

                #code below calculates and stores the pyspike metrics
                # NOTE(review): presumably Sp1 is a spike monitor from the
                # enclosing (unseen) simulation loop.  On Python 3, dict
                # .values() views are not indexable, so the index loop below
                # would fail — this looks like Python 2 code; confirm.
                firingValuesWithUnits = Sp1.spike_trains().values()
                firingValues = []
                for i in range(len(firingValuesWithUnits)):
                    firingValues.append(array(firingValuesWithUnits[i]))
                # Round-trip the trains through a text file so pyspike can
                # parse them (one space-separated train per line).
                fV = open('fv.txt','w')
                for item in firingValues:
                    item = (" ".join(map(str,item)))
                    fV.write("%s\n" % item)
                fV.close()
                spikeTrains = psp.load_spike_trains_from_txt("fv.txt",edges=(0,runTime/1000.0))
                # Record the sweep parameters and the three multivariate
                # pyspike measures for this run in the results table.
                qvalues.iloc[currentLine,0] = tc
                qvalues.iloc[currentLine,1] = delay
                qvalues.iloc[currentLine,2] = psyn
                qvalues.iloc[currentLine,3] = synw
                qvalues.iloc[currentLine,4] = psp.spike_distance(spikeTrains)
                qvalues.iloc[currentLine,5] = psp.isi_distance(spikeTrains)
                qvalues.iloc[currentLine,6] = psp.spike_sync(spikeTrains)
                currentLine += 1

                # release per-run objects before the next sweep iteration
                del G1
                del S1
                del Sp1
                del firingValuesWithUnits
                del firingValues
                del spikeTrains

# Persist the accumulated metrics for the whole parameter sweep.
qvalues.to_excel('qvalues.xlsx', sheet_name='Sheet1')