Example #1
def test_regression_15_sync():
    # load spike trains
    spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=[0, 4000])

    N = len(spike_trains)

    dist_mat = spk.spike_sync_matrix(spike_trains)
    assert_equal(dist_mat.shape, (N, N))

    ind = np.arange(N // 2)
    dist_mat = spk.spike_sync_matrix(spike_trains, ind)
    assert_equal(dist_mat.shape, (N // 2, N // 2))

    ind = np.arange(N // 2, N)
    dist_mat = spk.spike_sync_matrix(spike_trains, ind)
    assert_equal(dist_mat.shape, (N // 2, N // 2))
Example #2
def sync_test():
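    # Builds 801 spike trains in four tiers of spike counts (0, 1, 5 and 50
    # random spikes each), computes the pairwise SPIKE-Sync matrix, and masks
    # the block of sparse trains. timeRange, empty_array and getmat are
    # assumed to be defined at module level in the original script.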
    import random

    import pyspike

    spkt_train = []

    empty_train = [[] for i in range(801)]

    for i in range(len(empty_train)):
        if i < 200:
            empty_train[i] = random.sample(range(0, 1500), 0)
        elif 200 <= i < 400:
            empty_train[i] = random.sample(range(0, 1500), 1)
        elif 400 <= i < 600:
            empty_train[i] = random.sample(range(0, 1500), 5)
        else:
            empty_train[i] = random.sample(range(0, 1500), 50)
        spkt_train.append(pyspike.SpikeTrain(empty_train[i], timeRange))

    spike_sync = pyspike.spike_sync_matrix(spkt_train)

    # mark the block of sparse trains (tiers with 0 or 1 spikes)
    for i in range(len(spkt_train)):
        if i < 400:
            for v in range(400):
                empty_array[i][v] = 1.0

    getmat['2'] = spike_sync - empty_array
Example #3
def test_spike_sync_empty():
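    # PySpike convention: two empty trains are perfectly synchronous (1.0),
    # while an empty train paired with a non-empty one has zero synchrony.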
    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([], edges=(0.0, 1.0))
    d = spk.spike_sync(st1, st2)
    assert_allclose(d, 1.0)
    prof = spk.spike_sync_profile(st1, st2)
    assert_allclose(d, prof.avrg())
    assert_array_equal(prof.x, [0.0, 1.0])
    assert_array_equal(prof.y, [1.0, 1.0])

    st1 = SpikeTrain([], edges=(0.0, 1.0))
    st2 = SpikeTrain([
        0.4,
    ], edges=(0.0, 1.0))
    d = spk.spike_sync(st1, st2)
    assert_allclose(d, 0.0)
    prof = spk.spike_sync_profile(st1, st2)
    assert_allclose(d, prof.avrg())
    assert_array_equal(prof.x, [0.0, 0.4, 1.0])
    assert_array_equal(prof.y, [0.0, 0.0, 0.0])

    st1 = SpikeTrain([
        0.6,
    ], edges=(0.0, 1.0))
    st2 = SpikeTrain([
        0.4,
    ], edges=(0.0, 1.0))
    d = spk.spike_sync(st1, st2)
    assert_almost_equal(d, 1.0, decimal=15)
    prof = spk.spike_sync_profile(st1, st2)
    assert_allclose(d, prof.avrg())
    assert_array_almost_equal(prof.x, [0.0, 0.4, 0.6, 1.0], decimal=15)
    assert_array_almost_equal(prof.y, [1.0, 1.0, 1.0, 1.0], decimal=15)

    st1 = SpikeTrain([
        0.2,
    ], edges=(0.0, 1.0))
    st2 = SpikeTrain([
        0.8,
    ], edges=(0.0, 1.0))
    d = spk.spike_sync(st1, st2)
    assert_almost_equal(d, 0.0, decimal=15)
    prof = spk.spike_sync_profile(st1, st2)
    assert_allclose(d, prof.avrg())
    assert_array_almost_equal(prof.x, [0.0, 0.2, 0.8, 1.0], decimal=15)
    assert_array_almost_equal(prof.y, [0.0, 0.0, 0.0, 0.0], decimal=15)

    # test with empty intervals
    st1 = SpikeTrain([2.0, 5.0], [0, 10.0])
    st2 = SpikeTrain([2.1, 7.0], [0, 10.0])
    st3 = SpikeTrain([5.1, 6.0], [0, 10.0])
    res = spk.spike_sync_profile(st1, st2).avrg(interval=[3.0, 4.0])
    assert_allclose(res, 1.0)
    res = spk.spike_sync(st1, st2, interval=[3.0, 4.0])
    assert_allclose(res, 1.0)

    sync_matrix = spk.spike_sync_matrix([st1, st2, st3], interval=[3.0, 4.0])
    assert_array_equal(sync_matrix, np.ones((3, 3)) - np.diag(np.ones(3)))
Example #4
def multiprocessFunctions(analysis):
    # spike_trains and start_time are assumed to be defined at module level

    if analysis == 'isi_distance':
        #print ('isi distance')
        print("ISI distance calcualtion started!!!")
        plt.figure()
        isi_distance = spk.isi_distance_matrix(spike_trains, interval=None)
        isi_distance[isi_distance > 1] = 1
        print(isi_distance)
        np.save("isi_distance", isi_distance)
        np.savetxt("isi_distance.csv", isi_distance, delimiter=",")
        isi_distance = spk.isi_distance_matrix(spike_trains,
                                               interval=(0, 5000))
        isi_distance[isi_distance > 1] = 1
        print(isi_distance)
        np.save("isi_distance", isi_distance)
        plt.imshow(isi_distance, clim=(0.0, 1.0), interpolation='nearest')
        plt.colorbar()
        plt.title("ISI-distance(0-100ms)")
        print('----------Took %s seconds for isi distance-----' %
              (time.time() - start_time))
        plt.show()
    elif analysis == 'spike_distance':
        #print ('spike distance')
        plt.figure()
        spike_distance = spk.spike_distance_matrix(spike_trains,
                                                   interval=(0, 100))
        spike_distance[spike_distance > 1] = 1
        np.save("spike_distance", spike_distance)
        plt.imshow(spike_distance, clim=(0.0, 1.0), interpolation='nearest')
        plt.colorbar()
        plt.title("SPIKE-distance(0-100ms)")
        print('----------Took %s seconds for spike distance-----' %
              (time.time() - start_time))
        plt.show()
    elif analysis == 'spike_sync':
        #print ('spike sync')
        plt.figure()
        spike_sync = spk.spike_sync_matrix(spike_trains, interval=(3300, 3500))
        plt.imshow(spike_sync, interpolation='none')
        plt.colorbar()
        plt.title("SPIKE-Sync")
        print('----------Took %s seconds for spike_sync-----' %
              (time.time() - start_time))
        plt.show()
    else:
        pass
Example #5
def get_matrix(select='subset',
               min_spike_number=0,
               save=None,
               analysis=['SPIKE-Sync'],
               network=[0]):
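    # Builds one pyspike.SpikeTrain per gid group ('subset') or per cell
    # ('cell') for every window in timeRange, computes the requested
    # ISI-distance / SPIKE-distance / SPIKE-Sync matrices (masking trains
    # with fewer than min_spike_number spikes), and returns them in getmat.
    # data_files, d, timeRange, gids, net_labels, mats, pos_labels and path
    # are assumed to be defined at module level in the original script.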
    import pyspike

    load_data(network)

    getmat = {}

    empty_dict_array = {}
    no_empty_dict_array = {}

    spkts = {}
    spkinds = {}
    spktsRange = {}
    spkt_train = {}
    spike_sync = {}

    for f, p in enumerate(data_files):
        if f in network:
            spkts[f] = d[p]['simData']['spkt']  #list
            spkinds[f] = d[p]['simData']['spkid']  #list

            print('Starting analysis of spike times per ' + str(select) +
                  ': ' + str(p))

            for t, y in enumerate(timeRange):

                spktsRange = [
                    spkt for spkt in spkts[f]
                    if timeRange[t][0] <= spkt <= timeRange[t][1]
                ]

                spkt_train[str(f) + str(t)] = []

                if select == 'subset':
                    print('Time Range: ' + str(y))

                    empty_array = np.zeros(
                        ((len(net_labels) * 2), (len(net_labels) * 2)))
                    no_empty_array = np.zeros(
                        ((len(net_labels) * 2), (len(net_labels) * 2)))
                    array_ii = np.zeros(
                        ((len(net_labels) * 2), (len(net_labels) * 2)))

                    empty_gids = []
                    gids_included = []

                    for k, v in enumerate(gids):
                        train = []
                        for i, gid in enumerate(v):
                            for spkind, spkt in zip(spkinds[f], spkts[f]):
                                if (spkind == gid and spkt in spktsRange):
                                    train.append(spkt)

                        spkt_train[str(f) + str(t)].append(
                            pyspike.SpikeTrain(train, timeRange[t]))

                        if len(train) < min_spike_number:
                            empty_gids.append(k)
                        else:
                            gids_included.append(k)

                    for i in range(len(spkt_train[str(f) + str(t)])):
                        if i in gids_included:
                            for k, v in enumerate(gids_included):
                                no_empty_array[i][v] = 1.0

                    for l in range(len(array_ii)):
                        array_ii[l][l] = 1.0

                    no_empty_dict_array[str(f) + str(t)] = no_empty_array

                elif select == 'cell':

                    print('Time Range: ' + str(y))

                    empty_array = np.zeros(
                        ((len(net_labels) * 80), (len(net_labels) * 80)))
                    no_empty_array = np.zeros(
                        ((len(net_labels) * 80), (len(net_labels) * 80)))

                    empty_gids = []
                    spkmat2 = []
                    gids_included = []
                    #sync = np.zeros(((len(net_labels)*80),(len(net_labels)*80)))

                    for ii, subset in enumerate(gids):
                        spkmat = [
                            pyspike.SpikeTrain([
                                spkt
                                for spkind, spkt in zip(spkinds[f], spkts[f])
                                if (spkind == gid and spkt in spktsRange)
                            ], timeRange[t]) for gid in set(subset)
                        ]
                        spkt_train[str(f) + str(t)].extend(spkmat)

                        for gid in set(subset):
                            list_spkt = [
                                spkt
                                for spkind, spkt in zip(spkinds[f], spkts[f])
                                if (spkind == gid and spkt in spktsRange)
                            ]

                            if len(list_spkt) < min_spike_number:
                                empty_gids.append(gid)
                            else:
                                spkmat2.append(
                                    pyspike.SpikeTrain(list_spkt,
                                                       timeRange[t]))
                                gids_included.append(gid)
                        pos_labels.append(len(gids_included))

                    #print gids_included
                    empty_gids[:] = [x - 200 for x in empty_gids]
                    gids_included[:] = [x - 200 for x in gids_included]
                    #print empty_gids
                    for i in range(len(spkt_train[str(f) + str(t)])):
                        if i in empty_gids:
                            for k, v in enumerate(empty_gids):
                                empty_array[i][v] = 1.0

                    for i in range(len(spkt_train[str(f) + str(t)])):
                        if i in gids_included:
                            for k, v in enumerate(gids_included):
                                no_empty_array[i][v] = 1.0

                    #print empty_array
                    empty_dict_array[str(f) + str(t)] = empty_array
                    no_empty_dict_array[str(f) + str(t)] = no_empty_array
                #print spkt_train
                for l, mat in enumerate(mats):
                    #spike_sync
                    if (mat == 'ISI-distance' and mat in analysis):
                        print str(mat) + ", number of trains: " + str(
                            len(spkt_train[str(f) + str(t)]))
                        isi_distance = pyspike.isi_distance_matrix(
                            spkt_train[str(f) + str(t)])
                        getmat[str(f) + str(t) + str(l)] = isi_distance

                    elif (mat in analysis and mat == 'SPIKE-distance'):
                        print str(mat) + ", number of trains: " + str(
                            len(spkt_train[str(f) + str(t)]))
                        spike_distance = pyspike.spike_distance_matrix(
                            spkt_train[str(f) + str(t)])
                        getmat[str(f) + str(t) + str(l)] = spike_distance

                    elif (mat in analysis and mat == 'SPIKE-Sync'):
                        print str(mat) + ", number of trains: " + str(
                            len(spkt_train[str(f) + str(t)]))
                        spike_sync[str(f) +
                                   str(t)] = pyspike.spike_sync_matrix(
                                       spkt_train[str(f) + str(t)])
                        #if select == 'subset':
                        getmat[str(f) + str(t) + str(l)] = (
                            spike_sync[str(f) + str(t)] *
                            no_empty_dict_array[str(f) + str(t)]) + array_ii
                        #elif select == 'cell':
                        #getmat[str(f)+str(t)+str(l)] = spike_sync[str(f)+str(t)] * no_empty_dict_array[str(f)+str(t)]

                empty_array = np.zeros(
                    ((len(net_labels) * 80), (len(net_labels) * 80)))
        else:
            pass

    if save:
        with open(str(path) + 'data1.pkl', 'wb') as output:
            pickle.dump(getmat, output)

    print('finished getting data for matrix plotting')
    return getmat
Example #6
########## measure synchrony ##################################################
slices = []
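# build one SpikeTrain per neuron for each run-long time window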
for run in range(runs):
    section = []
    for n in range(N):
        # spikes in this run's window, shifted so each slice starts at t=0
        # to match the (0, duration) edges
        subint = [
            x - run * duration for x in spikes[n]
            if run * duration <= x <= (run + 1) * duration
        ]
        section.append(spk.SpikeTrain(subint, (0, duration)))

    slices.append(section)

pl.figure(3)

sync = []
for c in range(len(slices)):
    # sync.append(np.var(spk.spike_sync_matrix(slices[c])))
    sync.append(np.linalg.norm(spk.spike_sync_matrix(slices[c])))
    # sync.append(np.sum(spk.spike_sync_matrix(slices[c])))

pl.plot(sync, linestyle="-", marker="o", markersize=7)
# pl.hlines(15, 0, len(homXS), linewidth=0.3)
pl.grid(which='both', axis='y')
# pl.xlim(xmin=-0.5,xmax=len(homXS)+1.5)
for i in range(len(sync)):
    pl.text(i, sync[i] - 0.3, str(round(sync[i], 2)))

pl.show()
Example #7
first = min(fhs.index)
last = max(fhs.index)

t_start = first - pd.Timedelta(seconds=60 * 60 + 1)
t_end = last + pd.Timedelta(seconds=1)

edges = (0, (t_end - t_start).total_seconds())
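# edges: the whole observation window in seconds, relative to t_start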

spike_trains = [
    df_to_spike_train(fhs.query('priority == @priority'), t_start, edges)
    for priority in labels
]

# Plot simple relationship within errors within an hourly window.
normal = pd.DataFrame(spk.spike_sync_matrix(spike_trains, max_tau=60 * 60),
                      index=labels,
                      columns=labels)
save_matrix_plot(normal, 'Priority errors, Norway',
                 '{}/Norway_errors.png'.format(sys.argv[2]))

# First simple experiment: shift every priority one hour. Somewhat arbitrarily.
shifted = pd.DataFrame(index=labels, columns=labels)
for P in shifted.index:
    P_shifted = df_to_spike_train(
        fhs[fhs.priority == P].shift(-1, pd.Timedelta(hours=1)), t_start,
        edges)
    shifted[P] = [
        spk.spike_sync(P_shifted, s_t, max_tau=60 * 60) for s_t in spike_trains
    ]
shifted.index = [r'$\mathregular{' + x + '_{-1h}}$' for x in labels]
Example #8
import matplotlib.pyplot as plt

import pyspike as spk
import numpy as np

# first load the data, interval ending time = 4000, start=0 (default)
spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", 4000)

print(len(spike_trains))

plt.figure()
isi_distance = spk.isi_distance_matrix(spike_trains)
plt.imshow(isi_distance, interpolation='none')
plt.title("ISI-distance")
plt.savefig("fig/ISI-distance")
plt.close()

plt.figure()
spike_distance = spk.spike_distance_matrix(spike_trains, interval=(0, 1000))
plt.imshow(spike_distance, interpolation='none')
plt.title("SPIKE-distance, T=0-1000")
plt.savefig("fig/SPIKE-distance,T=0-1000")
plt.close()

plt.figure()
spike_sync = spk.spike_sync_matrix(spike_trains, interval=(2000, 4000))
plt.imshow(spike_sync, interpolation='none')
plt.title("SPIKE-Sync, T=2000-4000")
plt.savefig("fig/SPIKE-Sync,T=2000-4000")
plt.close()

# print(np.mean(spike_sync), np.mean(spike_distance), np.mean(isi_distance))
Example #9
def iter_plot0(md):
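    # For one (index, data-block) pair: plots Vm summaries, a raster, CV and
    # firing-rate scatters, and ISI/SPIKE/SPIKE-Sync profiles and matrices,
    # saving each as a per-weight PNG/CSV file.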
    # imports assumed by this excerpt (not shown in the original listing)
    import pickle

    import elephant.conversion as conv
    import matplotlib.pyplot as plt
    import numpy as np
    import pyspike
    import pyspike as spk
    import quantities as pq
    import seaborn as sns
    from elephant.statistics import cv
    with open('cell_indexs.p', 'rb') as f:
        returned_list = pickle.load(f)
    index_exc = returned_list[0]
    index_inh = returned_list[1]
    index, mdf1 = md
    #wgf = {0.025:None,0.05:None,0.125:None,0.25:None,0.3:None,0.4:None,0.5:None,1.0:None,1.5:None,2.0:None,2.5:None,3.0:None}
    wgf = {
        0.0025: None,
        0.0125: None,
        0.025: None,
        0.05: None,
        0.125: None,
        0.25: None,
        0.3: None,
        0.4: None,
        0.5: None,
        1.0: None,
        1.5: None,
        2.0: None,
        2.5: None,
        3.0: None
    }

    weight_gain_factors = {k: v for k, v in enumerate(wgf.keys())}
    print(len(weight_gain_factors))
    print(weight_gain_factors.keys())
    #weight_gain_factors = {0:0.5,1:1.0,2:1.5,3:2.0,4:2.5,5:3}
    #weight_gain_factors = {:None,1.0:None,1.5:None,2.0:None,2.5:None}

    k = weight_gain_factors[index]
    #print(len(mdf1.segments),'length of block')

    ass = mdf1.analogsignals[0]

    time_points = ass.times
    avg = np.mean(ass, axis=0)  # Average over signals of Segment
    #maxx = np.max(ass, axis=0)  # Average over signals of Segment
    std = np.std(ass, axis=0)  # Average over signals of Segment
    #avg_minus =
    plt.figure()
    plt.plot([i for i in range(0, len(avg))], avg)
    plt.plot([i for i in range(0, len(std))], std)

    plt.title("Mean and Standard Dev of $V_{m}$ amplitude per neuron ")
    plt.xlabel('time $(ms)$')
    plt.xlabel('Voltage $(mV)$')

    plt.savefig(str(index) + 'prs.png')
    vm_spiking = []
    vm_not_spiking = []
    spike_trains = []
    binary_trains = []
    max_spikes = 0

    vms = np.array(mdf1.analogsignals[0].as_array().T)
    #print(data)
    #for i,vm in enumerate(data):

    cnt = 0
    for spiketrain in mdf1.spiketrains:
        #spiketrain = mdf1.spiketrains[index]
        y = np.ones_like(spiketrain) * spiketrain.annotations['source_id']
        #import sklearn
        #sklearn.decomposition.NMF(y)
        # argument edges is the time interval you want to be considered.
        pspikes = pyspike.SpikeTrain(spiketrain, edges=(0, len(ass)))
        spike_trains.append(pspikes)
        if len(spiketrain) > max_spikes:
            max_spikes = len(spiketrain)

        if np.max(ass[spiketrain.annotations['source_id']]) > 0.0:
            vm_spiking.append(vms[spiketrain.annotations['source_id']])
        else:
            vm_not_spiking.append(vms[spiketrain.annotations['source_id']])
        cnt += 1

    for spiketrain in mdf1.spiketrains:
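        # discretise each train into 1 ms bins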
        x = conv.BinnedSpikeTrain(spiketrain,
                                  binsize=1 * pq.ms,
                                  t_start=0 * pq.s)
        binary_trains.append(x)
    end_floor = np.floor(float(mdf1.t_stop))
    dt = float(mdf1.t_stop) % end_floor
    #v = mdf1.take_slice_of_analogsignalarray_by_unit()
    t_axis = np.arange(float(mdf1.t_start), float(mdf1.t_stop), dt)
    plt.figure()
    plt.clf()
    cleaned = []
    data = np.array(mdf1.analogsignals[0].as_array().T)
    #print(data)
    for i, vm in enumerate(data):
        if np.max(vm) > 900.0 or np.min(vm) < -900.0:
            pass
        else:
            plt.plot(ass.times, vm)  #,label='neuron identifier '+str(i)))
            cleaned.append(vm)
            #vm = s#.as_array()[:,i]

    assert len(cleaned) < len(ass)

    print(len(cleaned))
    plt.title('neuron $V_{m}$')
    #plt.legend(loc="upper left")
    plt.savefig(str('weight_') + str(k) + 'analogsignals' + '.png')
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Voltage $(mV)$')

    plt.close()

    #pass

    plt.figure()
    plt.clf()
    plt.title('Single Neuron $V_{m}$ trace')
    plt.plot(ass.times[0:int(len(ass.times) / 10)],
             vm_not_spiking[index_exc[0]][0:int(len(ass.times) / 10)])
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Voltage $(mV)$')
    plt.savefig(str('weight_') + str(k) + 'eespecific_analogsignals' + '.png')
    plt.close()

    plt.figure()
    plt.clf()
    plt.title('Single Neuron $V_{m}$ trace')
    plt.plot(ass.times[0:int(len(ass.times) / 10)],
             vm_not_spiking[index_inh[0]][0:int(len(ass.times) / 10)])
    plt.xlabel('$ms$')
    plt.ylabel('$mV$')

    plt.savefig(str('weight_') + str(k) + 'inhibitory_analogsignals' + '.png')
    plt.close()

    cvsd = {}
    cvs = []
    cvsi = []
    rates = []  # firing rates per cell. in spikes a second.
    for i, j in enumerate(spike_trains):
        rates.append(float(len(j) / 2.0))
        cvs.append(cv(j))
    #import pickle
    #with open(str('weight_')+str(k)+'coefficients_of_variation.p','wb') as f:
    #   pickle.dump([cvs,cvsd],f)
    a = np.asarray(cvs)
    np.savetxt('pickles/' + str('weight_') + str(k) +
               'coefficients_of_variation.csv',
               a,
               delimiter=",")

    a = np.asarray(rates)
    np.savetxt('pickles/' + str('weight_') + str(k) + 'firing_of_rate.csv',
               a,
               delimiter=",")

    cvs = [i for i in cvs if i != 0]
    cells = [i for i in range(0, len(cvs))]

    plt.clf()
    fig, axes = plt.subplots()
    axes.set_title('Coefficient of Variation Versus Neuron')
    axes.set_xlabel('Neuron number')
    axes.set_ylabel('CV estimate')
    mcv = np.mean(cvs)
    #plt.scatter(cells,cvs)
    cvs = np.array(cvs)
    plt.scatter(index_inh, cvs[index_inh], label="inhibitory cells")
    plt.scatter(index_exc, cvs[index_exc], label="excitatory cells")
    plt.legend(loc="upper left")

    fig.tight_layout()
    plt.savefig(str('weight_') + str(k) + 'cvs_mean_' + str(mcv) + '.png')
    plt.close()

    plt.clf()
    #frequencies, power = elephant.spectral.welch_psd(ass)
    #mfreq = frequencies[np.where(power==np.max(power))[0][0]]
    fig, axes = plt.subplots()
    axes.set_title('Firing Rate Versus Neuron Number at mean f=' +
                   str(np.mean(rates)) + str('(Spike Per Second)'))
    axes.set_xlabel('Neuron number')
    axes.set_ylabel('Spikes per second')
    rates = np.array(rates)
    plt.scatter(index_inh, rates[index_inh], label="inhibitory cells")
    plt.scatter(index_exc, rates[index_exc], label="excitatory cells")
    plt.legend(loc="upper left")
    fig.tight_layout()
    plt.savefig(str('firing_rates_per_cell_') + str(k) + str(mcv) + '.png')
    plt.close()
    '''
    import pandas as pd
    d = {'coefficent_of_variation': cvs, 'cells': cells}
    df = pd.DataFrame(data=d)

    ax = sns.regplot(x='cells', y='coefficent_of_variation', data=df)#, fit_reg=False)
    plt.savefig(str('weight_')+str(k)+'cvs_regexp_'+str(mcv)+'.png');
    plt.close()
    '''

    spike_trains = []
    ass = mdf1.analogsignals[0]
    tstop = mdf1.t_stop
    # np.max(ass.times) == mdf1.t_stop
    #assert tstop == 2000
    tstop = 2000
    vm_spiking = []

    for spiketrain in mdf1.spiketrains:
        vm_spiking.append(
            mdf1.analogsignals[0][spiketrain.annotations['source_id']])
        y = np.ones_like(spiketrain) * spiketrain.annotations['source_id']

        # argument edges is the time interval you want to be considered.
        pspikes = pyspike.SpikeTrain(spiketrain, edges=(0, tstop))
        spike_trains.append(pspikes)

    # plot the spike times

    plt.clf()
    for (i, spike_train) in enumerate(spike_trains):
        plt.scatter(spike_train, i * np.ones_like(spike_train), marker='.')
    plt.xlabel('Time (ms)')
    plt.ylabel('Cell identifier')
    plt.title('Raster Plot for weight strength:' + str(k))

    plt.savefig(str('weight_') + str(k) + 'raster_plot' + '.png')
    plt.close()

    f = spk.isi_profile(spike_trains, indices=[0, 1])
    x, y = f.get_plottable_data()

    #text_file.close()
    text_file = open(str('weight_') + str(index) + 'net_out.txt', 'w')

    plt.figure()
    plt.plot(x, np.abs(y), '--k', label="ISI-profile")
    print("ISI-distance: %.8f" % f.avrg())
    f = spk.spike_profile(spike_trains, indices=[0, 1])
    x, y = f.get_plottable_data()
    plt.plot(x, y, '-b', label="SPIKE-profile")
    #print("SPIKE-distance: %.8f" % f.avrg())
    string_to_write = str("ISI-distance:") + str(f.avrg()) + str("\n\n")
    plt.title(string_to_write)
    plt.xlabel('Time $(ms)$')
    plt.ylabel('ISI distance')
    plt.legend(loc="upper left")
    plt.savefig(str('weight_') + str(k) + 'ISI_distance_bivariate' + '.png')
    plt.close()
    text_file.write(string_to_write)

    #text_file.write("SPIKE-distance: %.8f" % f.avrg())
    #text_file.write("\n\n")

    plt.figure()
    f = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
    x, y = f.get_plottable_data()
    plt.plot(x, y, '--ok', label="SPIKE-SYNC profile")
    print(f, f.avrg())
    print("Average:" + str(f.avrg()))
    #print(len(f.avrg()),f.avrg())
    string_to_write = str("instantaneous synchrony:") + str(
        f.avrg()) + 'weight: ' + str(index)

    plt.title(string_to_write)
    plt.xlabel('Time $(ms)$')
    plt.ylabel('instantaneous synchrony')

    text_file.write(string_to_write)

    #text_file.write(list())

    f = spk.spike_profile(spike_trains[0], spike_trains[1])
    x, y = f.get_plottable_data()

    plt.plot(x, y, '-b', label="SPIKE-profile")
    plt.axis([0, 4000, -0.1, 1.1])
    plt.legend(loc="center right")
    plt.clf()
    plt.figure()
    plt.subplot(211)

    f = spk.spike_sync_profile(spike_trains)
    x, y = f.get_plottable_data()
    plt.plot(x, y, '-b', alpha=0.7, label="SPIKE-Sync profile")
    x1, y1 = f.get_plottable_data(averaging_window_size=50)
    plt.plot(x1, y1, '-k', lw=2.5, label="averaged SPIKE-Sync profile")
    plt.subplot(212)

    f_psth = spk.psth(spike_trains, bin_size=50.0)
    x, y = f_psth.get_plottable_data()
    plt.plot(x, y, '-k', alpha=1.0, label="PSTH")

    plt.xlabel('Time $(ms)$')
    plt.ylabel('Spikes per bin')

    plt.savefig(str('weight_') + str(k) + 'multivariate_PSTH' + '.png')
    plt.close()

    plt.clf()
    plt.figure()

    f_psth = spk.psth(spike_trains, bin_size=50.0)
    x, y = f_psth.get_plottable_data()
    plt.plot(x, y, '-k', alpha=1.0, label="PSTH")

    plt.savefig(str('weight_') + str(k) + 'exclusively_PSTH' + '.png')
    plt.close()

    plt.figure()
    isi_distance = spk.isi_distance_matrix(spike_trains)
    plt.imshow(isi_distance, interpolation='none')
    plt.title('Pairwise ISI distance, T=0-2000')
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')
    plt.savefig(str('weight_') + str(k) + 'ISI_distance' + '.png')
    plt.close()

    #plt.show()

    plt.figure()
    plt.clf()
    import seaborn as sns

    sns.set()
    sns.clustermap(isi_distance)

    plt.savefig(str('weight_') + str(k) + 'cluster_isi_distance' + '.png')
    plt.close()

    plt.figure()
    spike_distance = spk.spike_distance_matrix(spike_trains,
                                               interval=(0, float(tstop)))

    import pickle
    with open('spike_distance_matrix.p', 'wb') as f:
        pickle.dump(spike_distance, f)

    plt.imshow(spike_distance, interpolation='none')
    plt.title("Pairwise SPIKE-distance, T=0-2000")
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')

    plt.savefig(str('weight_') + str(k) + 'spike_distance_matrix' + '.png')
    plt.close()
    plt.figure()
    plt.clf()
    sns.set()
    sns.clustermap(spike_distance)

    plt.savefig(str('weight_') + str(k) + 'cluster_spike_distance' + '.png')
    plt.close()

    plt.figure()
    spike_sync = spk.spike_sync_matrix(spike_trains,
                                       interval=(0, float(tstop)))
    plt.imshow(spike_sync, interpolation='none')
    plt.title('Pairwise Spike Synchony, T=0-2000')
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')

    a = np.asarray(spike_sync)
    np.savetxt("spike_sync_matrix.csv", a, delimiter=",")

    plt.figure()
    plt.clf()
    sns.clustermap(spike_sync)
    plt.savefig(
        str('weight_') + str(k) + 'cluster_spike_sync_distance' + '.png')
    plt.close()
Example #10
fhs = pd.read_hdf(sys.argv[1], 'fhs')
labels = list(filter(lambda x: not pd.isnull(x), np.unique(fhs.priority)))

first = min(fhs.index)
last = max(fhs.index)

t_start = first - pd.Timedelta(seconds=60*60+1)
t_end = last + pd.Timedelta(seconds=1)

edges = (0, (t_end - t_start).total_seconds())

spike_trains = [ df_to_spike_train(fhs.query('priority == @priority'), t_start, edges) for priority in labels ]

# Plot simple relationship within errors within an hourly window.
normal = pd.DataFrame(spk.spike_sync_matrix(spike_trains, max_tau=60*60), index=labels, columns=labels)
save_matrix_plot(normal, 'Priority errors, Norway', '{}/Norway_errors.png'.format(sys.argv[2]))

# First simple experiment: shift every priority one hour. Somewhat arbitrarily.
shifted = pd.DataFrame(index=labels, columns=labels)
for P in shifted.index:
    P_shifted = df_to_spike_train(fhs[ fhs.priority == P ].shift(-1, pd.Timedelta(hours=1)), t_start, edges)
    shifted[P] = [ spk.spike_sync(P_shifted, s_t, max_tau=60*60) for s_t in spike_trains ]
shifted.index = [ r'$\mathregular{'+x+'_{-1h}}$' for x in labels ]
save_matrix_plot(shifted, 'Priority errors shifted, Norway', '{}/Norway_errors_shifted.png'.format(sys.argv[2]))

P1 = fhs.query("priority == 'P1'")
P2 = fhs.query("priority == 'P2'")

# More complex experiment: find which time shift led to the best synchronous spike correlation.
results = pd.DataFrame(index=np.linspace(0, 60, 13, dtype=int), columns=np.unique(fhs.county), dtype=float)
Example #11
"""


from __future__ import print_function

import matplotlib.pyplot as plt

import pyspike as spk

# first load the data, interval ending time = 4000, start=0 (default)
spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", 4000)

print(len(spike_trains))

plt.figure()
isi_distance = spk.isi_distance_matrix(spike_trains)
plt.imshow(isi_distance, interpolation='none')
plt.title("ISI-distance")

plt.figure()
spike_distance = spk.spike_distance_matrix(spike_trains, interval=(0, 1000))
plt.imshow(spike_distance, interpolation='none')
plt.title("SPIKE-distance, T=0-1000")

plt.figure()
spike_sync = spk.spike_sync_matrix(spike_trains, interval=(2000, 4000))
plt.imshow(spike_sync, interpolation='none')
plt.title("SPIKE-Sync, T=2000-4000")

plt.show()
Example #12
for s in range(1):
    try:
        d = np.load('save/brunel_inp={}_g={}_seed_{}.npy'.format(inp, g,
                                                                 s)).item()

        # synchronicity
        sp = d['sp']
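        # sp maps each spike-train id to its spike times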

        spike_list = []
        for train in sp:
            spike_list.append(
                spk.SpikeTrain(list(sp[train]), (0, 50), is_sorted=False))

        sync_dist = spk.spike_sync_matrix(spike_list,
                                          indices=None,
                                          interval=(1, 20))
        spike_dist = spk.spike_distance_matrix(spike_list,
                                               indices=None,
                                               interval=(1, 20))
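        # set the diagonal to 1 (perfect self-synchrony) so 1 - sync_dist
        # gives zero self-distance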
        for i in range(sync_dist.shape[0]):
            sync_dist[i, i] = 1
        utils.Weight2txt(
            1 - sync_dist,
            'txt/brunel_inp={}_g={}_seed_{}_sync.txt'.format(inp, g, s))
        utils.Weight2txt(
            spike_dist,
            'txt/brunel_inp={}_g={}_seed_{}_dist.txt'.format(inp, g, s))

        # Correlation
        corr = utils.Correlation_matrice(sp,