Code example #1
import pyspike as spk
import sklearn.cluster as skc


def cluster_spike_trains(spike_trains, tstart, tend, interval=None, eps=0.01, measure='SPIKE_distance'):
    """Cluster a list of spike trains by SPIKE distance measure.

    tstart and tend are the start and end times of the recording (used for
    creating the SpikeTrain objects for PySpike).

    interval is (t0, t1) the time interval during which to compute
    SPIKE_distance. If None, (tstart, tend) is used.

    eps: epsilon parameter for the DBSCAN algorithm, i.e. the maximum
    distance between two samples for them to be considered part of the
    same neighborhood.

    measure: name of the distance measure; the body currently always uses
    the SPIKE-distance, so this parameter is effectively ignored.

    All spike trains should be nonempty.

    Return (cluster_info, spike_distance_matrix)
    where
    cluster_info: the fitted sklearn DBSCAN object; its labels_ attribute
    gives the cluster label assigned to each spike train.
    spike_distance_matrix: an NxN distance matrix for N spike trains.

    """
    if interval is None:
        interval = (tstart, tend)
    st_list = [spk.SpikeTrain(st, (tstart, tend)) for st in spike_trains]
    print('Interval:', interval)
    print('tstart: {} tend: {}'.format(tstart, tend))
    print('Number of spike trains:', len(st_list))
    
    dist = spk.spike_distance_matrix(st_list, interval=interval)
    clus = skc.DBSCAN(eps=eps, metric='precomputed').fit(dist)
    return clus, dist
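
A minimal usage sketch (the synthetic trains and parameter values below are illustrative assumptions, not part of the original snippet):

import numpy as np

rng = np.random.default_rng(0)
base = np.sort(rng.uniform(0, 1000, 50))
# six jittered copies of one train plus two unrelated trains, over 0-1000 ms
trains = [np.sort(np.clip(base + rng.normal(0, 0.5, 50), 0, 1000))
          for _ in range(6)]
trains += [np.sort(rng.uniform(0, 1000, 50)) for _ in range(2)]

clus, dist = cluster_spike_trains(trains, tstart=0, tend=1000, eps=0.1)
# one label per train; -1 marks noise (DBSCAN's default min_samples is 5,
# so very small collections can come back as all noise)
print(clus.labels_)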
Code example #2
import numpy as np
import pyspike as spk
from numpy.testing import assert_equal


def test_regression_15_spike():
    # load spike trains
    spike_trains = spk.load_spike_trains_from_txt(TEST_DATA, edges=[0, 4000])

    N = len(spike_trains)

    dist_mat = spk.spike_distance_matrix(spike_trains)
    assert_equal(dist_mat.shape, (N, N))

    ind = np.arange(N // 2)
    dist_mat = spk.spike_distance_matrix(spike_trains, ind)
    assert_equal(dist_mat.shape, (N // 2, N // 2))

    ind = np.arange(N // 2, N)
    dist_mat = spk.spike_distance_matrix(spike_trains, ind)
    assert_equal(dist_mat.shape, (N // 2, N // 2))
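
The indices argument exercised above selects a subset of trains, and the result is the corresponding submatrix of the full distance matrix. A minimal sketch of that relationship (assuming spike_trains was loaded as in the test):

import numpy as np
import pyspike as spk

full = spk.spike_distance_matrix(spike_trains)
sub = spk.spike_distance_matrix(spike_trains, indices=[0, 1])
# a pairwise distance does not depend on which other trains are present
assert np.allclose(sub, full[:2, :2])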
Code example #3
import time

import matplotlib.pyplot as plt
import numpy as np
import pyspike as spk


def multiprocessFunctions(analysis):
    # relies on module-level spike_trains (a list of pyspike.SpikeTrain)
    # and start_time (a time.time() stamp taken before dispatch)
    if analysis == 'isi_distance':
        #print ('isi distance')
        print("ISI distance calcualtion started!!!")
        plt.figure()
        isi_distance = spk.isi_distance_matrix(spike_trains, interval=None)
        isi_distance[isi_distance > 1] = 1
        print(isi_distance)
        np.save("isi_distance", isi_distance)
        np.savetxt("isi_distance.csv", isi_distance, delimiter=",")
        isi_distance = spk.isi_distance_matrix(spike_trains,
                                               interval=(0, 5000))
        isi_distance[isi_distance > 1] = 1
        print(isi_distance)
        np.save("isi_distance", isi_distance)
        plt.imshow(isi_distance, clim=(0.0, 1.0), interpolation='nearest')
        plt.colorbar()
        plt.title("ISI-distance(0-100ms)")
        print('----------Took %s seconds for isi distance-----' %
              (time.time() - start_time))
        plt.show()
    elif analysis == 'spike_distance':
        #print ('spike distance')
        plt.figure()
        spike_distance = spk.spike_distance_matrix(spike_trains,
                                                   interval=(0, 100))
        spike_distance[spike_distance > 1] = 1
        np.save("spike_distance", spike_distance)
        plt.imshow(spike_distance, clim=(0.0, 1.0), interpolation='nearest')
        plt.colorbar()
        plt.title("SPIKE-distance(0-100ms)")
        print('----------Took %s seconds for spike distance-----' %
              (time.time() - start_time))
        plt.show()
    elif analysis == 'spike_sync':
        #print ('spike sync')
        plt.figure()
        spike_sync = spk.spike_sync_matrix(spike_trains, interval=(3300, 3500))
        plt.imshow(spike_sync, interpolation='none')
        plt.colorbar()
        plt.title("SPIKE-Sync")
        print('----------Took %s seconds for spike_sync-----' %
              (time.time() - start_time))
        plt.show()
    else:
        pass
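
The name multiprocessFunctions suggests the three analyses are meant to be dispatched in parallel; a minimal sketch of such a dispatch (an assumption about the calling code, which is not shown here):

import multiprocessing

if __name__ == '__main__':
    analyses = ['isi_distance', 'spike_distance', 'spike_sync']
    # note: the interactive plt.show() calls inside the worker may need a
    # non-GUI matplotlib backend when run in child processes
    with multiprocessing.Pool(processes=len(analyses)) as pool:
        pool.map(multiprocessFunctions, analyses)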
Code example #4
import matplotlib.pyplot as plt
import pyspike as spk
from pyspike import SpikeTrain


def create_distance_matrix(spikes_neuron, name_Area):
    """Create the distance matrix of the spike trains produced by a neuron
    when presented with different stimuli.

    Parameters:
    spikes_neuron: list of spike records produced by the neuron
    name_Area: name of the brain region where the neuron is located
    """
    list_spikes = []
    max_interval = 4000.0

    nb_flatrip = len(
        spikes_neuron[0]
        ["spikes"])  # number of synthetic stimuli presented to the neuron
    nb_conspecific = len(
        spikes_neuron[1]
        ["spikes"])  # number of authentic stimuli presented to the neuron
    min_nb_stim = min(nb_conspecific, nb_flatrip)

    spike_trains_flatrip = spikes_neuron[0]["spikes"]
    # iterate through spikes for flatrip (synthetic stimuli)
    # and add them to the list of spikes for the distance matrix
    for i in range(min_nb_stim):
        for spike in spike_trains_flatrip[i]["seriesSpikes"]:
            new_spike_train = SpikeTrain(spike, [0.0, max_interval])
            list_spikes.append(new_spike_train)

    spike_trains_conspecific = spikes_neuron[1]["spikes"]
    # iterate through spikes for conspecific (authentic stimuli)
    # and add them to the list of spikes for the distance matrix
    for i in range(min_nb_stim):
        for spike in spike_trains_conspecific[i]["seriesSpikes"]:
            new_spike_train = SpikeTrain(spike, [0.0, max_interval])
            list_spikes.append(new_spike_train)

    plt.figure()
    spike_distance = spk.spike_distance_matrix(
        list_spikes,
        interval=(0, max_interval))  # compute distances between all the trials
    plt.imshow(spike_distance, interpolation='none')
    plt.colorbar()
    plt.title("SPIKE-distance of " + str(min_nb_stim * 2) +
              " stimuli presented to\nneurons in the " + name_Area +
              " brain region")
    print(spike_distance)
    plt.show()
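
The nested layout of spikes_neuron is only implied by the indexing above; a hypothetical toy instance of that structure (names and values are illustrative):

# two stimulus conditions, each holding trials whose spike times live under
# "seriesSpikes" as a list of trains (structure inferred from the code above)
toy_spikes_neuron = [
    {"spikes": [{"seriesSpikes": [[10.0, 250.5, 900.0], [40.0, 410.0]]},
                {"seriesSpikes": [[15.2, 300.1]]}]},                # synthetic
    {"spikes": [{"seriesSpikes": [[12.0, 260.0]]},
                {"seriesSpikes": [[20.0, 310.0], [5.0, 950.0]]}]},  # authentic
]
create_distance_matrix(toy_spikes_neuron, "CM")  # "CM" is a placeholder area name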
Code example #5
File: sync.py  Project: joewgraham/EEE_network
import pickle

import numpy as np
import pyspike


def get_matrix(select='subset',
               min_spike_number=0,
               save=None,
               analysis=['SPIKE-Sync'],
               network=[0]):
    # relies on module-level state populated elsewhere: data_files, d,
    # timeRange, net_labels, gids, mats, pos_labels and path
    load_data(network)

    getmat = {}

    empty_dict_array = {}
    no_empty_dict_array = {}

    spkts = {}
    spkinds = {}
    spktsRange = {}
    spkt_train = {}
    spike_sync = {}

    for f, p in enumerate(data_files):
        if f in network:
            spkts[f] = d[p]['simData']['spkt']  #list
            spkinds[f] = d[p]['simData']['spkid']  #list

            print('Starting analysis of spike times per ' + str(
                select) + ': ' + str(p))

            for t, y in enumerate(timeRange):

                spktsRange = [
                    spkt for spkt in spkts[f]
                    if timeRange[t][0] <= spkt <= timeRange[t][1]
                ]

                spkt_train[str(f) + str(t)] = []

                if select == 'subset':
                    print('Time Range: ' + str(y))

                    empty_array = np.zeros(
                        ((len(net_labels) * 2), (len(net_labels) * 2)))
                    no_empty_array = np.zeros(
                        ((len(net_labels) * 2), (len(net_labels) * 2)))
                    array_ii = np.zeros(
                        ((len(net_labels) * 2), (len(net_labels) * 2)))

                    empty_gids = []
                    gids_included = []

                    for k, v in enumerate(gids):
                        train = []
                        for i, gid in enumerate(v):
                            for spkind, spkt in zip(spkinds[f], spkts[f]):
                                if (spkind == gid and spkt in spktsRange):
                                    train.append(spkt)

                        spkt_train[str(f) + str(t)].append(
                            pyspike.SpikeTrain(train, timeRange[t]))

                        if len(train) < min_spike_number:
                            empty_gids.append(k)
                        else:
                            gids_included.append(k)

                    for i in range(len(spkt_train[str(f) + str(t)])):
                        if i in gids_included:
                            for k, v in enumerate(gids_included):
                                no_empty_array[i][v] = 1.0

                    for l in range(len(array_ii)):
                        array_ii[l][l] = 1.0

                    no_empty_dict_array[str(f) + str(t)] = no_empty_array

                elif select == 'cell':

                    print('Time Range: ' + str(y))

                    empty_array = np.zeros(
                        ((len(net_labels) * 80), (len(net_labels) * 80)))
                    no_empty_array = np.zeros(
                        ((len(net_labels) * 80), (len(net_labels) * 80)))

                    empty_gids = []
                    spkmat2 = []
                    gids_included = []
                    #sync = np.zeros(((len(net_labels)*80),(len(net_labels)*80)))

                    for ii, subset in enumerate(gids):
                        spkmat = [
                            pyspike.SpikeTrain([
                                spkt
                                for spkind, spkt in zip(spkinds[f], spkts[f])
                                if (spkind == gid and spkt in spktsRange)
                            ], timeRange[t]) for gid in set(subset)
                        ]
                        spkt_train[str(f) + str(t)].extend(spkmat)

                        for gid in set(subset):
                            list_spkt = [
                                spkt
                                for spkind, spkt in zip(spkinds[f], spkts[f])
                                if (spkind == gid and spkt in spktsRange)
                            ]

                            if len(list_spkt) < min_spike_number:
                                empty_gids.append(gid)
                            else:
                                spkmat2.append(
                                    pyspike.SpikeTrain(list_spkt,
                                                       timeRange[t]))
                                gids_included.append(gid)
                        pos_labels.append(len(gids_included))

                    #print gids_included
                    empty_gids[:] = [x - 200 for x in empty_gids]
                    gids_included[:] = [x - 200 for x in gids_included]
                    #print empty_gids
                    for i in range(len(spkt_train[str(f) + str(t)])):
                        if i in empty_gids:
                            for k, v in enumerate(empty_gids):
                                empty_array[i][v] = 1.0

                    for i in range(len(spkt_train[str(f) + str(t)])):
                        if i in gids_included:
                            for k, v in enumerate(gids_included):
                                no_empty_array[i][v] = 1.0

                    #print empty_array
                    empty_dict_array[str(f) + str(t)] = empty_array
                    no_empty_dict_array[str(f) + str(t)] = no_empty_array
                #print spkt_train
                for l, mat in enumerate(mats):
                    #spike_sync
                    if (mat == 'ISI-distance' and mat in analysis):
                        print(str(mat) + ", number of trains: " + str(
                            len(spkt_train[str(f) + str(t)])))
                        isi_distance = pyspike.isi_distance_matrix(
                            spkt_train[str(f) + str(t)])
                        getmat[str(f) + str(t) + str(l)] = isi_distance

                    elif (mat in analysis and mat == 'SPIKE-distance'):
                        print(str(mat) + ", number of trains: " + str(
                            len(spkt_train[str(f) + str(t)])))
                        spike_distance = pyspike.spike_distance_matrix(
                            spkt_train[str(f) + str(t)])
                        getmat[str(f) + str(t) + str(l)] = spike_distance

                    elif (mat in analysis and mat == 'SPIKE-Sync'):
                        print(str(mat) + ", number of trains: " + str(
                            len(spkt_train[str(f) + str(t)])))
                        spike_sync[str(f) +
                                   str(t)] = pyspike.spike_sync_matrix(
                                       spkt_train[str(f) + str(t)])
                        #if select == 'subset':
                        # NB: array_ii is only created in the 'subset' branch,
                        # so this line raises NameError when select == 'cell'
                        getmat[str(f) + str(t) + str(l)] = (
                            spike_sync[str(f) + str(t)] *
                            no_empty_dict_array[str(f) + str(t)]) + array_ii
                        #elif select == 'cell':
                        #getmat[str(f)+str(t)+str(l)] = spike_sync[str(f)+str(t)] * no_empty_dict_array[str(f)+str(t)]

                empty_array = np.zeros(
                    ((len(net_labels) * 80), (len(net_labels) * 80)))
        else:
            pass

    if save:
        with open(str(path) + 'data1.pkl', 'wb') as output:
            pickle.dump(getmat, output)

    print('finished getting data for matrix plotting')
    return getmat
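
The heart of the SPIKE-Sync branch above is masking the synchrony matrix so that pairs involving excluded trains are zeroed and the diagonal is forced to one; a standalone sketch of that masking step with toy values:

import numpy as np

sync = np.array([[0.0, 0.8, 0.1],
                 [0.8, 0.0, 0.2],
                 [0.1, 0.2, 0.0]])      # pairwise SPIKE-Sync; zero diagonal,
                                        # as the added identity suggests
included = [0, 1]                       # trains passing min_spike_number

mask = np.zeros_like(sync)
mask[np.ix_(included, included)] = 1.0    # equivalent to no_empty_array above
masked = sync * mask + np.eye(len(sync)) # array_ii supplies the unit diagonal
print(masked)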
Code example #6
import matplotlib.pyplot as plt
import numpy as np
import pyspike as spk

# first load the data; the recording interval ends at t=4000 (start defaults to 0)
spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", 4000)

print(len(spike_trains))

plt.figure()
isi_distance = spk.isi_distance_matrix(spike_trains)
plt.imshow(isi_distance, interpolation='none')
plt.title("ISI-distance")
plt.savefig("fig/ISI-distance")
plt.close()

plt.figure()
spike_distance = spk.spike_distance_matrix(spike_trains, interval=(0, 1000))
plt.imshow(spike_distance, interpolation='none')
plt.title("SPIKE-distance, T=0-1000")
plt.savefig("fig/SPIKE-distance,T=0-1000")
plt.close()

plt.figure()
spike_sync = spk.spike_sync_matrix(spike_trains, interval=(2000, 4000))
plt.imshow(spike_sync, interpolation='none')
plt.title("SPIKE-Sync, T=2000-4000")
plt.savefig("fig/SPIKE-Sync,T=2000-4000")
plt.close()

# print(np.mean(spike_sync), np.mean(spike_distance), np.mean(isi_distance))
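
Besides the matrices, PySpike also exposes averaged scalar variants, which is roughly what the commented-out line above computes from the matrices; a short sketch:

# time-averaged multivariate values over all trains (comparable to averaging
# the matrices above, up to the handling of the diagonal)
print(spk.isi_distance(spike_trains))
print(spk.spike_distance(spike_trains))
print(spk.spike_sync(spike_trains))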
Code example #7
import matplotlib.pyplot as plt
import numpy as np
import quantities as pq
import pyspike
import pyspike as spk
import elephant.conversion as conv
from elephant.statistics import cv


def iter_plot0(md):
    import seaborn as sns
    import pickle
    with open('cell_indexs.p', 'rb') as f:
        returned_list = pickle.load(f)
    index_exc = returned_list[0]
    index_inh = returned_list[1]
    index, mdf1 = md
    #wgf = {0.025:None,0.05:None,0.125:None,0.25:None,0.3:None,0.4:None,0.5:None,1.0:None,1.5:None,2.0:None,2.5:None,3.0:None}
    wgf = {
        0.0025: None,
        0.0125: None,
        0.025: None,
        0.05: None,
        0.125: None,
        0.25: None,
        0.3: None,
        0.4: None,
        0.5: None,
        1.0: None,
        1.5: None,
        2.0: None,
        2.5: None,
        3.0: None
    }

    weight_gain_factors = {k: v for k, v in enumerate(wgf.keys())}
    print(len(weight_gain_factors))
    print(weight_gain_factors.keys())
    #weight_gain_factors = {0:0.5,1:1.0,2:1.5,3:2.0,4:2.5,5:3}
    #weight_gain_factors = {:None,1.0:None,1.5:None,2.0:None,2.5:None}

    k = weight_gain_factors[index]
    #print(len(mdf1.segments),'length of block')

    ass = mdf1.analogsignals[0]

    time_points = ass.times
    avg = np.mean(ass, axis=0)  # average over the signals of the segment
    #maxx = np.max(ass, axis=0)
    std = np.std(ass, axis=0)  # standard deviation over the signals
    plt.figure()
    plt.plot([i for i in range(0, len(avg))], avg)
    plt.plot([i for i in range(0, len(std))], std)

    plt.title("Mean and Standard Dev of $V_{m}$ amplitude per neuron ")
    plt.xlabel('time $(ms)$')
    plt.xlabel('Voltage $(mV)$')

    plt.savefig(str(index) + 'prs.png')
    vm_spiking = []
    vm_not_spiking = []
    spike_trains = []
    binary_trains = []
    max_spikes = 0

    vms = np.array(mdf1.analogsignals[0].as_array().T)
    #print(data)
    #for i,vm in enumerate(data):

    cnt = 0
    for spiketrain in mdf1.spiketrains:
        #spiketrain = mdf1.spiketrains[index]
        y = np.ones_like(spiketrain) * spiketrain.annotations['source_id']
        #import sklearn
        #sklearn.decomposition.NMF(y)
        # argument edges is the time interval you want to be considered.
        pspikes = pyspike.SpikeTrain(spiketrain, edges=(0, len(ass)))
        spike_trains.append(pspikes)
        if len(spiketrain) > max_spikes:
            max_spikes = len(spiketrain)

        if np.max(ass[spiketrain.annotations['source_id']]) > 0.0:
            vm_spiking.append(vms[spiketrain.annotations['source_id']])
        else:
            vm_not_spiking.append(vms[spiketrain.annotations['source_id']])
        cnt += 1

    for spiketrain in mdf1.spiketrains:
        x = conv.BinnedSpikeTrain(spiketrain,
                                  binsize=1 * pq.ms,
                                  t_start=0 * pq.s)
        binary_trains.append(x)
    end_floor = np.floor(float(mdf1.t_stop))
    dt = float(mdf1.t_stop) % end_floor  # fractional remainder of the stop time, used as the step
    #v = mdf1.take_slice_of_analogsignalarray_by_unit()
    t_axis = np.arange(float(mdf1.t_start), float(mdf1.t_stop), dt)
    plt.figure()
    plt.clf()
    cleaned = []
    data = np.array(mdf1.analogsignals[0].as_array().T)
    #print(data)
    for i, vm in enumerate(data):
        if np.max(vm) > 900.0 or np.min(vm) < -900.0:
            pass
        else:
            plt.plot(ass.times, vm)  #,label='neuron identifier '+str(i)))
            cleaned.append(vm)
            #vm = s#.as_array()[:,i]

    assert len(cleaned) < len(ass)

    print(len(cleaned))
    plt.title('neuron $V_{m}$')
    #plt.legend(loc="upper left")
    plt.savefig(str('weight_') + str(k) + 'analogsignals' + '.png')
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Voltage $(mV)$')

    plt.close()

    #pass

    plt.figure()
    plt.clf()
    plt.title('Single Neuron $V_{m}$ trace')
    plt.plot(ass.times[0:int(len(ass.times) / 10)],
             vm_not_spiking[index_exc[0]][0:int(len(ass.times) / 10)])
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Voltage $(mV)$')
    plt.savefig(str('weight_') + str(k) + 'eespecific_analogsignals' + '.png')
    plt.close()

    plt.figure()
    plt.clf()
    plt.title('Single Neuron $V_{m}$ trace')
    plt.plot(ass.times[0:int(len(ass.times) / 10)],
             vm_not_spiking[index_inh[0]][0:int(len(ass.times) / 10)])
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Voltage $(mV)$')

    plt.savefig(str('weight_') + str(k) + 'inhibitory_analogsignals' + '.png')
    plt.close()

    cvsd = {}  # kept for the commented-out pickle dump below
    cvs = []
    rates = []  # firing rates per cell, in spikes per second
    for i, j in enumerate(spike_trains):
        rates.append(len(j) / 2.0)  # spike count over an assumed 2 s recording
        cva = cv(j)
        # NaN and zero CVs are kept here; zeros are filtered out further below
        cvs.append(cva)
    #import pickle
    #with open(str('weight_')+str(k)+'coefficients_of_variation.p','wb') as f:
    #   pickle.dump([cvs,cvsd],f)
    a = np.asarray(cvs)
    np.savetxt('pickles/' + str('weight_') + str(k) +
               'coefficients_of_variation.csv',
               a,
               delimiter=",")

    a = np.asarray(rates)
    np.savetxt('pickles/' + str('weight_') + str(k) + 'firing_rates.csv',
               a,
               delimiter=",")

    # NB: dropping zero CVs shifts indices, so indexing cvs with index_inh /
    # index_exc below assumes no zeros were actually removed
    cvs = [i for i in cvs if i != 0]
    cells = [i for i in range(0, len(cvs))]

    plt.clf()
    fig, axes = plt.subplots()
    axes.set_title('Coefficient of Variation Versus Neuron')
    axes.set_xlabel('Neuron number')
    axes.set_ylabel('CV estimate')
    mcv = np.mean(cvs)
    #plt.scatter(cells,cvs)
    cvs = np.array(cvs)
    plt.scatter(index_inh, cvs[index_inh], label="inhibitory cells")
    plt.scatter(index_exc, cvs[index_exc], label="excitatory cells")
    plt.legend(loc="upper left")

    fig.tight_layout()
    plt.savefig(str('weight_') + str(k) + 'cvs_mean_' + str(mcv) + '.png')
    plt.close()

    plt.clf()
    #frequencies, power = elephant.spectral.welch_psd(ass)
    #mfreq = frequencies[np.where(power==np.max(power))[0][0]]
    fig, axes = plt.subplots()  # fresh axes; the previous figure was closed
    axes.set_title('Firing Rate Versus Neuron Number at mean f=' +
                   str(np.mean(rates)) + ' (spikes per second)')
    axes.set_xlabel('Neuron number')
    axes.set_ylabel('Spikes per second')
    rates = np.array(rates)
    plt.scatter(index_inh, rates[index_inh], label="inhibitory cells")
    plt.scatter(index_exc, rates[index_exc], label="excitatory cells")
    plt.legend(loc="upper left")
    fig.tight_layout()
    plt.savefig(str('firing_rates_per_cell_') + str(k) + str(mcv) + '.png')
    plt.close()
    '''
    import pandas as pd
    d = {'coefficent_of_variation': cvs, 'cells': cells}
    df = pd.DataFrame(data=d)

    ax = sns.regplot(x='cells', y='coefficent_of_variation', data=df)#, fit_reg=False)
    plt.savefig(str('weight_')+str(k)+'cvs_regexp_'+str(mcv)+'.png');
    plt.close()
    '''

    spike_trains = []
    ass = mdf1.analogsignals[0]
    tstop = mdf1.t_stop
    assert np.max(ass.times) == mdf1.t_stop  # sanity check on recording length
    tstop = 2000  # hard-coded; overrides the value read from the block
    vm_spiking = []

    for spiketrain in mdf1.spiketrains:
        vm_spiking.append(
            mdf1.analogsignals[0][spiketrain.annotations['source_id']])
        y = np.ones_like(spiketrain) * spiketrain.annotations['source_id']

        # argument edges is the time interval you want to be considered.
        pspikes = pyspike.SpikeTrain(spiketrain, edges=(0, tstop))
        spike_trains.append(pspikes)

    # plot the spike times

    plt.clf()
    for (i, spike_train) in enumerate(spike_trains):
        plt.scatter(spike_train, i * np.ones_like(spike_train), marker='.')
    plt.xlabel('Time (ms)')
    plt.ylabel('Cell identifier')
    plt.title('Raster Plot for weight strength:' + str(k))

    plt.savefig(str('weight_') + str(k) + 'raster_plot' + '.png')
    plt.close()

    f = spk.isi_profile(spike_trains, indices=[0, 1])
    x, y = f.get_plottable_data()

    #text_file.close()
    text_file = open(str('weight_') + str(index) + 'net_out.txt', 'w')

    plt.figure()
    plt.plot(x, np.abs(y), '--k', label="ISI-profile")
    print("ISI-distance: %.8f" % f.avrg())
    f = spk.spike_profile(spike_trains, indices=[0, 1])
    x, y = f.get_plottable_data()
    plt.plot(x, y, '-b', label="SPIKE-profile")
    #print("SPIKE-distance: %.8f" % f.avrg())
    string_to_write = str("ISI-distance:") + str(f.avrg()) + str("\n\n")
    plt.title(string_to_write)
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Distance profile')
    plt.legend(loc="upper left")
    plt.savefig(str('weight_') + str(k) + 'ISI_distance_bivariate' + '.png')
    plt.close()
    text_file.write(string_to_write)

    #text_file.write("SPIKE-distance: %.8f" % f.avrg())
    #text_file.write("\n\n")

    plt.figure()
    f = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
    x, y = f.get_plottable_data()
    plt.plot(x, y, '--ok', label="SPIKE-SYNC profile")
    print(f, f.avrg())
    print("Average:" + str(f.avrg()))
    #print(len(f.avrg()),f.avrg())
    string_to_write = str("instantaneous synchrony:") + str(
        f.avrg()) + 'weight: ' + str(index)

    plt.title(string_to_write)
    plt.xlabel('Time $(ms)$')
    plt.ylabel('instantaneous synchrony')

    text_file.write(string_to_write)
    text_file.close()

    #text_file.write(list())

    f = spk.spike_profile(spike_trains[0], spike_trains[1])
    x, y = f.get_plottable_data()

    plt.plot(x, y, '-b', label="SPIKE-profile")
    plt.axis([0, 4000, -0.1, 1.1])
    plt.legend(loc="center right")
    plt.clf()
    plt.figure()
    plt.subplot(211)

    f = spk.spike_sync_profile(spike_trains)
    x, y = f.get_plottable_data()
    plt.plot(x, y, '-b', alpha=0.7, label="SPIKE-Sync profile")
    x1, y1 = f.get_plottable_data(averaging_window_size=50)
    plt.plot(x1, y1, '-k', lw=2.5, label="averaged SPIKE-Sync profile")
    plt.subplot(212)

    f_psth = spk.psth(spike_trains, bin_size=50.0)
    x, y = f_psth.get_plottable_data()
    plt.plot(x, y, '-k', alpha=1.0, label="PSTH")
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Spikes per bin')

    plt.savefig(str('weight_') + str(k) + 'multivariate_PSTH' + '.png')
    plt.close()

    plt.clf()
    plt.figure()

    f_psth = spk.psth(spike_trains, bin_size=50.0)
    x, y = f_psth.get_plottable_data()
    plt.plot(x, y, '-k', alpha=1.0, label="PSTH")

    plt.savefig(str('weight_') + str(k) + 'exclusively_PSTH' + '.png')
    plt.close()

    plt.figure()
    isi_distance = spk.isi_distance_matrix(spike_trains)
    plt.imshow(isi_distance, interpolation='none')
    plt.title('Pairwise ISI-distance, T=0-2000')
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')
    plt.savefig(str('weight_') + str(k) + 'ISI_distance' + '.png')
    plt.close()

    #plt.show()

    plt.figure()
    plt.clf()
    import seaborn as sns

    sns.set()
    sns.clustermap(isi_distance)

    plt.savefig(str('weight_') + str(k) + 'cluster_isi_distance' + '.png')
    plt.close()

    plt.figure()
    spike_distance = spk.spike_distance_matrix(spike_trains,
                                               interval=(0, float(tstop)))

    import pickle
    with open('spike_distance_matrix.p', 'wb') as f:
        pickle.dump(spike_distance, f)

    plt.imshow(spike_distance, interpolation='none')
    plt.title("Pairwise SPIKE-distance, T=0-2000")
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')

    plt.savefig(str('weight_') + str(k) + 'spike_distance_matrix' + '.png')
    plt.close()
    plt.figure()
    plt.clf()
    sns.set()
    sns.clustermap(spike_distance)

    plt.savefig(str('weight_') + str(k) + 'cluster_spike_distance' + '.png')
    plt.close()

    plt.figure()
    spike_sync = spk.spike_sync_matrix(spike_trains,
                                       interval=(0, float(tstop)))
    plt.imshow(spike_sync, interpolation='none')
    plt.title('Pairwise Spike Synchrony, T=0-2000')
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')
    # persist the synchrony matrix figure alongside the CSV
    plt.savefig(str('weight_') + str(k) + 'spike_sync_matrix' + '.png')
    plt.close()

    a = np.asarray(spike_sync)
    np.savetxt("spike_sync_matrix.csv", a, delimiter=",")

    plt.figure()
    plt.clf()
    sns.clustermap(spike_sync)
    plt.savefig(
        str('weight_') + str(k) + 'cluster_spike_sync_distance' + '.png')
    plt.close()
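
The cv() used above is presumably elephant.statistics.cv, applied here to the spike times themselves; the more conventional quantity is the CV of the inter-spike intervals. A minimal sketch of that computation on a toy train (assuming elephant, neo and quantities are installed):

import quantities as pq
import neo
from elephant.statistics import cv, isi

train = neo.SpikeTrain([12.0, 110.0, 250.0, 300.0, 420.0] * pq.ms,
                       t_stop=500.0 * pq.ms)
print(cv(isi(train)))  # std/mean of the inter-spike intervals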
Code example #8
"""


from __future__ import print_function

import matplotlib.pyplot as plt

import pyspike as spk

# first load the data; the recording interval ends at t=4000 (start defaults to 0)
spike_trains = spk.load_spike_trains_from_txt("PySpike_testdata.txt", 4000)

print(len(spike_trains))

plt.figure()
isi_distance = spk.isi_distance_matrix(spike_trains)
plt.imshow(isi_distance, interpolation='none')
plt.title("ISI-distance")

plt.figure()
spike_distance = spk.spike_distance_matrix(spike_trains, interval=(0, 1000))
plt.imshow(spike_distance, interpolation='none')
plt.title("SPIKE-distance, T=0-1000")

plt.figure()
spike_sync = spk.spike_sync_matrix(spike_trains, interval=(2000, 4000))
plt.imshow(spike_sync, interpolation='none')
plt.title("SPIKE-Sync, T=2000-4000")

plt.show()
Code example #9
        d = np.load('save/brunel_inp={}_g={}_seed_{}.npy'.format(inp, g, s),
                    allow_pickle=True).item()  # a pickled dict of results

        # synchronicity
        sp = d['sp']

        spike_list = []
        for train in sp:
            spike_list.append(
                spk.SpikeTrain(list(sp[train]), (0, 50), is_sorted=False))

        sync_dist = spk.spike_sync_matrix(spike_list,
                                          indices=None,
                                          interval=(1, 20))
        spike_dist = spk.spike_distance_matrix(spike_list,
                                               indices=None,
                                               interval=(1, 20))
        for i in range(sync_dist.shape[0]):
            sync_dist[i, i] = 1  # so 1 - sync_dist has a zero diagonal
        utils.Weight2txt(
            1 - sync_dist,
            'txt/brunel_inp={}_g={}_seed_{}_sync.txt'.format(inp, g, s))
        utils.Weight2txt(
            spike_dist,
            'txt/brunel_inp={}_g={}_seed_{}_dist.txt'.format(inp, g, s))

        # Correlation
        corr = utils.Correlation_matrice(sp,
                                         interval=(1, 20),
                                         bin_by_sec=500,
                                         tau=1)
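
Weight2txt and Correlation_matrice come from a project-local utils module that is not shown; a hypothetical reading of the first helper, for orientation only (the real signature and behavior may differ):

import numpy as np

def Weight2txt(matrix, filename):
    # plausible behavior: write a square weight/distance matrix to a plain
    # text file, one whitespace-separated row per line (hypothetical sketch)
    np.savetxt(filename, np.asarray(matrix))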