Example #1
 def generate_prediction(self, model):
     model.inject_square_current(observation['current'])
     spike_train = model.get_spike_train()
     if len(spike_train) >= 3:
         # Keep the result and avoid shadowing the cv() function itself.
         value = cv(spike_train) * pq.dimensionless
         #isis = isi(spike_train)
         #cv_old = np.std(isis) / np.mean(isis)
     else:
         value = None
     return {'cv': value}
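
The commented-out lines above hint at what elephant's cv() actually computes: the standard deviation of its input divided by its mean. A minimal standalone sketch (assuming only the public elephant/neo/quantities APIs) confirming that cv(isi(...)) matches the manual definition:

import numpy as np
import quantities as pq
from neo import SpikeTrain
from elephant.statistics import isi, cv

st = SpikeTrain([1.0, 3.0, 4.5, 7.0, 8.2] * pq.ms, t_stop=10.0 * pq.ms)
isis = isi(st)                        # inter-spike intervals
print(cv(isis))                       # elephant's coefficient of variation
print(np.std(isis) / np.mean(isis))   # manual std/mean; same value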
Example #3
 def generate_prediction(self, model, **kwargs):
     isi_var = self.get_prediction(model)
     if isi_var is None:
         if kwargs:
             self.params.update(kwargs)
         if 'variation_measure' not in self.params:
             self.params.update(variation_measure='lv')
         spiketrains = model.produce_spiketrains(**self.params)
         isi_list = [isi(st) for st in spiketrains]
         if self.params['variation_measure'] == 'lv':
             isi_var = []
             for intervals in isi_list:
                 if intervals.size > 2:
                     isi_var.append(lv(intervals))
         elif self.params['variation_measure'] == 'cv':
             isi_var = []
             for intervals in isi_list:
                 if intervals.size > 2:
                     isi_var.append(cv(intervals))
         elif self.params['variation_measure'] == 'isi':
             isi_var = [
                 float(item) for sublist in isi_list for item in sublist
             ]
         else:
             raise ValueError('Variation measure not known.')
         self.set_prediction(model, isi_var)
     return isi_var
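
The two variation measures this method selects between both come from elephant.statistics: cv measures global ISI variability, while lv (local variation) compares neighbouring intervals and is less sensitive to slow rate changes. A small sketch, assuming an elephant version that exports lv, comparing both on an irregular train (for a Poisson-like train both are near 1; for a perfectly regular one both are near 0):

import numpy as np
import quantities as pq
from neo import SpikeTrain
from elephant.statistics import isi, cv, lv

rng = np.random.default_rng(0)
times = np.sort(rng.uniform(0, 1000, 200))           # Poisson-like spike times
st = SpikeTrain(times * pq.ms, t_stop=1000 * pq.ms)
intervals = isi(st)
print('cv:', cv(intervals))   # global variability of the ISIs
print('lv:', lv(intervals))   # local (pairwise) variability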
Example #4
def spike_statistics(idx, row):
    from elephant.statistics import mean_firing_rate, cv, isi
    from elephant.conversion import BinnedSpikeTrain
    from elephant.spike_train_correlation import corrcoef
    from neo.io import get_io
    from quantities import Quantity

    print(idx)
    results = {}

    # read spike trains from file
    io = get_io(row["output_file"])
    data_block = io.read()[0]
    spiketrains = data_block.segments[0].spiketrains

    # calculate mean firing rate
    results["spike_counts"] = sum(st.size for st in spiketrains)
    rates = [mean_firing_rate(st) for st in spiketrains]
    results["firing_rate"] = Quantity(rates, units=rates[0].units).rescale("1/s").mean()

    # calculate coefficient of variation of the inter-spike interval
    cvs = [cv(isi(st)) for st in spiketrains if st.size > 1]
    if len(cvs) > 0:
        results["cv_isi"] = sum(cvs)/len(cvs)
    else:
        results["cv_isi"] = 0

    # calculate global cross-correlation
    #cc_matrix = corrcoef(BinnedSpikeTrain(spiketrains, binsize=5*ms))
    #results["cc_min"] = cc_matrix.min()
    #results["cc_max"] = cc_matrix.max()
    #results["cc_mean"] = cc_matrix.mean()

    io.close()
    return results
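
mean_firing_rate returns a Quantity whose units are the reciprocal of the spike train's time units, which is why the code above rescales to "1/s" before averaging. A minimal sketch of that conversion, using only the standard elephant/neo/quantities APIs:

import quantities as pq
from neo import SpikeTrain
from elephant.statistics import mean_firing_rate

st = SpikeTrain([10, 20, 40, 80] * pq.ms, t_stop=100 * pq.ms)
rate = mean_firing_rate(st)   # 4 spikes / 100 ms -> units of 1/ms
print(rate.rescale("1/s"))    # 40.0 1/s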
Example #5
 def generate_prediction(self, model):
     model.inject_square_current(self.run_params['current'])
     spike_train = model.get_spike_train()
     if len(spike_train) >= 3:
         value = cv(spike_train)*pq.dimensionless
     else:
         value = None
     return {'cv': value}
Example #6
 def generate_prediction(self, model=None):
     st = model.get_spike_train()
     if len(st) >= 3:
         value = abs(cv(st))*pq.dimensionless
     else:
         value = None
     prediction = {'cv': value}
     return prediction
Example #7
 def generate_prediction(self, model=None):
     st = model.get_spike_train()
     #prediction = abs(cv(st))
     if len(st) >= 3:
         value = abs(cv(st)) * pq.dimensionless
     else:
         value = None
     prediction = {'cv': value}
     return prediction
Example #8
def calc_cv_isi_hist(spike_times, spike_ids, num, duration, bin_x=None):
    # Loop through neurons
    cv_isi = []
    for n in range(num):
        # Get mask of spikes from this neuron and use to extract their times
        mask = (spike_ids == n)
        neuron_spike_times = spike_times[mask]

        # If this neuron spiked more than once i.e. it is possible to calculate ISI!
        if len(neuron_spike_times) > 1:
            cv_isi.append(cv(isi(neuron_spike_times)))

    return calc_histogram(cv_isi, 0.04, bin_x)
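
calc_histogram is defined elsewhere in that project and is not shown here. A plausible, purely hypothetical stand-in that bins the CV values with a fixed bin width (the 0.04 above) and reuses precomputed bin edges when bin_x is supplied:

import numpy as np

def calc_histogram(data, bin_width, bin_x=None):
    # Hypothetical helper (not from the original source): fixed-width
    # histogram, reusing the caller's bin edges when provided.
    if bin_x is None:
        bin_x = np.arange(0.0, np.max(data) + bin_width, bin_width)
    hist, _ = np.histogram(data, bins=bin_x)
    return hist, bin_x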
Example #9
def calculate_neuron_cov(col, num_mins_per_bin, total_time):
    num_bins = int(total_time / num_mins_per_bin)
    col_bins = np.array_split(col, num_bins)
    cv_isis = pd.Series(np.zeros(num_bins))

    for ind, col_bin in enumerate(col_bins):
        spike_times = pd.to_numeric(col_bin[col_bin.notnull()].index.values)
        try:
            spike_train = SpikeTrain(times=spike_times,
                                     t_stop=spike_times[-1],
                                     units=ns)
            cv_isi = cv(isi(spike_train))
        except IndexError:
            # An empty bin has no spikes: spike_times[-1] raises IndexError.
            cv_isi = np.nan
        cv_isis[ind] = cv_isi

    return cv_isis
Example #10
# In[40]:

from elephant.statistics import cv
import matplotlib.pyplot as plt
import numpy as np

# coefficient of variation per spike train; NaN (too few spikes) is mapped to 0
hist_cv = []
for train in spike_trains:
    cva = cv(train)
    hist_cv.append(0 if np.isnan(cva) else cva)

x_axis = range(len(hist_cv))
plt.bar(x_axis, hist_cv)
plt.show()
plt.hist(hist_cv)
Example #11
for i, spiketrain in enumerate(snglnrn_spikes_neo):
		t = spiketrain.rescale(q.ms)
		plt.plot(t, i * np.ones_like(t), 'k.', markersize=2)
plt.axis('tight')
plt.xlim(0, runtime)
plt.xlabel('Time (ms)', fontsize=16)
plt.ylabel('Spike Train Index', fontsize=16)
plt.gca().tick_params(axis='both', which='major', labelsize=14)
plt.savefig('decorr_rasterplot_w{}_k{}.png'.format(w, numInhPerNeuron))
'''

# calculate ISIs and coefficient of variation (CV)

isi_list  = [np.nanmean(isi(spiketrain))       for spiketrain in snglnrn_spikes_neo]
rate_list = [(np.size(spiketrain) / runtime * 1e3) for spiketrain in snglnrn_spikes]
cv_list   = [cv(isi(spiketrain))               for spiketrain in snglnrn_spikes_neo]


train = BinnedSpikeTrain(snglnrn_spikes_neo, binsize=5 * q.ms)
cc_matrix = corrcoef(train, binary=False)

# Cache the matrix
#np.savetxt('cc_matrix.txt', cc_matrix)
#print(np.shape(cc_matrix)) # (192, 192)
#print(cc_matrix)
#plt.plot(cc_matrix)

# Remove the main diagonal
for i in range(192):
	cc_matrix[i][i] = np.nan 
Example #12
 def test_cv_isi_regular_array_is_zero(self):
     st = self.test_array_regular
     targ = 0.0
     res = es.cv(es.isi(st))
     self.assertEqual(res, targ)
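
The test works because a perfectly regular train has identical ISIs, so their standard deviation, and hence the CV, is exactly zero. A quick standalone check, assuming test_array_regular is just an evenly spaced array:

import numpy as np
import elephant.statistics as es

st = np.arange(1, 10)            # evenly spaced "spike times"
assert es.cv(es.isi(st)) == 0.0  # all ISIs equal -> std = 0 -> CV = 0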
Example #13
 def test_cv_isi_regular_spiketrain_is_zero(self):
     st = neo.SpikeTrain(self.test_array_regular,  units='ms', t_stop=10.0)
     targ = 0.0
     res = es.cv(es.isi(st))
     self.assertEqual(res, targ)
Example #15
mean_rate = np.divide(rate, (max_ms - min_ms) / 1000.0, dtype=float)
print("Mean firing rate: %fHz" % np.average(mean_rate))

# Sort spikes by id
neuron_spikes = spikes.groupby("id")

# Loop through neurons
cv_isi = []
for n in range(num_excitatory):
    try:
        # Get this neuron's spike times
        neuron_spike_times = neuron_spikes.get_group(n)["time"].values

        # If this neuron spiked more than once i.e. it is possible to calculate ISI!
        if len(neuron_spike_times) > 1:
            cv_isi.append(cv(isi(neuron_spike_times)))
    except KeyError:
        pass

print("Mean CV ISI: %f" % np.average(cv_isi))

# Pick 1000 neurons
binned_spike_times = None
for i, n in enumerate(np.random.choice(num_excitatory, 1000, replace=False)):
    # Get this neuron's spike times
    neuron_spike_times = neuron_spikes.get_group(n)["time"].values

    # Bin spike times
    neuron_binned_spike_times, _ = np.histogram(neuron_spike_times,
                                                bins=np.arange(
                                                    min_ms, max_ms, 3))
Example #16
        sta_i_df = None
        sta_ind = None
        sta_avg = None
    plt.legend()
    #plt.show()
    plt.savefig(item+'_sta_meansubtracted.png',dpi=600,format='png')

#%% ISI and CV of the ISI; fails when there are no spikes at all.
os.chdir('/home/ngg1/[email protected]/Data_Urban/NEURON/Analysis/'
         'AlmogAndKorngreen2014/ModCell_5_thrdsafe/summary')
from elephant.statistics import isi, cv
cvdf = pd.Series(dtype=float)
for item in g_list:
    for key in channels[item]:
        isi_list = [isi(spiketrain) for spiketrain in channels[item][key]['spikeTimes500']]
        cv_list = [cv(intervals) for intervals in isi_list]
        channels[item][key]['isi'] = isi_list
        channels[item][key]['CVisi'] = np.mean(cv_list)
        cv_mean = pd.Series(np.mean(cv_list))
        cvdf = pd.concat([cvdf, cv_mean])
stats3df = pd.concat([statsdf2,cvdf],axis=1)
stats3df = stats3df.rename(columns = {0:'CV ISI'})
stats3df.to_csv('wns_delta_stats3.csv',float_format='%10.4f',index=False,header=True)

#%% plot cv isi as function of delta conductance
os.chdir('/home/ngg1/[email protected]/Data_Urban/NEURON/Analysis/'
         'AlmogAndKorngreen2014/ModCell_5_thrdsafe/figures')
plt.figure()
plt.xlabel('Global Channel Conductance Factor')
plt.ylabel('CV ISI')
for item in g_list:
Example #17
        raster_list.append(t)
        plt.plot(t, (i + 1) * np.ones_like(t), 'k.', markersize=2)
        plt.axis('tight')
        plt.xlim(0, 3100)
        plt.xlabel('Time (ms)', fontsize=16)
        plt.ylim(0, 51)
        plt.ylabel('Trial Number', fontsize=16)
        plt.gca().tick_params(axis='both', which='major', labelsize=14)
        plt.show()

#%%
#CV isi NEVER CONVERTED FOR MULTIPLE FILES###
from elephant.statistics import isi, cv

isi_list = [isi(spiketrain) for spiketrain in st_list]
cv_list = [cv(item) for item in isi_list]
plt.figure()
plt.hist(cv_list)
plt.xlabel('CV', fontsize=16)
plt.ylabel('count', fontsize=16)
plt.show()
plt.figure()
plt.plot(cv_list)
#%%
"""
28Sep2017
NOTE: Work on conversion of discrete spike time lists to binary spike counts.
Use this conversion for PSTH and for spike-time correlations.
"""
##ADDED bstc_df to cut the first 500 ms of data out for correlations
###conversion of discrete spike times to binary counts
Example #18
def main(p,file,save_path):
    pre = 10*pq.ms
    post = 10*pq.ms
    fid = PIO(file)
    blk = fid.read_block()
    FR,ISI,contact_trains = get_contact_sliced_trains(blk,pre=pre,post=post)
    binsize = 2*pq.ms
    for unit in blk.channel_indexes[-1].units:
        root = blk.annotations['ratnum'] + blk.annotations['whisker'] + 'c{}'.format(unit.name[-1])
        trains = contact_trains[unit.name]
        all_isi = np.array([])
        CV_array = np.array([])
        LV_array = np.array([])
        for interval in ISI[unit.name]:
            all_isi = np.concatenate([all_isi,interval])
            if np.all(np.isfinite(interval)):
                CV_array = np.concatenate([CV_array,[cv(interval)]])
                LV_array = np.concatenate([LV_array,[lv(interval)]])

        all_isi = all_isi * interval.units
        CV = np.mean(CV_array)
        LV = np.mean(LV_array)

        ## calculate data for PSTH
        b,durations = get_binary_trains(contact_trains[unit.name])
        b_times = np.where(b)[1] * pq.ms#interval.units
        b_times-=pre
        PSTH,t_edges = np.histogram(b_times,bins=np.arange(-np.array(pre),np.max(durations)+np.array(post),float(binsize)))
        plt.bar(t_edges[:-1],
                PSTH.astype('f8')/len(durations)/binsize*1000,
                width=float(binsize),
                align='edge',
                alpha=0.8
                )

        ax = plt.gca()
        thresh = 500 * pq.ms
        ax.set_xlim(-15, thresh.__int__())
        ax.set_xlabel('Time after contact (ms)')
        ax.set_ylabel('Spikes per second')
        ax.set_title('PSTH for: {}'.format(root))

        plt.savefig(os.path.join(save_path,root+'_PSTH.svg'))
        plt.close('all')
        # ============================================

        # PLOT ISIs
        plt.figure()
        thresh = 100 * pq.ms
        if len(all_isi[np.logical_and(np.isfinite(all_isi), all_isi < thresh)])==0:
            return
        ax = sns.distplot(all_isi[np.logical_and(np.isfinite(all_isi), all_isi < thresh)],
                          bins=np.arange(0,100,1),
                          kde_kws={'color':'k','lw':3,'alpha':0.5,'label':'KDE'})
        ax.set_xlabel('ISI '+all_isi.dimensionality.latex)
        ax.set_ylabel('Percentage of all ISIs')

        a_inset = plt.axes([.55, .5, .2, .2], facecolor='w')
        a_inset.grid(color='k',linestyle=':',alpha=0.4)
        a_inset.axvline(CV,color='k',lw=0.5)
        a_inset.set_title('CV = {:0.2f}\nLV = {:0.2f}'.format(CV,LV))
        a_inset.set_xlabel('CV')
        a_inset.set_ylabel('# of Contacts')
        sns.distplot(CV_array,color='g',kde=False)
        ax.set_title('ISI distribution for {}'.format(root))
        plt.savefig(os.path.join(save_path, root + '_ISI.svg'))
        plt.close('all')
Example #19
def get_CV(spike_train):
    from elephant.statistics import cv
    return cv(get_ISI(spike_train))
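
get_ISI comes from the same project and is not shown here; assuming it simply wraps elephant's isi, a minimal stand-in would be:

def get_ISI(spike_train):
    # Hypothetical stand-in for the project's helper: inter-spike intervals.
    from elephant.statistics import isi
    return isi(spike_train)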
Example #20
def iter_plot0(md):
    import seaborn as sns
    import pickle
    with open('cell_indexs.p', 'rb') as f:
        returned_list = pickle.load(f)
    index_exc = returned_list[0]
    index_inh = returned_list[1]
    index, mdf1 = md
    #wgf = {0.025:None,0.05:None,0.125:None,0.25:None,0.3:None,0.4:None,0.5:None,1.0:None,1.5:None,2.0:None,2.5:None,3.0:None}
    wgf = {
        0.0025: None,
        0.0125: None,
        0.025: None,
        0.05: None,
        0.125: None,
        0.25: None,
        0.3: None,
        0.4: None,
        0.5: None,
        1.0: None,
        1.5: None,
        2.0: None,
        2.5: None,
        3.0: None
    }

    weight_gain_factors = {k: v for k, v in enumerate(wgf.keys())}
    print(len(weight_gain_factors))
    print(weight_gain_factors.keys())
    #weight_gain_factors = {0:0.5,1:1.0,2:1.5,3:2.0,4:2.5,5:3}
    #weight_gain_factors = {:None,1.0:None,1.5:None,2.0:None,2.5:None}

    k = weight_gain_factors[index]
    #print(len(mdf1.segments),'length of block')

    ass = mdf1.analogsignals[0]

    time_points = ass.times
    avg = np.mean(ass, axis=0)  # average over signals of the segment
    #maxx = np.max(ass, axis=0)
    std = np.std(ass, axis=0)  # standard deviation over signals of the segment
    #avg_minus =
    plt.figure()
    plt.plot([i for i in range(0, len(avg))], avg)
    plt.plot([i for i in range(0, len(std))], std)

    plt.title("Mean and Standard Dev of $V_{m}$ amplitude per neuron ")
    plt.xlabel('time $(ms)$')
    plt.xlabel('Voltage $(mV)$')

    plt.savefig(str(index) + 'prs.png')
    vm_spiking = []
    vm_not_spiking = []
    spike_trains = []
    binary_trains = []
    max_spikes = 0

    vms = np.array(mdf1.analogsignals[0].as_array().T)
    #print(data)
    #for i,vm in enumerate(data):

    cnt = 0
    for spiketrain in mdf1.spiketrains:
        #spiketrain = mdf1.spiketrains[index]
        y = np.ones_like(spiketrain) * spiketrain.annotations['source_id']
        #import sklearn
        #sklearn.decomposition.NMF(y)
        # argument edges is the time interval you want to be considered.
        pspikes = pyspike.SpikeTrain(spiketrain, edges=(0, len(ass)))
        spike_trains.append(pspikes)
        if len(spiketrain) > max_spikes:
            max_spikes = len(spiketrain)

        if np.max(ass[spiketrain.annotations['source_id']]) > 0.0:
            vm_spiking.append(vms[spiketrain.annotations['source_id']])
        else:
            vm_not_spiking.append(vms[spiketrain.annotations['source_id']])
        cnt += 1

    for spiketrain in mdf1.spiketrains:
        x = conv.BinnedSpikeTrain(spiketrain,
                                  binsize=1 * pq.ms,
                                  t_start=0 * pq.s)
        binary_trains.append(x)
    end_floor = np.floor(float(mdf1.t_stop))
    dt = float(mdf1.t_stop) % end_floor
    mdf1.t_start
    #v = mdf1.take_slice_of_analogsignalarray_by_unit()
    t_axis = np.arange(float(mdf1.t_start), float(mdf1.t_stop), dt)
    plt.figure()
    plt.clf()

    plt.figure()
    plt.clf()
    cleaned = []
    data = np.array(mdf1.analogsignals[0].as_array().T)
    #print(data)
    for i, vm in enumerate(data):
        if np.max(vm) > 900.0 or np.min(vm) < -900.0:
            pass
        else:
            plt.plot(ass.times, vm)  #,label='neuron identifier '+str(i)))
            cleaned.append(vm)
            #vm = s#.as_array()[:,i]

    assert len(cleaned) < len(ass)

    print(len(cleaned))
    plt.title('neuron $V_{m}$')
    #plt.legend(loc="upper left")
    plt.savefig(str('weight_') + str(k) + 'analogsignals' + '.png')
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Voltage $(mV)$')

    plt.close()

    #pass

    plt.figure()
    plt.clf()
    plt.title('Single Neuron $V_{m}$ trace')
    plt.plot(ass.times[0:int(len(ass.times) / 10)],
             vm_not_spiking[index_exc[0]][0:int(len(ass.times) / 10)])
    plt.xlabel('$ms$')
    plt.ylabel('$mV$')
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Voltage $(mV)$')
    plt.savefig(str('weight_') + str(k) + 'eespecific_analogsignals' + '.png')
    plt.close()

    plt.figure()
    plt.clf()
    plt.title('Single Neuron $V_{m}$ trace')
    plt.plot(ass.times[0:int(len(ass.times) / 10)],
             vm_not_spiking[index_inh[0]][0:int(len(ass.times) / 10)])
    plt.xlabel('$ms$')
    plt.ylabel('$mV$')

    plt.savefig(str('weight_') + str(k) + 'inhibitory_analogsignals' + '.png')
    plt.close()

    cvsd = {}
    cvs = []
    cvsi = []
    rates = []  # firing rates per cell. in spikes a second.
    for train in spike_trains:
        rates.append(float(len(train) / 2.0))
        cvs.append(cv(train))
    #import pickle
    #with open(str('weight_')+str(k)+'coefficients_of_variation.p','wb') as f:
    #   pickle.dump([cvs,cvsd],f)
    import numpy
    a = numpy.asarray(cvs)
    numpy.savetxt('pickles/' + str('weight_') + str(k) +
                  'coefficients_of_variation.csv',
                  a,
                  delimiter=",")

    a = numpy.asarray(rates)
    numpy.savetxt('pickles/' + str('weight_') + str(k) + 'firing_of_rate.csv',
                  a,
                  delimiter=",")

    cvs = [i for i in cvs if i != 0]
    cells = [i for i in range(0, len(cvs))]

    plt.clf()
    fig, axes = plt.subplots()
    axes.set_title('Coefficient of Variation Versus Neuron')
    axes.set_xlabel('Neuron number')
    axes.set_ylabel('CV estimate')
    mcv = np.mean(cvs)
    #plt.scatter(cells,cvs)
    cvs = np.array(cvs)
    plt.scatter(index_inh, cvs[index_inh], label="inhibitory cells")
    plt.scatter(index_exc, cvs[index_exc], label="excitatory cells")
    plt.legend(loc="upper left")

    fig.tight_layout()
    plt.savefig(str('weight_') + str(k) + 'cvs_mean_' + str(mcv) + '.png')
    plt.close()

    plt.clf()
    #frequencies, power = elephant.spectral.welch_psd(ass)
    #mfreq = frequencies[np.where(power==np.max(power))[0][0]]
    #fig, axes = plt.subplots()
    axes.set_title('Firing Rate Versus Neuron Number at mean f=' +
                   str(np.mean(rates)) + str('(Spike Per Second)'))
    axes.set_xlabel('Neuron number')
    axes.set_ylabel('Spikes per second')
    rates = np.array(rates)
    plt.scatter(index_inh, rates[index_inh], label="inhibitory cells")
    plt.scatter(index_exc, rates[index_exc], label="excitatory cells")
    plt.legend(loc="upper left")
    fig.tight_layout()
    plt.savefig(str('firing_rates_per_cell_') + str(k) + str(mcv) + '.png')
    plt.close()
    '''
    import pandas as pd
    d = {'coefficent_of_variation': cvs, 'cells': cells}
    df = pd.DataFrame(data=d)

    ax = sns.regplot(x='cells', y='coefficent_of_variation', data=df)#, fit_reg=False)
    plt.savefig(str('weight_')+str(k)+'cvs_regexp_'+str(mcv)+'.png');
    plt.close()
    '''

    spike_trains = []
    ass = mdf1.analogsignals[0]
    tstop = mdf1.t_stop
    np.max(ass.times) == mdf1.t_stop
    #assert tstop == 2000
    tstop = 2000
    vm_spiking = []

    for spiketrain in mdf1.spiketrains:
        vm_spiking.append(
            mdf1.analogsignals[0][spiketrain.annotations['source_id']])
        y = np.ones_like(spiketrain) * spiketrain.annotations['source_id']

        # argument edges is the time interval you want to be considered.
        pspikes = pyspike.SpikeTrain(spiketrain, edges=(0, tstop))
        spike_trains.append(pspikes)

    # plot the spike times

    plt.clf()
    for (i, spike_train) in enumerate(spike_trains):
        plt.scatter(spike_train, i * np.ones_like(spike_train), marker='.')
    plt.xlabel('Time (ms)')
    plt.ylabel('Cell identifier')
    plt.title('Raster Plot for weight strength:' + str(k))

    plt.savefig(str('weight_') + str(k) + 'raster_plot' + '.png')
    plt.close()

    f = spk.isi_profile(spike_trains, indices=[0, 1])
    x, y = f.get_plottable_data()

    #text_file.close()
    text_file = open(str('weight_') + str(index) + 'net_out.txt', 'w')

    plt.figure()
    plt.plot(x, np.abs(y), '--k', label="ISI-profile")
    print("ISI-distance: %.8f" % f.avrg())
    f = spk.spike_profile(spike_trains, indices=[0, 1])
    x, y = f.get_plottable_data()
    plt.plot(x, y, '-b', label="SPIKE-profile")
    #print("SPIKE-distance: %.8f" % f.avrg())
    string_to_write = str("ISI-distance:") + str(f.avrg()) + str("\n\n")
    plt.title(string_to_write)
    plt.xlabel('Time $(ms)$')
    plt.ylabel('ISI distance')
    plt.legend(loc="upper left")
    plt.savefig(str('weight_') + str(k) + 'ISI_distance_bivariate' + '.png')
    plt.close()
    text_file.write(string_to_write)

    #text_file.write("SPIKE-distance: %.8f" % f.avrg())
    #text_file.write("\n\n")

    plt.figure()
    f = spk.spike_sync_profile(spike_trains[0], spike_trains[1])
    x, y = f.get_plottable_data()
    plt.plot(x, y, '--ok', label="SPIKE-SYNC profile")
    print(f, f.avrg())
    print("Average:" + str(f.avrg()))
    #print(len(f.avrg()),f.avrg())
    string_to_write = str("instantaneous synchrony:") + str(
        f.avrg()) + 'weight: ' + str(index)

    plt.title(string_to_write)
    plt.xlabel('Time $(ms)$')
    plt.ylabel('instantaneous synchrony')

    text_file.write(string_to_write)

    #text_file.write(list())

    f = spk.spike_profile(spike_trains[0], spike_trains[1])
    x, y = f.get_plottable_data()

    plt.plot(x, y, '-b', label="SPIKE-profile")
    plt.axis([0, 4000, -0.1, 1.1])
    plt.legend(loc="center right")
    plt.clf()
    plt.figure()
    plt.subplot(211)

    f = spk.spike_sync_profile(spike_trains)
    x, y = f.get_plottable_data()
    plt.plot(x, y, '-b', alpha=0.7, label="SPIKE-Sync profile")
    x1, y1 = f.get_plottable_data(averaging_window_size=50)
    plt.plot(x1, y1, '-k', lw=2.5, label="averaged SPIKE-Sync profile")
    plt.subplot(212)

    f_psth = spk.psth(spike_trains, bin_size=50.0)
    x, y = f_psth.get_plottable_data()
    plt.plot(x, y, '-k', alpha=1.0, label="PSTH")

    plt.savefig(str('weight_') + str(k) + 'multivariate_PSTH' + '.png')
    plt.close()
    plt.xlabel('Time $(ms)$')
    plt.ylabel('Spikes per bin')

    plt.clf()
    plt.figure()

    f_psth = spk.psth(spike_trains, bin_size=50.0)
    x, y = f_psth.get_plottable_data()
    plt.plot(x, y, '-k', alpha=1.0, label="PSTH")

    plt.savefig(str('weight_') + str(k) + 'exclusively_PSTH' + '.png')
    plt.close()

    plt.figure()
    isi_distance = spk.isi_distance_matrix(spike_trains)
    plt.imshow(isi_distance, interpolation='none')
    plt.title('Pairwise ISI distance, T=0-2000')
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')

    plt.title("ISI-distance")
    plt.savefig(str('weight_') + str(k) + 'ISI_distance' + '.png')
    plt.close()

    #plt.show()

    plt.figure()
    plt.clf()
    import seaborn as sns

    sns.set()
    sns.clustermap(isi_distance)  #,vmin=-,vmax=1);

    plt.savefig(str('weight_') + str(k) + 'cluster_isi_distance' + '.png')
    plt.close()

    plt.figure()
    spike_distance = spk.spike_distance_matrix(spike_trains,
                                               interval=(0, float(tstop)))

    import pickle
    with open('spike_distance_matrix.p', 'wb') as f:
        pickle.dump(spike_distance, f)

    plt.imshow(spike_distance, interpolation='none')
    plt.title("Pairwise SPIKE-distance, T=0-2000")
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')

    plt.savefig(str('weight_') + str(k) + 'spike_distance_matrix' + '.png')
    plt.close()
    plt.figure()
    plt.clf()
    sns.set()
    sns.clustermap(spike_distance)

    plt.savefig(str('weight_') + str(k) + 'cluster_spike_distance' + '.png')
    plt.close()

    plt.figure()
    spike_sync = spk.spike_sync_matrix(spike_trains,
                                       interval=(0, float(tstop)))
    plt.imshow(spike_sync, interpolation='none')
    plt.title('Pairwise Spike Synchony, T=0-2000')
    plt.xlabel('post-synaptic neuron number')
    plt.ylabel('pre-synaptic neuron number')

    import numpy
    a = numpy.asarray(spike_sync)
    numpy.savetxt("spike_sync_matrix.csv", a, delimiter=",")

    plt.figure()
    plt.clf()
    sns.clustermap(spike_sync)
    plt.savefig(
        str('weight_') + str(k) + 'cluster_spike_sync_distance' + '.png')
    plt.close()
Example #21
            plt.hist(hist_isi, bins=100)
            plt.xlim((0, 20))
        if plot_isi_scattergram:
            plt.figure("ISI scattergram {}".format(ear_index))
            # plt.ylabel("ISI + 1 (ms)")
            # plt.xlabel("ISI (ms)")
            sr = math.sqrt(len(neuron_list))
            num_cols = int(np.ceil(sr))
            num_rows = int(np.ceil(len(neuron_list) / num_cols))
            plt.subplot(num_rows, num_cols, i + 1)
            plt.plot(isi_n[0], isi_n[1], '.')
            plt.ylim((0, 30))
            plt.xlim((0, 30))
        if plot_isi_cv:
            plt.figure("CV {}".format(ear_index))
            cvs = [cv(interval) for interval in t_isi if len(interval) > 0]
            plt.subplot(len(neuron_list), 1, i + 1)
            plt.hist(cvs)  #,bins=100)
            plt.xlim((0, 2))
            # plt.ylim((0,100))

    if plot_moc:
        plt.figure("moc ear {} test {}".format(ear_index, test_index))
        n_channels = len(moc_att[ear_index])
        # a = np.sum(moc_att[ear_index][::n_channels/10],axis=0)
        # a = moc_att[ear_index][n_channels/2]
        # plt.plot(a)
        # print "ear {} zero moc spikes {}".format(ear_index,len(np.where(a<1)[0]))
        for i, moc in enumerate(moc_att[ear_index][::n_channels // 10]):
            # for i,moc in enumerate(moc_att[ear_index][::1]):
            # for i,moc in enumerate([moc_att[ear_index][n_channels/20]]):
Example #22
def compute_cv(a, v_ext, g):
    from elephant.statistics import isi, cv
    times = read_file(a, v_ext, g)
    cv_list = [cv(isi(spiketrain)) for spiketrain in times.values()]
    return np.nanmean(cv_list)
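
The np.nanmean matters here: cv(isi(...)) is nan for any unit with fewer than two spikes (there are no intervals to take a ratio of), and nanmean drops those units instead of poisoning the average. A two-line illustration:

import numpy as np
from elephant.statistics import isi, cv

print(cv(isi(np.array([5.0]))))        # nan (plus a NumPy warning): no ISIs
print(np.nanmean([1.1, np.nan, 0.9]))  # 1.0: the nan entry is skipped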
Example #24
os.chdir('Analysis/PSTH')  # /PSTH')
plt.savefig('Noise_PSTH_' + file_name + '.png', bbox_inches='tight')
plt.close()
os.chdir('../..')
print('PSTH for ' + file_name + ' saved')

#%% CV-ISI
"""
The below calculates CV-ISI from only the latter 2.5 s current step, output = half_mean_CV
Method = drop values from neospiketrain_list that occur before 1000 ms, then calculating ISI and CV from that
"""
from elephant.statistics import isi, cv

half_isi_list = [isi(spiketrain) for spiketrain in half_spiketime_list]
half_cv_list = [cv(item) for item in half_isi_list]

half_isi_list = [
    isis for i, isis in enumerate(half_isi_list) if i not in depolarized_sweeps
]

#excluding depolarized sweeps - can't use del_half_spiketime_list because we
#need to sort by current injection later

for item in exclude:
    half_cv_list[item] = np.nan

half_mean_CV = np.nanmean(half_cv_list)

#%% Plot ISI histogram for second 1.5 s half of sweep