def test_compound_crossspec(self):
    # use fewer neurons (0.2*self.N) for the full matrix (avoids a memory error)
    Nloc = int(0.2 * self.N)
    Nloceff = int(0.2 * self.Neff)
    # population a
    sp_a = cthlp.create_poisson_spiketrains(self.rate, self.T, Nloceff)
    sp_a_ids, sp_a_srt = cthlp.sort_gdf_by_id(sp_a, 0, Nloc)
    bins_a, bsp_a = cthlp.instantaneous_spike_count(
        sp_a_srt, self.tbin, tmin=0., tmax=self.T)
    bsp_a = cthlp.centralize(bsp_a, time=True)
    # population b
    sp_b = cthlp.create_poisson_spiketrains(self.rate, self.T, Nloceff)
    sp_b_ids, sp_b_srt = cthlp.sort_gdf_by_id(sp_b, 0, Nloc)
    bins_b, bsp_b = cthlp.instantaneous_spike_count(
        sp_b_srt, self.tbin, tmin=0., tmax=self.T)
    bsp_b = cthlp.centralize(bsp_b, time=True)
    freq_a, power_a = ctana.compound_powerspec(bsp_a, self.tbin)
    freq_b, power_b = ctana.compound_powerspec(bsp_b, self.tbin)
    freq_cross, cross = ctana.compound_crossspec([bsp_a, bsp_b], self.tbin)
    # diagonal entries of the compound cross-spectrum equal the compound power spectra
    self.assertTrue(abs(np.sum(power_a - cross[0, 0])) < 1e-10)
    self.assertTrue(abs(np.sum(power_b - cross[1, 1])) < 1e-10)
    # off-diagonal entries equal the cross-spectrum of the summed population signals
    freq_cross_alt, cross_alt = ctana.crossspec(
        np.array([np.sum(bsp_a, axis=0), np.sum(bsp_b, axis=0)]),
        self.tbin)
    self.assertTrue(abs(np.sum(cross_alt[0, 1] - cross[0, 1])) < 1e-12)
    self.assertTrue(abs(np.sum(cross_alt[1, 0] - cross[1, 0])) < 1e-12)
def test_crossspec(self):
    # use fewer neurons (0.2*self.N) for the full matrix (avoids a memory error)
    Nloc = int(0.2 * self.N)
    Nloceff = int(0.2 * self.Neff)
    sp = cthlp.create_poisson_spiketrains(self.rate, self.T, Nloceff)
    sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
    bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
    bsp = cthlp.centralize(bsp, time=True)
    freq_power, power = ctana.powerspec(bsp, self.tbin)
    freq_cross, cross = ctana.crossspec(bsp, self.tbin)
    self.assertEqual(len(freq_power), len(freq_cross))
    self.assertEqual(np.min(freq_power), np.min(freq_cross))
    self.assertEqual(np.max(freq_power), np.max(freq_cross))
    for i in range(Nloc):
        for j in range(Nloc):
            if i != j:
                # Poisson trains are uncorrelated
                self.assertTrue(abs(np.mean(cross[i, j])) < 1e0)
            else:
                # compare with auto spectra
                self.assertTrue(
                    abs(np.mean(cross[i, i] - power[i])) < 1e-12)
    sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.N)
    sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp)
    bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
    bsp = cthlp.centralize(bsp, time=True)
    freq_cross, cross = ctana.crossspec(bsp, self.tbin, units=True)
    self.assertTrue(abs(np.mean(cross)) < 1e-2)
    freq_cross, cross = ctana.crossspec(
        bsp, self.tbin, Df=self.Df, units=True)
    # smallest frequency is larger than the width of the smoothing window
    self.assertTrue(self.Df <= freq_cross[1])
Example #3
def test_centralize(self):
    v1 = np.random.normal(-50, 2, int(self.T * 1e1))
    v2 = np.random.normal(-30, 2, int(self.T * 1e1))
    v_cen_time = cthlp.centralize([v1, v2], time=True)
    for v in v_cen_time:
        self.assertTrue(abs(np.mean(v)) < 1e-12)
    v_cen_units = cthlp.centralize([v1, v2], units=True)
    for v in v_cen_units.T:
        self.assertTrue(abs(np.mean(v)) < 1e-12)
    v_cen_timeunits = cthlp.centralize([v1, v2], time=True, units=True)
    self.assertTrue(abs(np.mean(v_cen_timeunits)) < 1e-12)
def test_compound_powerspec(self):
    sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
    sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N)
    bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
    bsp = cthlp.centralize(bsp, time=True)
    freq_alt, power_alt = ctana.powerspec(
        [np.sum(bsp, axis=0)], self.tbin, Df=self.Df)
    freq, power = ctana.compound_powerspec(bsp, self.tbin, Df=self.Df)
    self.assertEqual(len(freq_alt), len(freq))  # same frequencies
    self.assertEqual(len(power_alt[0]), len(power))  # same number of bins
    # same spectra
    self.assertTrue(abs(np.sum(power_alt[0] - power)) < 1e-16)
def test_powerspec(self):
    sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
    sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N)
    bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
    freq, power = ctana.powerspec(bsp, self.tbin)
    for i in range(self.N):
        # power(0) == 1./T*integral(s(t))**2 == 1./T*sum(s_binned)**2
        self.assertTrue(
            abs(power[i][0] - 1. / self.T * 1e3 *
                (np.sum(bsp[i]))**2) < 1e-15)
    bsp = cthlp.centralize(bsp, time=True)
    freq, power = ctana.powerspec(bsp, self.tbin)
    for i in range(self.N):
        # power(0) == 0
        self.assertTrue(abs(power[i][0]) < 1e-15)

    auto = np.array([np.fft.ifft(x) for x in power])
    for i in range(self.N):
        if np.sum(power[i]) > 0.:
            # flat spectrum for Poisson trains: power == rate
            self.assertTrue(
                abs(np.mean(power[i]) - self.rate) < self.rate * 2e-1)
            # auto(t) == rate*delta(t) - offset due to centralizing
            self.assertTrue(abs(auto[i][0] - self.rate) < self.rate * 2e-1)
            # integral(auto(t)) == 0 (due to centralizing, the delta is
            # canceled by the offset)
            self.assertTrue(abs(np.sum(auto[i])) < 1e-11)

    freq, power = ctana.powerspec(bsp, self.tbin, Df=self.Df)
    # smallest frequency is larger than the width of the smoothing window
    self.assertTrue(self.Df <= freq[1])

    freq_units, power_units = ctana.powerspec(bsp,
                                              self.tbin,
                                              Df=self.Df,
                                              units=True)
    # power_units should equal the population-averaged power spectrum
    self.assertTrue(
        abs(np.sum(power_units - np.mean(power, axis=0))) < 1e-10)
Example #6
    with open(fn, 'r') as f:
        rate_time_series['Parameters'] = json.load(f)

    tmin, tmax = (500., T)
    imax = int(tmax - rate_time_series['Parameters']['t_min'])
    imin = int(tmin - rate_time_series['Parameters']['t_min'])
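    # imin/imax index the rate array relative to its first sample; this
    # presumes a resolution of 1 ms per entry of the time series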


    # Order of vector auto-regressive model

    # As potentially Granger-causal populations, we consider only source
    # populations with an indegree > 1
    mask = create_mask(M.structure,
                       target_pops=[pop],
                       target_areas=[area],
                       external=False)[:, :-1]
    pairs = indices_to_population(M.structure, np.where(K[mask] > 1.))

    # Build a list of the time series of all source pairs onto the target pair
    all_rates = [ch.centralize(rate_time_series[area][pop][imin:imax], units=True)]
    target_index = 0
    source_pairs = [target_pair]
    for pair in pairs:
        source_area = pair.split('-')[0]
        source_pop = pair.split('-')[1]
        if (source_area, source_pop) != target_pair:
            all_rates.append(ch.centralize(rate_time_series[source_area][source_pop][imin:imax],
                                           units=True))
            source_pairs.append((source_area, source_pop))

    # Fit VAR with all rates
    dat = np.vstack(all_rates)
    dat = dat.transpose()
    model = VAR(dat)
    # Order of the vector auto-regressive model
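    # A minimal sketch of the order selection that typically follows,
    # assuming VAR is statsmodels.tsa.api.VAR (the snippet is truncated
    # here; maxlags and the information criterion are illustrative, not
    # the authors' choices):
    results = model.fit(maxlags=20, ic='bic')
    order = results.k_ar  # order chosen by the information criterion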
Example #7
    imax = int(tmax - rate_time_series['Parameters']['t_min'])
    imin = int(tmin - rate_time_series['Parameters']['t_min'])

    # Order of vector auto-regressive model

    # As potentially Granger-causal populations, we consider only source
    # populations with an indegree > 1
    mask = create_mask(M.structure,
                       target_pops=[pop],
                       target_areas=[area],
                       external=False)[:, :-1]
    pairs = indices_to_population(M.structure, np.where(K[mask] > 1.))

    # Build a list of the time series of all source pairs onto the target pair
    all_rates = [
        ch.centralize(rate_time_series[area][pop][imin:imax], units=True)
    ]
    target_index = 0
    source_pairs = [target_pair]
    for pair in pairs:
        source_area = pair.split('-')[0]
        source_pop = pair.split('-')[1]
        if (source_area, source_pop) != target_pair:
            all_rates.append(
                ch.centralize(
                    rate_time_series[source_area][source_pop][imin:imax],
                    units=True))
            source_pairs.append((source_area, source_pop))

    # Fit VAR with all rates
    dat = np.vstack(all_rates)
Example #8
fn2 = os.path.join(load_path,
                   'rate_time_series_full_{}.npy'.format(area2))
rate_time_series2 = np.load(fn2)

fn = os.path.join(load_path,
                  'rate_time_series_full_Parameters.json')
with open(fn, 'r') as f:
    params = json.load(f)

i_min = int(500. - params['t_min'])
i_max = int(T - params['t_min'])

rates = [rate_time_series1[i_min:i_max],
         rate_time_series2[i_min:i_max]]

dat = [ch.centralize(rates[0], units=True),
       ch.centralize(rates[1], units=True)]
freq, crossspec = corr.crossspec(dat, 1.)
t, cross = corr.crosscorrfunc(freq, crossspec)

sigma = 2.
time_range = np.arange(-5., 5.)
kernel = 1 / (np.sqrt(2.0 * np.pi) * sigma) * np.exp(-(time_range ** 2 / (2 * sigma ** 2)))
cross_conv = np.zeros_like(cross)
cross_conv[0][0] = np.convolve(kernel, cross[0][0], mode='same')
cross_conv[0][1] = np.convolve(kernel, cross[0][1], mode='same')
cross_conv[1][0] = np.convolve(kernel, cross[1][0], mode='same')
cross_conv[1][1] = np.convolve(kernel, cross[1][1], mode='same')
cross = cross_conv
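# with sigma = 2, the truncated Gaussian kernel above sums to ~0.99, so
# the smoothing approximately preserves the scale of the cross-correlation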

fp = '_'.join(('cross_correlation',

"""
Compute the functional connectivity between all areas of a given
simulation based on their time series of spiking rates or their
estimated BOLD signal.
"""

# imports inferred from the usage below; the correlation_toolbox and
# multiarea_model package paths are an assumption, not shown in the original
import os
import sys

import numpy as np
from scipy.spatial.distance import pdist, squareform

import correlation_toolbox.helper as ch
from multiarea_model import MultiAreaModel

data_path = sys.argv[1]
label = sys.argv[2]
method = sys.argv[3]

load_path = os.path.join(data_path, label, 'Analysis', method)
save_path = os.path.join(data_path, label, 'Analysis')
"""
Create MultiAreaModel instance to have access to data structures
"""
M = MultiAreaModel({})

time_series = []
for area in M.area_list:
    fn = os.path.join(load_path, '{}_{}.npy'.format(method, area))
    si = np.load(fn)
    if method == 'bold_signal':  # Cut off the long initial transient of the BOLD signal
        si = si[5000:]
    time_series.append(ch.centralize(si, units=True))

D = pdist(time_series, metric='correlation')
correlation_matrix = 1. - squareform(D)

np.save(
    os.path.join(save_path, 'functional_connectivity_{}.npy'.format(method)),
    correlation_matrix)
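
# The correlation distance is 1 minus the Pearson correlation coefficient,
# so the matrix above matches numpy's corrcoef; a small verification
# sketch (assumption: equal-length, non-constant time series):
assert np.allclose(np.corrcoef(np.array(time_series)), correlation_matrix)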