def test_crossspec(self):
    """Cross-spectrum of independent poisson trains.

    Checks that the frequency axis matches powerspec's, that the
    diagonal of the cross-spectral matrix equals the auto spectra,
    that off-diagonal entries vanish on average (independent trains),
    and that the Df smoothing window bounds the smallest frequency.
    """
    # use less neurons (0.2*self.N) for full matrix (memory error!)
    Nloc = int(0.2 * self.N)
    Nloceff = int(0.2 * self.Neff)
    sp = cthlp.create_poisson_spiketrains(self.rate, self.T, Nloceff)
    sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
    bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
    bsp = cthlp.centralize(bsp, time=True)
    freq_power, power = ctana.powerspec(bsp, self.tbin)
    freq_cross, cross = ctana.crossspec(bsp, self.tbin)
    self.assertEqual(len(freq_power), len(freq_cross))
    self.assertEqual(np.min(freq_power), np.min(freq_cross))
    self.assertEqual(np.max(freq_power), np.max(freq_cross))
    # bug fix: `xrange` does not exist in Python 3 (the rest of this
    # file already uses `range` and the print() function)
    for i in range(Nloc):
        for j in range(Nloc):
            if i != j:
                # poisson trains are uncorrelated
                self.assertTrue(abs(np.mean(cross[i, j])) < 1e0)
            else:
                # compare with auto spectra
                self.assertTrue(
                    abs(np.mean(cross[i, i] - power[i])) < 1e-12)
    sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.N)
    sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp)
    bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
    bsp = cthlp.centralize(bsp, time=True)
    freq_cross, cross = ctana.crossspec(bsp, self.tbin, units=True)
    self.assertTrue(abs(np.mean(cross)) < 1e-2)
    freq_cross, cross = ctana.crossspec(
        bsp, self.tbin, Df=self.Df, units=True)
    # smallest non-zero frequency must not undercut the smoothing window
    self.assertTrue(self.Df <= freq_cross[1])
 def test_compound_crossspec(self):
     """Compound cross-spectrum of two populations.

     The diagonal blocks must reproduce the compound power spectra of
     each population, and the off-diagonal blocks must agree with the
     plain cross-spectrum of the two population-summed signals.
     """
     # restrict to 20% of the neurons; the full matrix exhausts memory
     n_total = int(0.2 * self.N)
     n_active = int(0.2 * self.Neff)
     # binned, centralized spike counts for population a
     spikes_a = cthlp.create_poisson_spiketrains(self.rate, self.T, n_active)
     ids_a, srt_a = cthlp.sort_gdf_by_id(spikes_a, 0, n_total)
     bins_a, counts_a = cthlp.instantaneous_spike_count(
         srt_a, self.tbin, tmin=0., tmax=self.T)
     counts_a = cthlp.centralize(counts_a, time=True)
     # binned, centralized spike counts for population b
     spikes_b = cthlp.create_poisson_spiketrains(self.rate, self.T, n_active)
     ids_b, srt_b = cthlp.sort_gdf_by_id(spikes_b, 0, n_total)
     bins_b, counts_b = cthlp.instantaneous_spike_count(
         srt_b, self.tbin, tmin=0., tmax=self.T)
     counts_b = cthlp.centralize(counts_b, time=True)
     freq_a, pow_a = ctana.compound_powerspec(counts_a, self.tbin)
     freq_b, pow_b = ctana.compound_powerspec(counts_b, self.tbin)
     freq_cross, cross = ctana.compound_crossspec(
         [counts_a, counts_b], self.tbin)
     # diagonal entries reproduce the compound power spectra
     self.assertTrue(abs(np.sum(pow_a - cross[0, 0])) < 1e-10)
     self.assertTrue(abs(np.sum(pow_b - cross[1, 1])) < 1e-10)
     # off-diagonals match the cross-spectrum of the summed signals
     freq_alt, cross_alt = ctana.crossspec(
         np.array([np.sum(counts_a, axis=0), np.sum(counts_b, axis=0)]),
         self.tbin)
     self.assertTrue(abs(np.sum(cross_alt[0, 1] - cross[0, 1])) < 1e-12)
     self.assertTrue(abs(np.sum(cross_alt[1, 0] - cross[1, 0])) < 1e-12)
 def test_strip_binned_spiketrains(self):
     """strip_binned_spiketrains drops rows of silent (empty) trains."""
     spikes = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
     ids, srt = cthlp.sort_gdf_by_id(spikes, 0, self.N - 1)
     bins, counts = cthlp.instantaneous_spike_count(srt, self.tbin)
     # before stripping: one (possibly all-zero) row per id in [0, N-1]
     self.assertEqual(self.N, len(counts))
     counts = cthlp.strip_binned_spiketrains(counts)
     # after stripping only the Neff active trains remain
     self.assertEqual(self.Neff, len(counts))
    def test_crosscorrfunc(self):
        """Cross-correlation of SIP-correlated trains.

        Checks consistency with the auto-correlation function, the
        zero-lag value against corrcoef*rate plus a finite-window
        offset, and the same relations for population-averaged signals
        (units=True).
        """
        Nloc = int(0.1 * self.N)
        Nloceff = int(0.1 * self.Neff)
        sp = cthlp.create_correlated_spiketrains_sip(self.rate, self.T,
                                                     Nloceff, self.cc)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)

        freq, power = ctana.powerspec(bsp, self.tbin)
        freq_cross, cross = ctana.crossspec(bsp, self.tbin)
        time_auto, autof = ctana.autocorrfunc(freq, power)
        time_cross, crossf = ctana.crosscorrfunc(freq_cross, cross)

        # index of the zero-lag bin in the symmetric correlation window
        if len(crossf[0, 0]) % 2 == 0:
            mid = int(len(crossf[0, 0]) / 2 - 1)
        else:
            mid = int(np.floor(len(crossf[0, 0]) / 2.))
        # baseline caused by the finite observation window (see notes)
        offset = self.tbin / self.T * \
            (self.rate + self.rate ** 2 * self.T * 1e-3)
        for i in range(Nloceff):
            # consistency check with auto-correlation function
            self.assertTrue(abs(np.sum(autof[i] - crossf[i, i])) < 1e-10)
            for j in range(Nloceff):
                if i != j:
                    # c(0) = corrcoef*rate+offset
                    self.assertTrue(
                        abs(crossf[i, j][mid] -
                            (self.cc * self.rate + offset)) <
                        (self.cc * self.rate + offset) * 1e-1)
                    # c(0)/a(0) = corrcoef
                    self.assertTrue(
                        abs((crossf[i, j][mid] - offset) / np.sqrt(
                            (crossf[i, i][mid] - offset) *
                            (crossf[j, j][mid] - offset)) -
                            self.cc) < self.cc * 5e-2)

        # repeat with population averaging (units=True)
        freq, power = ctana.powerspec(bsp, self.tbin, units=True)
        freq_cross, cross = ctana.crossspec(bsp, self.tbin, units=True)
        time, autof = ctana.autocorrfunc(freq, power)
        time_cross, crossf = ctana.crosscorrfunc(freq, cross)
        offset_auto = self.p * self.tbin / self.T * \
            (self.rate + self.rate ** 2 * self.T * 1e-3)
        # pair-averaged baseline: only Nloceff of the Nloc trains fire
        offset_cross = 1. * Nloceff * \
            (Nloceff - 1) / Nloc / (Nloc - 1) * self.tbin / \
            self.T * (self.rate + self.rate ** 2 * self.T * 1e-3)
        # c(0) ~ self.p**2*corrcoef*rate+offset
        self.assertTrue(
            abs(crossf[mid] -
                (1. * Nloceff * (Nloceff - 1) / Nloc /
                 (Nloc - 1) * self.cc * self.rate + offset_cross)) <
            (1. * Nloceff * (Nloceff - 1) / Nloc /
             (Nloc - 1) * self.cc * self.rate + offset_cross) * 2e-1)

        # c(0)/a(0) = corrcoef
        self.assertTrue(
            abs((crossf[mid] - offset_cross) /
                (autof[mid] - offset_auto) - 1. * (Nloceff - 1.) /
                (Nloc - 1.) * self.cc) < 1. * (Nloceff - 1.) /
            (Nloc - 1.) * self.cc * 2e-1)
    def test_autocorrfunc(self):
        """Autocorrelation from the power spectrum.

        Checks the zero-lag value against rate plus the finite-window
        offset, the offset level itself, and the symmetry of the
        population-averaged (units=True) autocorrelation function.
        """
        Nloc = int(0.1 * self.N)
        Nloceff = int(0.1 * self.Neff)
        # sp = cthlp.create_correlated_spiketrains_sip(
        # self.rate,self.T,Nloceff,self.cc)
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, Nloceff)
        # sp = cthlp.create_gamma_spiketrains(self.rate,self.T,Nloceff,.5)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)

        freq, power = ctana.powerspec(bsp, self.tbin)
        time_auto, autof = ctana.autocorrfunc(freq, power)
        for i in range(Nloceff):
            # index of the zero-lag bin in the symmetric window
            if len(autof[i]) % 2 == 0:
                mid = int(len(autof[i]) / 2 - 1)
            else:
                mid = int(np.floor(len(autof[i]) / 2.))
            offset = self.tbin / self.T * \
                (self.rate + self.rate ** 2 * self.T * 1e-3)
            # a(0) == rate+offset
            self.assertTrue(
                abs(autof[i][mid] -
                    (self.rate + offset)) < (self.rate + offset) * 2e-1)
            # test offset (see notes)
            self.assertTrue(
                abs(np.mean(autof[i][:mid - 1]) - offset) < offset * 2e-1)
        # population-averaged version
        freq, power = ctana.powerspec(bsp, self.tbin, units=True)
        time_auto, autof = ctana.autocorrfunc(freq, power)
        # fix: leftover debug print() calls removed — tests must not
        # write diagnostic output to stdout
        if len(autof) % 2 == 0:
            mid = int(len(autof) / 2 - 1)
        else:
            mid = int(np.floor(len(autof) / 2.))
        offset = self.p * self.tbin / self.T * \
            (self.rate + self.rate ** 2 * self.T * 1e-3)
        # mean(a(0)) == p*rate+offset
        self.assertTrue(
            abs(autof[mid] - (self.p * self.rate + offset)) <
            (self.p * self.rate + offset) * 1e-1)
        # test offset (see notes)
        self.assertTrue(abs(np.mean(autof[:mid - 1]) - offset) < offset * 2e-1)
        # symmetry of autocorrelation function
        lim = int(np.floor(len(autof) / 4))
        self.assertTrue(
            abs(
                np.sum(autof[mid - lim + 1:mid] -
                       (autof[mid + 1:mid + lim])[::-1])) < 1e-12)
 def test_compound_powerspec(self):
     """Compound power spectrum equals the spectrum of the summed signal."""
     spikes = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
     ids, srt = cthlp.sort_gdf_by_id(spikes, 0, self.N)
     bins, counts = cthlp.instantaneous_spike_count(srt, self.tbin)
     counts = cthlp.centralize(counts, time=True)
     # reference: ordinary power spectrum of the population sum
     freq_ref, power_ref = ctana.powerspec(
         [np.sum(counts, axis=0)], self.tbin, Df=self.Df)
     freq, power = ctana.compound_powerspec(counts, self.tbin, Df=self.Df)
     self.assertEqual(len(freq_ref), len(freq))  # frequencies
     self.assertEqual(len(power_ref[0]), len(power))  # same number of bins
     # same spectra
     self.assertTrue(abs(np.sum(power_ref[0] - power)) < 1e-16)
 def test_sort_gdf_by_id(self):
     """sort_gdf_by_id: one train per id, plausible rates, sorted times."""
     # create only Neff poisson trains for N ids; this leaves empty
     # arrays in sp_srt
     sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
     sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
     self.assertEqual(self.N, len(sp_ids))  # N
     self.assertTrue(self.T >= np.max([np.max(x)
                     for x in sp_srt if len(x) > 0]))  # T
     for i in range(self.N):
         emp_rate = 1. * len(sp_srt[i]) / self.T * 1e3
         # fix: use a unittest assertion instead of a bare `assert`,
         # which is silently stripped when Python runs with -O
         self.assertGreaterEqual(emp_rate, 0.)
         if emp_rate > 0.:
             self.assertTrue(abs(self.rate - emp_rate) < 1e1)  # rate
             self.assertTrue(min(np.diff(sp_srt[i])) > 0.)  # time ordering
    def test_instantaneous_spike_count(self):
        """Binning: bin count, train count, and exact histogram values."""
        # create Neff poisson trains instead of N, creates empty arrays
        # in sp_srt to test binning for empty spiketrains
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)

        # test whether binning produces correct results
        # fix: a ragged nested list passed to np.array() raises on
        # modern NumPy; use a plain list of arrays (same shape that
        # sort_gdf_by_id returns)
        sp_srt = [np.array([1., 2., 5., 7.]), np.array([4., 6., 9.])]
        # ground truth
        bsp_true = np.array(
            [[1, 1, 0, 0, 1, 0, 1, 0], [0, 0, 0, 1, 0, 1, 0, 1]])
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        self.assertTrue(len(bins) == len(bsp[0]))  # number of bins
        self.assertEqual(2, len(bsp))  # number of binned spike trains
        # fix: compare via the sum of absolute differences; the signed
        # sum could be zero even if entries differ (+1/-1 cancel)
        self.assertEqual(np.sum(np.abs(bsp_true - bsp)), 0.)  # histogram
 def test_coherence(self):
     """Low-frequency coherence of SIP-correlated trains.

     The coherence of each active pair should approach the SIP
     correlation coefficient, the pair average should be scaled by
     p**2, and the coherence of population-averaged signals by p.
     """
     # use less neurons (0.2*self.N) for full matrix (memory error!)
     Nloc = int(0.15 * self.N)
     Nloceff = int(0.15 * self.Neff)
     sp = cthlp.create_correlated_spiketrains_sip(self.rate, self.T,
                                                  Nloceff, self.cc)
     sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
     bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
     # test all pairs of active neurons
     # TODO why does the result not depend on centralizing?
     # bsp = cthlp.centralize(bsp, time=True)
     freq_cross, cross = ctana.crossspec(bsp, self.tbin, Df=self.Df)
     # frequency resolution of the smoothed spectrum
     df = freq_cross[1] - freq_cross[0]
     a_lfcoh = []
     for i in range(Nloc):
         for j in range(Nloc):
             if i != j:
                 # only pairs where both trains actually fired
                 if np.sum(cross[i, i]) > 0. and np.sum(cross[j, j]) > 0.:
                     # mean coherence below the cutoff frequency fcut
                     lfcoh = np.mean((np.real(cross[i, j]) / np.sqrt(
                         cross[i, i] * cross[j, j]))[:int(self.fcut / df)])
                     a_lfcoh.append(lfcoh)
                     self.assertTrue(abs(lfcoh - self.cc) < self.cc * 4e-1)
                 else:
                     a_lfcoh.append(0.)
     # average correlation coefficient is p**2 smaller than correlation
     # between active neurons
     self.assertTrue(
         abs(np.mean(a_lfcoh) - self.p**2 * self.cc) < self.cc * 1e-1)
     # test coherence of population averaged signals
     # (careful with interpretation!)
     freq_power, power = ctana.powerspec(bsp,
                                         self.tbin,
                                         Df=self.Df,
                                         units=True)
     freq_cross, cross = ctana.crossspec(bsp,
                                         self.tbin,
                                         Df=self.Df,
                                         units=True)
     # make sure frequencies are the same for power and cross
     self.assertEqual(len(freq_power), len(freq_cross))
     self.assertEqual(np.min(freq_power), np.min(freq_cross))
     self.assertEqual(np.max(freq_power), np.max(freq_cross))
     df = freq_cross[1] - freq_cross[0]
     lfcoh = np.mean((cross / power)[:int(self.fcut / df)])
     # low frequency coherence should coincide with corrcoef
     self.assertTrue(abs(lfcoh - self.p * self.cc) < self.cc * 1e-1)
 def test_autocorrfunc_time(self):
     """Time-domain autocorrelation: a(0) ~ rate, offset level, symmetry."""
     Nloc = int(0.1 * self.N)
     Nloceff = int(0.1 * self.Neff)
     sp = cthlp.create_poisson_spiketrains(self.rate, self.T, Nloceff)
     sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
     time_auto, autof = ctana.autocorrfunc_time(sp_srt, self.tau_max,
                                                self.tbin, self.T)
     for i in range(Nloceff):
         # index of the zero-lag bin in the symmetric window
         if len(autof[i]) % 2 == 0:
             mid = int(len(autof[i]) / 2 - 1)
         else:
             mid = int(np.floor(len(autof[i]) / 2.))
         offset = self.tbin / self.T * \
             (self.rate + self.rate ** 2 * self.T * 1e-3)
         # a(0) == rate+offset
         self.assertTrue(
             abs(autof[i][mid] -
                 (self.rate + offset)) < (self.rate + offset) * 2e-1)
         # test offset (see notes)
         self.assertTrue(
             abs(np.mean(autof[i][:mid - 1]) - offset) < offset * 2e-1)
     # (dead pre-loop assignment of `lim` removed; it was overwritten
     # below without ever being read)
     time_auto, autof = ctana.autocorrfunc_time(sp_srt,
                                                self.tau_max,
                                                self.tbin,
                                                self.T,
                                                units=True)
     if len(autof) % 2 == 0:
         mid = int(len(autof) / 2 - 1)
     else:
         mid = int(np.floor(len(autof) / 2.))
     offset = self.p * self.tbin / self.T * \
         (self.rate + self.rate ** 2 * self.T * 1e-3)
     # mean(a(0)) == p*rate+offset
     self.assertTrue(
         abs(autof[mid] - (self.p * self.rate + offset)) <
         (self.p * self.rate + offset) * 1e-1)
     # test offset (see notes)
     self.assertTrue(abs(np.mean(autof[:mid - 1]) - offset) < offset * 2e-1)
     # symmetry of autocorrelation function
     # bug fix: `len(autof) / 4` is a float on Python 3 and made the
     # slices below raise TypeError; use integer floor division
     lim = len(autof) // 4
     sum_left = np.sum(autof[mid - lim + 1:mid])
     sum_right = np.sum(autof[mid + 1:mid + lim])
     sum_av = (sum_left + sum_right) / 2.
     self.assertTrue(abs(sum_left - sum_right) / sum_av < 1e-3)
 def test_create_correlated_spiketrains_sip(self):
     """SIP trains reproduce rate, train count, and pairwise correlation."""
     # only Neff of the N ids fire, which scales the measured statistics
     spikes = cthlp.create_correlated_spiketrains_sip(
         self.rate, self.T, self.Neff, self.cc)
     ids, srt = cthlp.sort_gdf_by_id(spikes, 0, self.N - 1)
     bins, counts = cthlp.instantaneous_spike_count(srt, self.tbin)
     observed_rate = 1. * np.sum(counts) / self.T * 1e3 / self.N
     self.assertTrue(abs(self.p * self.rate - observed_rate) < 5e-1)  # rate
     self.assertEqual(self.N, len(counts))  # N
     self.assertTrue(self.T >= np.max(bins))  # T
     # pairwise correlation coefficients of the active trains only
     cc_matrix = np.corrcoef(cthlp.strip_binned_spiketrains(counts))
     off_diag = [cc_matrix[u, v]
                 for u in range(self.Neff)
                 for v in range(self.Neff) if u != v]
     # normalize by all N*(N-1) ordered pairs, silent pairs contribute 0
     mean_cc = 1. / (self.N * (self.N - 1.)) * np.sum(off_diag)
     # correlation coefficient
     self.assertTrue(abs(self.p ** 2 * self.cc - mean_cc) < 2e-2)
    def test_corrcoef(self):
        """Correlation coefficients: 1 on the diagonal, cc for active
        pairs, 0 for pairs involving silent trains.
        """
        Nloc = int(0.1 * self.N)
        Nloceff = int(0.1 * self.Neff)
        sp = cthlp.create_correlated_spiketrains_sip(
            self.rate, self.T, Nloceff, self.cc)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)

        freq_cross, cross = ctana.crossspec(bsp, self.tbin)
        time_cross, crossf = ctana.crosscorrfunc(freq_cross, cross)

        corrcoef = ctana.corrcoef(time_cross, crossf)
        # bug fix: `xrange` does not exist in Python 3; use `range` as
        # the other tests in this file do
        for i in range(Nloc):
            for j in range(Nloc):
                if i < Nloceff and j < Nloceff:
                    if i == j:
                        self.assertTrue(abs(corrcoef[i, j] - 1.) < 1e-12)
                    else:
                        self.assertTrue(
                            abs(corrcoef[i, j] - self.cc) < self.cc * 1e-1)
                else:
                    # pairs with silent trains are uncorrelated
                    self.assertTrue(abs(corrcoef[i, j]) < 1e-12)
    def test_powerspec(self):
        """Power spectrum of poisson trains.

        Checks the DC value before and after centralizing, the flat
        spectrum at the firing rate, the back-transformed
        autocorrelation, the Df smoothing window, and the
        population-averaged (units=True) spectrum.
        """
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        freq, power = ctana.powerspec(bsp, self.tbin)
        for i in range(self.N):
            # power(0) == 1./T*integral(s(t))**2 == 1./T*sum(s_binned)**2
            self.assertTrue(
                abs(power[i][0] - 1. / self.T * 1e3 *
                    (np.sum(bsp[i]))**2) < 1e-15)
        bsp = cthlp.centralize(bsp, time=True)
        freq, power = ctana.powerspec(bsp, self.tbin)
        for i in range(self.N):
            # power(0) == 0
            self.assertTrue(abs(power[i][0]) < 1e-15)

        # back-transform each spectrum to its autocorrelation function
        auto = np.array([np.fft.ifft(x) for x in power])
        for i in range(self.N):
            if np.sum(power[i]) > 0.:
                # power == rate (flat spectrum for poisson with power == rate)
                self.assertTrue(
                    abs(np.mean(power[i]) - self.rate) < self.rate * 2e-1)
                # auto(t) = rate*delta(t)-(offset due to centralizing)
                self.assertTrue(abs(auto[i][0] - self.rate) < self.rate * 2e-1)
                # integral(auto(t)) == 0 (due to centralizing, delta is
                # canceled by offset)
                self.assertTrue(abs(np.sum(auto[i])) < 1e-11)

        freq, power = ctana.powerspec(bsp, self.tbin, Df=self.Df)
        # smallest frequency is larger than size of smoothing window
        self.assertTrue(self.Df <= freq[1])

        freq_units, power_units = ctana.powerspec(bsp,
                                                  self.tbin,
                                                  Df=self.Df,
                                                  units=True)
        # power_units should equal population averaged power spectrum
        self.assertTrue(
            abs(np.sum(power_units - np.mean(power, axis=0))) < 1e-10)
 def test_strip_sorted_spiketrains(self):
     """strip_sorted_spiketrains removes the empty spike trains."""
     spikes = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
     ids, srt = cthlp.sort_gdf_by_id(spikes, 0, self.N - 1)
     # one entry per id in [0, N-1], including the silent ones
     self.assertEqual(self.N, len(srt))
     srt = cthlp.strip_sorted_spiketrains(srt)
     # only the Neff active trains survive
     self.assertEqual(self.Neff, len(srt))
# For every area/population, load the recorded spikes, bin a subsample of
# non-silent neurons, and store the mean pairwise correlation coefficient
# as JSON.
for area in M.area_list:
    cc_dict[area] = {}
    # NOTE(review): LvR_list and N appear unused in this loop — possibly
    # leftovers from a removed analysis; verify before deleting
    LvR_list = []
    N = []
    for pop in M.structure[area]:
        fp = '-'.join((
            label,
            'spikes',  # assumes that the default label for spike files was used
            area,
            pop))
        fn = '{}/{}.npy'.format(load_path, fp)
        # +1000 to ensure that we really have subsample non-silent neurons in the end
        spikes = np.load(fn)
        ids = np.unique(spikes[:, 0])
        dat = ch.sort_gdf_by_id(spikes,
                                idmin=ids[0],
                                idmax=ids[0] + subsample + 1000)
        bins, hist = ch.instantaneous_spike_count(dat[1],
                                                  resolution,
                                                  tmin=tmin,
                                                  tmax=T)
        # keep the first `subsample` non-silent binned trains
        rates = ch.strip_binned_spiketrains(hist)[:subsample]
        cc = np.corrcoef(rates)
        # keep only the off-diagonal entries of the correlation matrix
        cc = np.extract(1 - np.eye(cc[0].size), cc)
        # constant/silent trains yield NaN correlations; count them as zero
        cc[np.where(np.isnan(cc))] = 0.
        cc_dict[area][pop] = np.mean(cc)

fn = os.path.join(save_path, 'corrcoeff.json')
with open(fn, 'w') as f:
    json.dump(cc_dict, f)