    def test_crosscorrfunc(self):
        Nloc = int(0.1 * self.N)
        Nloceff = int(0.1 * self.Neff)
        sp = cthlp.create_correlated_spiketrains_sip(self.rate, self.T,
                                                     Nloceff, self.cc)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)

        freq, power = ctana.powerspec(bsp, self.tbin)
        freq_cross, cross = ctana.crossspec(bsp, self.tbin)
        time_auto, autof = ctana.autocorrfunc(freq, power)
        time_cross, crossf = ctana.crosscorrfunc(freq_cross, cross)

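        # locate the zero-lag bin of the correlation functions; its index
        # depends on whether the number of time lags is even or odd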
        if len(crossf[0, 0]) % 2 == 0:
            mid = int(len(crossf[0, 0]) / 2 - 1)
        else:
            mid = int(np.floor(len(crossf[0, 0]) / 2.))
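        # constant offset of the correlation functions caused by the non-zero
        # mean spike count (the signals are not centralized here) and the
        # finite observation time T; the factor 1e-3 presumably converts T
        # from ms to s (see the notes referenced in test_autocorrfunc)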
        offset = self.tbin / self.T * \
            (self.rate + self.rate ** 2 * self.T * 1e-3)
        for i in range(Nloceff):
            # consistency check with auto-correlation function
            self.assertTrue(abs(np.sum(autof[i] - crossf[i, i])) < 1e-10)
            for j in range(Nloceff):
                if i != j:
                    # c(0) = corrcoef*rate+offset
                    self.assertTrue(
                        abs(crossf[i, j][mid] -
                            (self.cc * self.rate + offset)) <
                        (self.cc * self.rate + offset) * 1e-1)
                    # c(0)/a(0) = corrcoef
                    self.assertTrue(
                        abs((crossf[i, j][mid] - offset) / np.sqrt(
                            (crossf[i, i][mid] - offset) *
                            (crossf[j, j][mid] - offset)) -
                            self.cc) < self.cc * 5e-2)

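        # repeat the test for population-averaged (units=True) spectra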
        freq, power = ctana.powerspec(bsp, self.tbin, units=True)
        freq_cross, cross = ctana.crossspec(bsp, self.tbin, units=True)
        time_auto, autof = ctana.autocorrfunc(freq, power)
        time_cross, crossf = ctana.crosscorrfunc(freq_cross, cross)
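        # expected offsets of the population-averaged auto- and
        # cross-correlation functions; only Nloceff of the Nloc units are
        # active (presumably the fraction self.p), which rescales the offsets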
        offset_auto = self.p * self.tbin / self.T * \
            (self.rate + self.rate ** 2 * self.T * 1e-3)
        offset_cross = 1. * Nloceff * \
            (Nloceff - 1) / Nloc / (Nloc - 1) * self.tbin / \
            self.T * (self.rate + self.rate ** 2 * self.T * 1e-3)
        # c(0) ~ self.p**2*corrcoef*rate+offset
        self.assertTrue(
            abs(crossf[mid] -
                (1. * Nloceff * (Nloceff - 1) / Nloc /
                 (Nloc - 1) * self.cc * self.rate + offset_cross)) <
            (1. * Nloceff * (Nloceff - 1) / Nloc /
             (Nloc - 1) * self.cc * self.rate + offset_cross) * 2e-1)

        # c(0)/a(0) = (Nloceff-1)/(Nloc-1)*corrcoef for the population average
        self.assertTrue(
            abs((crossf[mid] - offset_cross) /
                (autof[mid] - offset_auto) - 1. * (Nloceff - 1.) /
                (Nloc - 1.) * self.cc) < 1. * (Nloceff - 1.) /
            (Nloc - 1.) * self.cc * 2e-1)
    def test_autocorrfunc(self):
        Nloc = int(0.1 * self.N)
        Nloceff = int(0.1 * self.Neff)
        # sp = cthlp.create_correlated_spiketrains_sip(
        # self.rate,self.T,Nloceff,self.cc)
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, Nloceff)
        # sp = cthlp.create_gamma_spiketrains(self.rate,self.T,Nloceff,.5)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
        # sp_srt = [np.arange(0,self.T,100.) for i in xrange(Nloceff)]
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)

        freq, power = ctana.powerspec(bsp, self.tbin)
        # freq_cross,cross = ctana.crossspec(bsp,self.tbin,units=True)
        time_auto, autof = ctana.autocorrfunc(freq, power)
        # time_cross,crossf = ctana.crosscorrfunc(freq_cross,cross)
        for i in range(Nloceff):
            if len(autof[i]) % 2 == 0:
                mid = int(len(autof[i]) / 2 - 1)
            else:
                mid = int(np.floor(len(autof[i]) / 2.))
            offset = self.tbin / self.T * \
                (self.rate + self.rate ** 2 * self.T * 1e-3)
            # a(0) == rate+offset
            self.assertTrue(
                abs(autof[i][mid] -
                    (self.rate + offset)) < (self.rate + offset) * 2e-1)
            # test offset (see notes)
            self.assertTrue(
                abs(np.mean(autof[i][:mid - 1]) - offset) < offset * 2e-1)
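        # repeat the test for the population-averaged (units=True)
        # autocorrelation function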
        freq, power = ctana.powerspec(bsp, self.tbin, units=True)
        time_auto, autof = ctana.autocorrfunc(freq, power)
        if len(autof) % 2 == 0:
            mid = int(len(autof) / 2 - 1)
        else:
            mid = int(np.floor(len(autof) / 2.))
        offset = self.p * self.tbin / self.T * \
            (self.rate + self.rate ** 2 * self.T * 1e-3)
        # mean(a(0)) == p*rate+offset
        self.assertTrue(
            abs(autof[mid] - (self.p * self.rate + offset)) <
            (self.p * self.rate + offset) * 1e-1)
        # test offset (see notes)
        self.assertTrue(abs(np.mean(autof[:mid - 1]) - offset) < offset * 2e-1)
        # symmetry of autocorrelation function
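        # compare a window of roughly a quarter of the signal length on either
        # side of the zero-lag bin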
        lim = int(np.floor(len(autof) / 4))
        self.assertTrue(
            abs(
                np.sum(autof[mid - lim + 1:mid] -
                       (autof[mid + 1:mid + lim])[::-1])) < 1e-12)
    def test_crossspec(self):
        # use fewer neurons (0.2*self.N) for the full matrix (memory error!)
        Nloc = int(0.2 * self.N)
        Nloceff = int(0.2 * self.Neff)
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, Nloceff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        bsp = cthlp.centralize(bsp, time=True)
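        # after centralizing, the zero-frequency component of the spectra
        # vanishes (cf. test_powerspec)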
        freq_power, power = ctana.powerspec(bsp, self.tbin)
        freq_cross, cross = ctana.crossspec(bsp, self.tbin)
        self.assertEqual(len(freq_power), len(freq_cross))
        self.assertEqual(np.min(freq_power), np.min(freq_cross))
        self.assertEqual(np.max(freq_power), np.max(freq_cross))
        for i in range(Nloc):
            for j in range(Nloc):
                if i != j:
                    # Poisson trains are uncorrelated
                    self.assertTrue(abs(np.mean(cross[i, j])) < 1e0)
                else:
                    # compare with auto spectra
                    self.assertTrue(
                        abs(np.mean(cross[i, i] - power[i])) < 1e-12)
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.N)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        bsp = cthlp.centralize(bsp, time=True)
        freq_cross, cross = ctana.crossspec(bsp, self.tbin, units=True)
        self.assertTrue(abs(np.mean(cross)) < 1e-2)
        freq_cross, cross = ctana.crossspec(
            bsp, self.tbin, Df=self.Df, units=True)
        self.assertTrue(self.Df <= freq_cross[1])
    def test_compound_powerspec(self):
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        bsp = cthlp.centralize(bsp, time=True)
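        # reference: the compound power spectrum should equal the power
        # spectrum of the summed (population) spike-count signal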
        freq_alt, power_alt = ctana.powerspec(
            [np.sum(bsp, axis=0)], self.tbin, Df=self.Df)
        freq, power = ctana.compound_powerspec(bsp, self.tbin, Df=self.Df)
        self.assertEqual(len(freq_alt), len(freq))  # frequencies
        self.assertEqual(len(power_alt[0]), len(power))  # same number of bins
        # same spectra
        self.assertTrue(abs(np.sum(power_alt[0] - power)) < 1e-16)
    def test_powerspec(self):
        sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        freq, power = ctana.powerspec(bsp, self.tbin)
        for i in range(self.N):
            # power(0) == 1./T*integral(s(t))**2 == 1./T*sum(s_binned)**2
            self.assertTrue(
                abs(power[i][0] - 1. / self.T * 1e3 *
                    (np.sum(bsp[i]))**2) < 1e-15)
        bsp = cthlp.centralize(bsp, time=True)
        freq, power = ctana.powerspec(bsp, self.tbin)
        for i in range(self.N):
            # power(0) == 0
            self.assertTrue(abs(power[i][0]) < 1e-15)

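        # autocorrelation obtained as the inverse FFT of the power spectrum
        # (Wiener-Khinchin theorem)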
        auto = np.array([np.fft.ifft(x) for x in power])
        for i in range(self.N):
            if np.sum(power[i]) > 0.:
                # flat spectrum for Poisson: mean power equals the rate
                self.assertTrue(
                    abs(np.mean(power[i]) - self.rate) < self.rate * 2e-1)
                # auto(t) = rate*delta(t)-(offset due to centralizing)
                self.assertTrue(abs(auto[i][0] - self.rate) < self.rate * 2e-1)
                # integral(auto(t)) == 0 (due to centralizing, delta is
                # canceled by offset)
                self.assertTrue(abs(np.sum(auto[i])) < 1e-11)

        freq, power = ctana.powerspec(bsp, self.tbin, Df=self.Df)
        # the smallest non-zero frequency is at least the smoothing width Df
        self.assertTrue(self.Df <= freq[1])

        freq_units, power_units = ctana.powerspec(bsp,
                                                  self.tbin,
                                                  Df=self.Df,
                                                  units=True)
        # power_units should equal population averaged power spectrum
        self.assertTrue(
            abs(np.sum(power_units - np.mean(power, axis=0))) < 1e-10)
    def test_coherence(self):
        # use fewer neurons (0.15*self.N) for the full matrix (memory error!)
        Nloc = int(0.15 * self.N)
        Nloceff = int(0.15 * self.Neff)
        sp = cthlp.create_correlated_spiketrains_sip(self.rate, self.T,
                                                     Nloceff, self.cc)
        sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, Nloc)
        bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
        # test all pairs of active neurons
        # TODO why does the result not depend on centralizing?
        # bsp = cthlp.centralize(bsp, time=True)
        freq_cross, cross = ctana.crossspec(bsp, self.tbin, Df=self.Df)
        df = freq_cross[1] - freq_cross[0]
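        # df is the frequency resolution, used below to convert the cutoff
        # frequency fcut into an index for the low-frequency average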
        a_lfcoh = []
        for i in range(Nloc):
            for j in range(Nloc):
                if i != j:
                    if np.sum(cross[i, i]) > 0. and np.sum(cross[j, j]) > 0.:
                        lfcoh = np.mean((np.real(cross[i, j]) / np.sqrt(
                            cross[i, i] * cross[j, j]))[:int(self.fcut / df)])
                        a_lfcoh.append(lfcoh)
                        self.assertTrue(abs(lfcoh - self.cc) < self.cc * 4e-1)
                    else:
                        a_lfcoh.append(0.)
        # the average correlation coefficient is a factor p**2 smaller than
        # the correlation between active neurons
        self.assertTrue(
            abs(np.mean(a_lfcoh) - self.p**2 * self.cc) < self.cc * 1e-1)
        # test coherence of population-averaged signals
        # (careful with interpretation!)
        freq_power, power = ctana.powerspec(bsp,
                                            self.tbin,
                                            Df=self.Df,
                                            units=True)
        freq_cross, cross = ctana.crossspec(bsp,
                                            self.tbin,
                                            Df=self.Df,
                                            units=True)
        # make sure frequencies are the same for power and cross
        self.assertEqual(len(freq_power), len(freq_cross))
        self.assertEqual(np.min(freq_power), np.min(freq_cross))
        self.assertEqual(np.max(freq_power), np.max(freq_cross))
        df = freq_cross[1] - freq_cross[0]
        lfcoh = np.mean((cross / power)[:int(self.fcut / df)])
        # the low-frequency coherence of the population-averaged signals
        # should coincide with p*corrcoef
        self.assertTrue(abs(lfcoh - self.p * self.cc) < self.cc * 1e-1)