    def rebin_several(self, df):
        """
        TODO: Not sure how to write tests for the rebin method!
        """
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        bin_ps = ps.rebin(df)
        assert np.isclose(bin_ps.freq[0], bin_ps.df, atol=1e-4, rtol=1e-4)
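# ---------------------------------------------------------------------
# The snippets on this page are excerpted from a pytest suite and carry
# no imports of their own. A plausible header, assuming the classes come
# from stingray and its modeling subpackage (the module paths are a
# guess based on the names used below, not taken from the original file):
import os

import numpy as np
import scipy.stats
import scipy.optimize
import pytest
import emcee
from astropy.modeling import models

from stingray import Lightcurve, Powerspectrum, AveragedCrossspectrum
from stingray.modeling import (PSDPosterior, PSDLogLikelihood, PSDParEst,
                               ParameterEstimation, set_logprior)
# Some snippets also use Optimal1D, Window1D and a `spec` helper module;
# their origin is not shown on this page, so they are left unresolved.
# ---------------------------------------------------------------------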
    def test_classical_significances_trial_correction(self):
        ps = Powerspectrum(lc=self.lc, norm="leahy")
        # change the powers so that just one exceeds the threshold
        ps.power = np.zeros_like(ps.power) + 2.0
        index = 1
        ps.power[index] = 10.0
        threshold = 0.01
        pval = ps.classical_significances(threshold=threshold,
                                          trial_correction=True)
        assert np.size(pval) == 0
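# A minimal, self-contained sketch (not stingray's actual implementation)
# of why the trial-corrected search above comes back empty: a
# Bonferroni-style correction divides the threshold by the number of
# frequencies searched, and a single Leahy power of 10 no longer clears it.
import scipy.stats

n_trials = 1000                           # stand-in for len(ps.power)
threshold = 0.01
corrected = threshold / n_trials
# For m = 1, pure-noise Leahy powers follow a chi-squared distribution
# with 2 degrees of freedom, so the single-trial p-value of a power of
# 10 is the chi^2_2 survival function at 10:
pval = scipy.stats.chi2(2).sf(10.0)       # ~6.7e-3
print(pval > corrected)                   # True -> the candidate is rejected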
    def test_fractional_rms_in_leahy_norm(self):
        """
        fractional rms should only be *approximately* equal the standard
        deviation divided by the mean of the light curve. Therefore, we allow
        for a larger tolerance in np.isclose()
        """
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[0],
                                         max_freq=ps.freq[-1])

        rms_lc = np.std(self.lc.counts) / np.mean(self.lc.counts)
        assert np.isclose(rms_ps, rms_lc, atol=0.01)
    def test_rebin_makes_right_attributes(self):
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        # replace powers
        ps.power = np.ones_like(ps.power) * 2.0
        rebin_factor = 2.0
        bin_ps = ps.rebin(rebin_factor*ps.df)

        assert bin_ps.freq is not None
        assert bin_ps.power is not None
        assert bin_ps.df == rebin_factor * 1.0 / self.lc.tseg
        assert bin_ps.norm.lower() == "leahy"
        assert bin_ps.m == 2
        assert bin_ps.n == self.lc.time.shape[0]
        assert bin_ps.nphots == np.sum(self.lc.counts)
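# A runnable sketch of the rebin round trip exercised above, assuming the
# Lightcurve/Powerspectrum API used throughout this page; the seed and
# count rate are arbitrary.
import numpy as np
from stingray import Lightcurve, Powerspectrum

rng = np.random.RandomState(42)
time = np.arange(0, 100, 1) + 0.5
lc = Lightcurve(time, counts=rng.poisson(100.0, size=time.size), dt=1)

ps = Powerspectrum(lc=lc, norm="Leahy")
bin_ps = ps.rebin(2.0 * ps.df)
print(bin_ps.df, 2.0 / lc.tseg)    # the rebinned df should equal 2/T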
    def test_pvals_is_numpy_array(self):
        ps = Powerspectrum(lc=self.lc, norm="leahy")
        # change the powers so that just one exceeds the threshold
        ps.power = np.zeros_like(ps.power) + 2.0

        index = 1
        ps.power[index] = 10.0

        threshold = 1.0

        pval = ps.classical_significances(threshold=threshold,
                                          trial_correction=True)

        assert isinstance(pval, np.ndarray)
        assert pval.shape[0] == 2
    def test_classical_significances_threshold(self):
        ps = Powerspectrum(lc=self.lc, norm="leahy")

        # change the powers so that just one exceeds the threshold
        ps.power = np.zeros(ps.power.shape[0]) + 2.0

        index = 1
        ps.power[index] = 10.0

        threshold = 0.01

        pval = ps.classical_significances(threshold=threshold,
                                          trial_correction=False)
        assert pval[0, 0] < threshold
        assert pval[1, 0] == index
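# These two tests pin down the return convention of classical_significances:
# apparently a (2, k) array, with p-values in row 0 and the matching
# frequency-bin indices in row 1. A sketch, reusing `ps` from the rebin
# sketch earlier on this page:
pvals = ps.classical_significances(threshold=0.05, trial_correction=False)
for p, idx in zip(pvals[0], pvals[1]):
    print(f"bin {int(idx)}: p = {p:.3g}")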
    def test_fractional_rms_in_frac_norm_is_consistent(self):
        time = np.arange(0, 100, 1) + 0.5

        poisson_counts = np.random.poisson(100.0,
                                           size=time.shape[0])

        lc = Lightcurve(time, counts=poisson_counts, dt=1,
                        gti=[[0, 100]])
        ps = Powerspectrum(lc=lc, norm="leahy")
        rms_ps_l, rms_err_l = ps.compute_rms(min_freq=ps.freq[1],
                                             max_freq=ps.freq[-1],
                                             white_noise_offset=0)

        ps = Powerspectrum(lc=lc, norm="frac")
        rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1],
                                         max_freq=ps.freq[-1],
                                         white_noise_offset=0)
        assert np.allclose(rms_ps, rms_ps_l, atol=0.01)
        assert np.allclose(rms_err, rms_err_l, atol=0.01)
    def test_compute_highest_outlier_works(self):

        mp_ind = 5
        max_power = 1000.0

        ps = Powerspectrum()
        ps.freq = np.arange(10)
        ps.power = np.ones_like(ps.freq)
        ps.power[mp_ind] = max_power
        ps.m = 1
        ps.df = ps.freq[1]-ps.freq[0]
        ps.norm = "leahy"

        model = models.Const1D()
        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        res = pe.fit(lpost, [1.0])

        res.mfit = np.ones_like(ps.freq)

        max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)

        assert np.isclose(max_y[0], 2*max_power)
        assert np.isclose(max_x[0], ps.freq[mp_ind])
        assert max_ind == mp_ind
    def setup_class(cls):
        m = 1
        nfreq = 100000
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
                                 m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
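# A self-contained sketch of the fit this fixture prepares for: recover
# the amplitude of a flat Const1D model from a fake Leahy periodogram via
# the maximum-a-posteriori machinery used in the tests. The seed and
# array sizes are arbitrary.
import numpy as np
import scipy.stats
from astropy.modeling import models
from stingray import Powerspectrum
from stingray.modeling import PSDPosterior, PSDParEst, set_logprior

rng = np.random.RandomState(0)
ps = Powerspectrum()
ps.freq = np.arange(1, 10001)
ps.power = rng.exponential(size=ps.freq.size) * 2.0
ps.m = 1
ps.df = 1.0
ps.norm = "leahy"

model = models.Const1D()
priors = {"amplitude": scipy.stats.norm(loc=2.0, scale=1.0).pdf}
lpost = PSDPosterior(ps.freq, ps.power, model, m=ps.m)
lpost.logprior = set_logprior(lpost, priors)

res = PSDParEst(ps).fit(lpost, [2.0])
print(res.p_opt)                   # the amplitude should come out near 2.0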
    def test_calibrate_highest_outlier_works_with_mvn(self):
        m = 1
        nfreq = 10000
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 10

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        pe = PSDParEst(ps)

        pval = pe.calibrate_highest_outlier(loglike, [2.0], sample=None,
                                            max_post=False, seed=seed,
                                            nsim=nsim)

        assert pval > 0.001
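# A sketch of the calibration idea behind this p-value (and the sampling
# variant further down); illustrative only, not the library internals:
# simulate the highest outlier under the null model many times, then count
# how often a simulated maximum beats the observed one.
import numpy as np

rng = np.random.RandomState(100)
maxpow_sim = rng.chisquare(2, size=(10, 10000)).max(axis=1)  # 10 null searches
# For 10^4 chi^2_2 powers the typical maximum is ~2*ln(1e4) ~ 18.4, so an
# observed maximum of 18 (hypothetical) is unremarkable:
maxpow_obs = 18.0
pval = (maxpow_sim > maxpow_obs).sum() / maxpow_sim.size
print(pval)    # comes out well above 0.001, as the test asserts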
    def test_generate_data_produces_correct_distribution(self):
        model = models.Const1D()

        model.amplitude = 2.0

        p = model(self.ps.freq)

        seed = 100
        rng = np.random.RandomState(seed)

        noise = rng.exponential(size=len(p))
        power = noise*p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = 1
        ps.df = self.ps.freq[1]-self.ps.freq[0]
        ps.norm = "leahy"

        lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        pe = PSDParEst(ps)

        rng2 = np.random.RandomState(seed)
        sim_data = pe._generate_data(lpost, [2.0], rng2)

        assert np.allclose(ps.power, sim_data.power)
    def setup_class(cls):

        cls.m = 10
        nfreq = 1000000
        freq = np.arange(nfreq)
        noise = scipy.stats.chi2(2. * cls.m).rvs(size=nfreq) / float(cls.m)
        power = noise

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = cls.m
        ps.df = freq[1]-freq[0]
        ps.norm = "leahy"


        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude":p_amplitude}
    def test_simulate_highest_outlier_works(self):
        m = 1
        nfreq = 100000
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 10

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(nsim) * 2.0).T

        pe = PSDParEst(ps)

        res = pe.fit(loglike, [2.0], neg=True)

        maxpow_sim = pe.simulate_highest_outlier(s_all, loglike, [2.0],
                                                 max_post=False, seed=seed)

        assert maxpow_sim.shape[0] == nsim
        assert np.all(maxpow_sim > 20.00) and np.all(maxpow_sim < 31.0)
    def setup_class(cls):
        m = 1
        nfreq = 100000
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                 cls.model, m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [2.0]
        cls.neg = True
        cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
                                          method=cls.fitmethod,
                                          args=(cls.neg,), tol=1.e-10)
    def test_calibrate_lrt_works_with_mvn(self):

        m = 1
        nfreq = 10000
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, m=1)

        pe = PSDParEst(ps)

        pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
                                [2.0, 1.0, 2.0], sample=None,
                                max_post=False, nsim=10,
                                seed=100)

        assert pval > 0.001
    def test_calibrate_lrt_works_with_sampling(self):
        m = 1
        nfreq = 10000
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
                amplitude)


        priors = {"amplitude": p_amplitude_1}

        priors2 = {"amplitude_1": p_amplitude_1,
                      "amplitude_0": p_amplitude_0,
                      "alpha_0": p_alpha_0}


        lpost.logprior = set_logprior(lpost, priors)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        lpost2 = PSDPosterior(ps.freq, ps.power, model2, m=1)
        lpost2.logprior = set_logprior(lpost2, priors2)

        pe = PSDParEst(ps)

        pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
                                [2.0, 1.0, 2.0], sample=None,
                                max_post=True, nsim=10, nwalkers=100,
                                burnin=100, niter=20,
                                seed=100)

        assert pval > 0.001
    def setup_class(cls):
        m = 1
        nfreq = 100000
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
                amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                 cls.model, m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [2.0]
        cls.neg = True

        pe = ParameterEstimation()
        res = pe.fit(cls.lpost, cls.t0)

        cls.nwalkers = 100
        cls.niter = 200

        np.random.seed(200)
        p0 = np.array(
            [np.random.multivariate_normal(res.p_opt, res.cov) for
             i in range(cls.nwalkers)])

        cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
                                            len(res.p_opt), cls.lpost,
                                            args=[False], threads=1)

        _, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
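# A self-contained sketch of the emcee pattern this fixture sets up; with
# emcee 2.x the draws are then read from `sampler.flatchain` (newer emcee
# uses get_chain(flat=True)). The toy log-probability is an assumption,
# standing in for the PSDPosterior above.
import emcee
import numpy as np

log_prob = lambda p: -0.5 * np.sum(p ** 2)     # toy Gaussian posterior
nwalkers, ndim, niter = 100, 1, 200
p0 = np.random.multivariate_normal([2.0], [[0.1]], size=nwalkers)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
sampler.run_mcmc(p0, niter)
print(sampler.flatchain.mean(axis=0))          # drifts toward the toy mean, 0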
    def test_find_highest_outlier_works_as_expected(self):

        mp_ind = 5
        max_power = 1000.0

        ps = Powerspectrum()
        ps.freq = np.arange(10)
        ps.power = np.ones_like(ps.freq)
        ps.power[mp_ind] = max_power
        ps.m = 1
        ps.df = ps.freq[1]-ps.freq[0]
        ps.norm = "leahy"

        pe = PSDParEst(ps)

        max_x, max_ind = pe._find_outlier(ps.freq, ps.power, max_power)

        assert np.isclose(max_x, ps.freq[mp_ind])
        assert max_ind == mp_ind
    def test_plotfits_log_pow(self):
        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = self.ps.power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "none"
        pe = PSDParEst(ps)

        t0 = [2.0, 1, 1, 1]
        res = pe.fit(self.lpost, t0)

        pe.plotfits(res, res2=res, save_plot=True, log=True)

        assert os.path.exists("test_ps_fit.png")
        os.unlink("test_ps_fit.png")
    def test_fitting_with_ties_and_bounds(self, capsys):
        double_f = lambda model : model.x_0_0 * 2
        model = self.model.copy()
        model += models.Lorentz1D(amplitude=model.amplitude_0,
                                  x_0=model.x_0_0 * 2,
                                  fwhm=model.fwhm_0)
        model.x_0_0 = self.model.x_0_0
        model.amplitude_0 = self.model.amplitude_0
        model.amplitude_1 = self.model.amplitude_1
        model.fwhm_0 = self.model.fwhm_0
        model.x_0_2.tied = double_f
        model.fwhm_0.bounds = [0, 10]
        model.amplitude_0.fixed = True

        p = model(self.ps.freq)

        noise = np.random.exponential(size=len(p))
        power = noise*p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "leahy"

        pe = PSDParEst(ps, fitmethod="TNC")
        llike = PSDLogLikelihood(ps.freq, ps.power, model)

        true_pars = [self.x_0_0, self.fwhm_0,
                     self.amplitude_1,
                     model.amplitude_2.value,
                     model.fwhm_2.value]

        res = pe.fit(llike, true_pars, neg=True)

        compare_pars = [self.x_0_0, self.fwhm_0,
                        self.amplitude_1,
                        model.amplitude_2.value,
                        model.fwhm_2.value]

        assert np.allclose(compare_pars, res.p_opt, rtol=0.5)
    def test_calibrate_highest_outlier_works_with_sampling(self):
        m = 1
        nfreq = 100000
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 10

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        pval = pe.calibrate_highest_outlier(lpost, [2.0], sample=None,
                                            max_post=True, seed=seed,
                                            nsim=nsim, niter=20, nwalkers=100,
                                            burnin=100)

        assert pval > 0.001
    def test_compute_lrt_works(self):

        m = 1
        nfreq = 100000
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(10) * 2.0).T

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, m=1)

        pe = PSDParEst(ps)

        lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0], loglike2,
                                             [2.0, 1.0, 2.0], neg=True)
        lrt_sim = pe.simulate_lrts(s_all, loglike, [2.0], loglike2,
                                   [2.0, 1.0, 2.0], seed=100)

        assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
        assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)
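# The statistic being calibrated above, sketched from its usual definition
# (not quoted from the library): LRT = 2 * (max logL of the nested complex
# model - max logL of the simple model). For pure-noise data it should be
# roughly chi^2-distributed with dof = number of extra free parameters
# (here 2: the power law's amplitude and alpha), hence the small values.
import numpy as np
import scipy.stats

rng = np.random.RandomState(100)
lrt_null = scipy.stats.chi2(df=2).rvs(size=10, random_state=rng)
print(lrt_null)    # same order of magnitude as the lrt_sim values above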
    def test_init_with_nonsense_norm(self):
        nonsense_norm = "bla"
        with pytest.raises(ValueError):
            assert Powerspectrum(self.lc, norm=nonsense_norm)
    def test_init_with_wrong_norm_type(self):
        nonsense_norm = 1.0
        with pytest.raises(TypeError):
            assert Powerspectrum(self.lc, norm=nonsense_norm)

    def test_classical_significances_fails_in_rms(self):
        ps = Powerspectrum(lc=self.lc, norm="rms")
        with pytest.raises(ValueError):
            ps.classical_significances()
def createPspec(lc):
    return Powerspectrum(lc)
    def test_periodogram_types(self):
        ps = Powerspectrum(lc=self.lc)
        assert isinstance(ps.freq, np.ndarray)
        assert isinstance(ps.power, np.ndarray)
    def test_init_without_lightcurve(self):
        with pytest.raises(TypeError):
            assert Powerspectrum(self.lc.counts)
    def test_classical_significances_runs(self):
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        ps.classical_significances()
    def setup_class(cls):

        m = 1
        nfreq = 100000
        freq = np.linspace(1, 10.0, nfreq)

        rng = np.random.RandomState(
            100)  # set the seed for the random number generator
        noise = rng.exponential(size=nfreq)

        cls.model = models.Lorentz1D() + models.Const1D()

        cls.x_0_0 = 2.0
        cls.fwhm_0 = 0.1
        cls.amplitude_0 = 100.0

        cls.amplitude_1 = 2.0

        cls.model.x_0_0 = cls.x_0_0
        cls.model.fwhm_0 = cls.fwhm_0
        cls.model.amplitude_0 = cls.amplitude_0
        cls.model.amplitude_1 = cls.amplitude_1

        p = cls.model(freq)

        np.random.seed(400)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0
        cls.a2_mean, cls.a2_var = 100.0, 10.0

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        p_x_0_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_fwhm_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 0.5).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)

        cls.priors = {
            "amplitude_1": p_amplitude_1,
            "amplitude_0": p_amplitude_0,
            "x_0_0": p_x_0_0,
            "fwhm_0": p_fwhm_0
        }

        cls.lpost = PSDPosterior(cls.ps.freq,
                                 cls.ps.power,
                                 cls.model,
                                 m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
        cls.neg = True
    def test_fractional_rms_in_frac_norm(self):
        ps = Powerspectrum(lc=self.lc, norm="frac")
        rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1],
                                         max_freq=ps.freq[-1])
        rms_lc = np.std(self.lc.counts) / np.mean(self.lc.counts)
        assert np.isclose(rms_ps, rms_lc, atol=0.01)
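# A runnable version of the identity the rms tests on this page lean on:
# integrating the frac-normalized periodogram approximately recovers
# (std/mean)^2 of the light curve. The seed and length are arbitrary.
import numpy as np
from stingray import Lightcurve, Powerspectrum

rng = np.random.RandomState(1)
time = np.arange(0, 1000, 1) + 0.5
lc = Lightcurve(time, counts=rng.poisson(100.0, size=time.size), dt=1)

ps = Powerspectrum(lc=lc, norm="frac")
rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1], max_freq=ps.freq[-1])
print(rms_ps, np.std(lc.counts) / np.mean(lc.counts))   # approximately equal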
    def test_fractional_rms_fails_when_rms_not_leahy(self):
        with pytest.raises(Exception):
            ps = Powerspectrum(lc=self.lc, norm="rms")
            rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[0],
                                             max_freq=ps.freq[-1])
    def test_classical_significances_fails_in_rms(self):
        ps = Powerspectrum(lc=self.lc, norm="frac")
        with pytest.raises(ValueError):
            ps.classical_significances()
    def test_init_with_nonsense_data(self):
        nonsense_data = [None for i in range(100)]
        with pytest.raises(TypeError):
            assert Powerspectrum(nonsense_data)
    def test_fractional_rms_in_rms_norm(self):
        ps = Powerspectrum(lc=self.lc, norm="rms")
        rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1],
                                         max_freq=ps.freq[-1])
        rms_lc = np.std(self.lc.counts) / np.mean(self.lc.counts)
        assert np.isclose(rms_ps, rms_lc, atol=0.01)
    def setup_class(cls):
        m = 1
        nfreq = 100000
        freq = np.linspace(1, 1000, nfreq)

        np.random.seed(100)  # set the seed for the random number generator
        noise = np.random.exponential(size=nfreq)

        cls.model = models.PowerLaw1D() + models.Const1D()
        cls.model.x_0_0.fixed = True

        cls.alpha_0 = 2.0
        cls.amplitude_0 = 100.0
        cls.amplitude_1 = 2.0

        cls.model.alpha_0 = cls.alpha_0
        cls.model.amplitude_0 = cls.amplitude_0
        cls.model.amplitude_1 = cls.amplitude_1

        p = cls.model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0
        cls.a2_mean, cls.a2_var = 100.0, 10.0

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(
                amplitude)

        cls.priors = {
            "amplitude_1": p_amplitude_1,
            "amplitude_0": p_amplitude_0,
            "alpha_0": p_alpha_0
        }

        cls.lpost = PSDPosterior(cls.ps.freq,
                                 cls.ps.power,
                                 cls.model,
                                 m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [cls.amplitude_0, cls.alpha_0, cls.amplitude_1]
        cls.neg = True
        cls.opt = scipy.optimize.minimize(cls.lpost,
                                          cls.t0,
                                          method=cls.fitmethod,
                                          args=(cls.neg,),
                                          tol=1.e-5)

        cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
                                                      cls.opt,
                                                      neg=True)
    def test_classical_significances_with_logbinned_psd(self):
        ps = Powerspectrum(lc=self.lc, norm="leahy")
        ps_log = ps.rebin_log()
        pval = ps_log.classical_significances(threshold=1.1, trial_correction=False)

        assert len(pval[0]) == len(ps_log.power)
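# A self-contained sketch of the log-rebin round trip checked above; the
# rebinning fraction f=0.1 is arbitrary (the test uses the default).
import numpy as np
from stingray import Lightcurve, Powerspectrum

rng = np.random.RandomState(3)
time = np.arange(0, 1000, 1) + 0.5
lc = Lightcurve(time, counts=rng.poisson(100.0, size=time.size), dt=1)

ps_log = Powerspectrum(lc=lc, norm="leahy").rebin_log(f=0.1)
pval = ps_log.classical_significances(threshold=1.1, trial_correction=False)
print(len(pval[0]) == len(ps_log.power))    # every bin reports a p-value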
    def test_init_with_lightcurve(self):
        assert Powerspectrum(self.lc)
    def test_ccf(self):
        # to make testing faster, fitting is not done.
        ref_ps = Powerspectrum(self.ref_lc, norm='abs')

        ci_counts_0 = self.ci_counts[0]
        ci_times = np.arange(0, self.n_seconds * self.n_seg, self.dt)
        ci_lc = Lightcurve(ci_times, ci_counts_0, dt=self.dt)

        # rebinning factor used in `rebin_log`
        rebin_log_factor = 0.4

        acs = AveragedCrossspectrum(lc1=ci_lc, lc2=self.ref_lc,
                                    segment_size=self.n_seconds, norm='leahy',
                                    power_type="absolute")
        acs = acs.rebin_log(rebin_log_factor)

        # parest, res = fit_crossspectrum(acs, self.model, fitmethod="CG")
        acs_result_model = self.model

        # using optimal filter
        optimal_filter = Optimal1D(acs_result_model)
        optimal_filter_freq = optimal_filter(acs.freq)
        filtered_acs_power = optimal_filter_freq * np.abs(acs.power)

        # rebinning power spectrum
        new_df = spec.get_new_df(ref_ps, self.n_bins)
        ref_ps_rebinned = ref_ps.rebin(df=new_df)

        # parest, res = fit_powerspectrum(ref_ps_rebinned, self.model)
        ref_ps_rebinned_result_model = self.model

        # calculating rms from power spectrum
        ref_ps_rebinned_rms = spec.compute_rms(ref_ps_rebinned,
                                               ref_ps_rebinned_result_model,
                                               criteria="optimal")

        # calculating normalized ccf
        ccf_norm = spec.ccf(filtered_acs_power, ref_ps_rebinned_rms,
                            self.n_bins)

        # calculating ccf error
        meta = {'N_SEG': self.n_seg, 'NSECONDS': self.n_seconds, 'DT': self.dt,
                'N_BINS': self.n_bins}
        error_ccf, avg_seg_ccf = spec.ccf_error(self.ref_counts, ci_counts_0,
                                                acs_result_model,
                                                rebin_log_factor,
                                                meta, ref_ps_rebinned_rms,
                                                filter_type="optimal")

        assert np.all(np.isclose(ccf_norm, avg_seg_ccf, atol=0.01))
        assert np.all(np.isclose(error_ccf, np.zeros(shape=error_ccf.shape),
                                 atol=0.01))

        # using window function
        tophat_filter = Window1D(acs_result_model)
        tophat_filter_freq = tophat_filter(acs.freq)
        filtered_acs_power = tophat_filter_freq * np.abs(acs.power)

        ref_ps_rebinned_rms = spec.compute_rms(ref_ps_rebinned,
                                               ref_ps_rebinned_result_model,
                                               criteria="window")

        ccf_norm = spec.ccf(filtered_acs_power, ref_ps_rebinned_rms,
                            self.n_bins)

        error_ccf, avg_seg_ccf = spec.ccf_error(self.ref_counts, ci_counts_0,
                                                acs_result_model,
                                                rebin_log_factor,
                                                meta, ref_ps_rebinned_rms,
                                                filter_type="window")

        assert np.all(np.isclose(ccf_norm, avg_seg_ccf, atol=0.01))
        assert np.all(np.isclose(error_ccf, np.zeros(shape=error_ccf.shape),
                                 atol=0.01))