Example #1
    def test_calibrate_lrt_works_with_mvn(self):

        m = 1
        nfreq = 10000
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)

        pe = PSDParEst(ps)

        pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
                                [2.0, 1.0, 2.0], sample=None,
                                max_post=False, nsim=10,
                                seed=100)

        assert pval > 0.001
Example #2
    def setup_class(cls):

        cls.m = 10
        nfreq = 1000000
        freq = np.arange(nfreq)
        noise = scipy.stats.chi2(2.*cls.m).rvs(size=nfreq)/float(cls.m)
        power = noise

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = cls.m
        ps.df = freq[1]-freq[0]
        ps.norm = "leahy"


        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude":p_amplitude}
Example #3
    def setup_class(cls):

        cls.m = 10
        nfreq = 1000000
        freq = np.arange(nfreq)
        noise = scipy.stats.chi2(2.*cls.m).rvs(size=nfreq)/float(cls.m)
        power = noise

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = cls.m
        ps.df = freq[1]-freq[0]
        ps.norm = "leahy"


        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude":p_amplitude}
Example #4
    def test_generate_data_produces_correct_distribution(self):
        model = models.Const1D()

        model.amplitude = 2.0

        p = model(self.ps.freq)

        seed = 100
        rng = np.random.RandomState(seed)

        noise = rng.exponential(size=len(p))
        power = noise*p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = 1
        ps.df = self.ps.freq[1]-self.ps.freq[0]
        ps.norm = "leahy"

        lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        pe = PSDParEst(ps)

        rng2 = np.random.RandomState(seed)
        sim_data = pe._generate_data(lpost, [2.0], rng2)

        assert np.allclose(ps.power, sim_data.power)
Example #5
def dask_fit_fourier_pl_c(power_spectrum):
    """
    Fits the power law + constant observation model

    Parameters
    ----------
    power_spectrum :

    Return
    ------

    """

    # Make the random data into a Powerspectrum object
    ps = Powerspectrum()
    ps.freq = power_spectrum[0]
    ps.power = power_spectrum[1]
    ps.df = ps.freq[1] - ps.freq[0]
    ps.m = 1

    # Define the log-likelihood of the data given the model
    loglike = PSDLogLikelihood(ps.freq, ps.power, observation_model, m=ps.m)
    # Parameter estimation object
    parameter_estimate = PSDParEst(ps, fitmethod="L-BFGS-B", max_post=False)

    # Estimate the starting parameters
    ipe = InitialParameterEstimatePlC(ps.freq, ps.power)
    return parameter_estimate.fit(loglike, [ipe.amplitude, ipe.index, ipe.background],
                                  scipy_optimize_options=scipy_optimize_options)
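A minimal sketch of how this helper might be invoked, assuming the module-level names it relies on (observation_model, scipy_optimize_options, InitialParameterEstimatePlC) are defined elsewhere in the same module; since the input is a (frequencies, powers) pair, a Dask bag can map the fit over many spectra:

# Hypothetical driver; the module-level names above are assumptions
# taken from the excerpt, not confirmed definitions.
import numpy as np
import dask.bag as db

nfreq = 1000
freq = np.linspace(0.01, 10.0, nfreq)
powers = np.random.chisquare(2, size=nfreq) / 2.0  # flat, white-noise-like spectrum

spectra = [(freq, powers)]  # one (frequencies, powers) pair per spectrum
results = db.from_sequence(spectra).map(dask_fit_fourier_pl_c).compute()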
Example #6
    def setup_class(cls):
        m = 1
        nfreq = 100000
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps, cls.model)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [2.0]
        cls.neg = True
        cls.opt = scipy.optimize.minimize(cls.lpost,
                                          cls.t0,
                                          method=cls.fitmethod,
                                          args=cls.neg,
                                          tol=1.e-10)
Example #7
    def test_generate_data_produces_correct_distribution(self):
        model = models.Const1D()

        model.amplitude = 2.0

        p = model(self.ps.freq)

        seed = 100
        rng = np.random.RandomState(seed)

        noise = rng.exponential(size=len(p))
        power = noise*p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = 1
        ps.df = self.ps.freq[1]-self.ps.freq[0]
        ps.norm = "leahy"

        lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        pe = PSDParEst(ps)

        rng2 = np.random.RandomState(seed)
        sim_data = pe._generate_data(lpost, [2.0], rng2)

        assert np.allclose(ps.power, sim_data.power)
Example #8
    def setup_class(cls):
        np.random.seed(100)
        m = 1
        nfreq = 100
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
                                 m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
Example #9
    def test_compute_highest_outlier_works(self):

        mp_ind = 5
        max_power = 1000.0

        ps = Powerspectrum()
        ps.freq = np.arange(10)
        ps.power = np.ones_like(ps.freq)
        ps.power[mp_ind] = max_power
        ps.m = 1
        ps.df = ps.freq[1]-ps.freq[0]
        ps.norm = "leahy"

        model = models.Const1D()
        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}

        lpost = PSDPosterior(ps.freq, ps.power, model, 1)
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        res = pe.fit(lpost, [1.0])

        res.mfit = np.ones_like(ps.freq)

        max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)

        assert np.isclose(max_y[0], 2*max_power)
        assert np.isclose(max_x[0], ps.freq[mp_ind])
        assert max_ind == mp_ind
Example #10
    def setup_class(cls):
        m = 1
        nfreq = 100000
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
                                 m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
Example #11
    def test_simulate_highest_outlier_works(self):
        m = 1
        nfreq = 100
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 5

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(nsim) * 2.0).T

        pe = PSDParEst(ps)

        maxpow_sim = pe.simulate_highest_outlier(s_all, loglike, [2.0],
                                                 max_post=False, seed=seed)

        assert maxpow_sim.shape[0] == nsim
        assert np.all(maxpow_sim > 9.00) and np.all(maxpow_sim < 31.0)
Example #12
    def setup_class(cls):
        m = 1
        nfreq = 100000
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                 cls.model, m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [2.0]
        cls.neg = True
        cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
                                          method=cls.fitmethod,
                                          args=cls.neg, tol=1.e-10)
Example #13
    def test_calibrate_highest_outlier_works_with_mvn(self):
        m = 1
        nfreq = 10000
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 10

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        pe = PSDParEst(ps)

        pval = pe.calibrate_highest_outlier(loglike, [2.0], sample=None,
                                            max_post=False, seed=seed,
                                            nsim=nsim)

        assert pval > 0.001
Example #14
    def test_simulate_highest_outlier_works(self):
        m = 1
        nfreq = 100000
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 10

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(nsim) * 2.0).T

        pe = PSDParEst(ps)

        res = pe.fit(loglike, [2.0], neg=True)

        maxpow_sim = pe.simulate_highest_outlier(s_all, loglike, [2.0],
                                                 max_post=False, seed=seed)

        assert maxpow_sim.shape[0] == nsim
        assert np.all(maxpow_sim > 20.00) and np.all(maxpow_sim < 31.0)
Example #15
    def test_compute_highest_outlier_works(self):

        mp_ind = 5
        max_power = 1000.0

        ps = Powerspectrum()
        ps.freq = np.arange(10)
        ps.power = np.ones_like(ps.freq)
        ps.power[mp_ind] = max_power
        ps.m = 1
        ps.df = ps.freq[1]-ps.freq[0]
        ps.norm = "leahy"

        model = models.Const1D()
        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}

        lpost = PSDPosterior(ps.freq, ps.power, model, 1)
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        res = pe.fit(lpost, [1.0])

        res.mfit = np.ones_like(ps.freq)

        max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)

        assert np.isclose(max_y[0], 2*max_power)
        assert np.isclose(max_x[0], ps.freq[mp_ind])
        assert max_ind == mp_ind
Example #16
    def test_calibrate_lrt_works_with_sampling(self):
        m = 1
        nfreq = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude_1}

        priors2 = {
            "amplitude_1": p_amplitude_1,
            "amplitude_0": p_amplitude_0,
            "alpha_0": p_alpha_0
        }

        lpost.logprior = set_logprior(lpost, priors)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
        lpost2.logprior = set_logprior(lpost2, priors2)

        pe = PSDParEst(ps)

        with catch_warnings(RuntimeWarning):
            pval = pe.calibrate_lrt(lpost, [2.0],
                                    lpost2, [2.0, 1.0, 2.0],
                                    sample=None,
                                    max_post=True,
                                    nsim=10,
                                    nwalkers=10,
                                    burnin=10,
                                    niter=10,
                                    seed=100)

        assert pval > 0.001
Example #17
        def setup_class(cls):
            m = 1
            nfreq = 100
            freq = np.arange(nfreq)
            noise = np.random.exponential(size=nfreq)
            power = noise * 2.0

            ps = Powerspectrum()
            ps.freq = freq
            ps.power = power
            ps.m = m
            ps.df = freq[1] - freq[0]
            ps.norm = "leahy"

            cls.ps = ps
            cls.a_mean, cls.a_var = 2.0, 1.0

            cls.model = models.Const1D()

            p_amplitude = lambda amplitude: \
                scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
                    amplitude)

            cls.priors = {"amplitude": p_amplitude}
            cls.lpost = PSDPosterior(cls.ps.freq,
                                     cls.ps.power,
                                     cls.model,
                                     m=cls.ps.m)
            cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

            cls.fitmethod = "BFGS"
            cls.max_post = True
            cls.t0 = [2.0]
            cls.neg = True

            pe = ParameterEstimation()
            res = pe.fit(cls.lpost, cls.t0)

            cls.nwalkers = 50
            cls.niter = 100

            np.random.seed(200)
            p0 = np.array([
                np.random.multivariate_normal(res.p_opt, res.cov)
                for i in range(cls.nwalkers)
            ])

            cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
                                                len(res.p_opt),
                                                cls.lpost,
                                                args=[False],
                                                threads=1)

            with catch_warnings(RuntimeWarning):
                _, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
Example #18
    def test_calibrate_lrt_works_with_sampling(self):
        m = 1
        nfreq = 10000
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
                amplitude)


        priors = {"amplitude": p_amplitude_1}

        priors2 = {"amplitude_1": p_amplitude_1,
                      "amplitude_0": p_amplitude_0,
                      "alpha_0": p_alpha_0}


        lpost.logprior = set_logprior(lpost, priors)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
        lpost2.logprior = set_logprior(lpost2, priors2)

        pe = PSDParEst(ps)

        pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
                                [2.0, 1.0, 2.0], sample=None,
                                max_post=True, nsim=10, nwalkers=100,
                                burnin=100, niter=20,
                                seed=100)

        assert pval > 0.001
Example #19
        def setup_class(cls):
            m = 1
            nfreq = 100000
            freq = np.arange(nfreq)
            noise = np.random.exponential(size=nfreq)
            power = noise * 2.0

            ps = Powerspectrum()
            ps.freq = freq
            ps.power = power
            ps.m = m
            ps.df = freq[1] - freq[0]
            ps.norm = "leahy"

            cls.ps = ps
            cls.a_mean, cls.a_var = 2.0, 1.0

            cls.model = models.Const1D()

            p_amplitude = lambda amplitude: \
                scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
                    amplitude)

            cls.priors = {"amplitude": p_amplitude}
            cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                     cls.model, m=cls.ps.m)
            cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

            cls.fitmethod = "BFGS"
            cls.max_post = True
            cls.t0 = [2.0]
            cls.neg = True

            pe = ParameterEstimation()
            res = pe.fit(cls.lpost, cls.t0)

            cls.nwalkers = 100
            cls.niter = 200

            np.random.seed(200)
            p0 = np.array(
                [np.random.multivariate_normal(res.p_opt, res.cov) for
                 i in range(cls.nwalkers)])

            cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
                                                len(res.p_opt), cls.lpost,
                                                args=[False], threads=1)

            _, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
Example #20
    def test_fitting_with_ties_and_bounds(self, capsys):
        double_f = lambda model: model.x_0_0 * 2
        model = self.model.copy()
        model = self.model + models.Lorentz1D(amplitude=model.amplitude_0,
                                              x_0=model.x_0_0 * 2,
                                              fwhm=model.fwhm_0)
        model.x_0_0 = self.model.x_0_0
        model.amplitude_0 = self.model.amplitude_0
        model.amplitude_1 = self.model.amplitude_1
        model.fwhm_0 = self.model.fwhm_0
        model.x_0_2.tied = double_f
        model.fwhm_0.bounds = [0, 10]
        model.amplitude_0.fixed = True

        p = model(self.ps.freq)

        noise = np.random.exponential(size=len(p))
        power = noise * p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "leahy"

        pe = PSDParEst(ps)
        llike = PSDLogLikelihood(ps.freq, ps.power, model)

        true_pars = [
            self.amplitude_0, self.x_0_0, self.fwhm_0, self.amplitude_1,
            model.amplitude_2.value, model.x_0_2.value, model.fwhm_2.value
        ]
        res = pe.fit(llike, true_pars)

        res.print_summary(llike)
        out, err = capsys.readouterr()
        assert "100.00000            (Fixed)" in out
        pattern = \
            re.compile(r"5\) Parameter x_0_2\s+: [0-9]\.[0-9]{5}\s+\(Tied\)")
        assert pattern.search(out)

        compare_pars = [
            self.x_0_0, self.fwhm_0, self.amplitude_1, model.amplitude_2.value,
            model.fwhm_2.value
        ]

        assert np.all(np.isclose(compare_pars, res.p_opt, rtol=0.5))
Example #21
    def test_plotfits_log_pow(self):
        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = self.ps.power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "none"
        pe = PSDParEst(ps)

        t0 = [2.0, 1, 1, 1]
        res = pe.fit(self.lpost, t0)

        pe.plotfits(res, res2=res, save_plot=True, log=True)

        assert os.path.exists("test_ps_fit.png")
        os.unlink("test_ps_fit.png")
Example #22
    def setup_class(cls):
        np.random.seed(1000)
        m = 1
        nfreq = 100
        freq = np.arange(nfreq)
        noise = np.random.exponential(size=nfreq)
        power = noise * 2.0

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.n = freq.shape[0]
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0

        cls.model = models.Const1D()

        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        cls.priors = {"amplitude": p_amplitude}
        cls.lpost = PSDPosterior(cls.ps.freq,
                                 cls.ps.power,
                                 cls.model,
                                 m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "powell"
        cls.max_post = True
        cls.t0 = np.array([2.0])
        cls.neg = True

        cls.opt = scipy.optimize.minimize(cls.lpost,
                                          cls.t0,
                                          method=cls.fitmethod,
                                          args=cls.neg,
                                          tol=1.e-10)

        cls.opt.x = np.atleast_1d(cls.opt.x)
        cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
                                                      cls.opt,
                                                      neg=True)
Example #23
    def test_plotfits_pow(self):
        t0 = [2.0, 1, 1, 1]
        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = self.ps.power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "none"
        pe = PSDParEst(ps)
        lpost = PSDPosterior(self.ps, self.model, self.priors)

        res = pe.fit(self.lpost, t0)

        pe.plotfits(res, res2=res, save_plot=True)

        assert os.path.exists("test_ps_fit.png")
        os.unlink("test_ps_fit.png")
Example #24
    def test_fitting_with_ties_and_bounds(self, capsys, rebin):
        double_f = lambda model: model.x_0_0 * 2
        model = self.model.copy()
        model += models.Lorentz1D(amplitude=model.amplitude_0,
                                  x_0=model.x_0_0 * 2,
                                  fwhm=model.fwhm_0)
        model.x_0_0 = self.model.x_0_0
        model.amplitude_0 = self.model.amplitude_0
        model.amplitude_1 = self.model.amplitude_1
        model.fwhm_0 = self.model.fwhm_0
        model.x_0_2.tied = double_f
        model.fwhm_0.bounds = [0, 10]
        model.amplitude_0.fixed = True

        p = model(self.ps.freq)

        noise = np.random.exponential(size=len(p))
        power = noise*p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "leahy"

        if rebin != 0:
            ps = ps.rebin_log(rebin)

        pe = PSDParEst(ps, fitmethod="TNC")
        llike = PSDLogLikelihood(ps.freq, ps.power, model)

        true_pars = [self.x_0_0, self.fwhm_0,
                     self.amplitude_1,
                     model.amplitude_2.value,
                     model.fwhm_2.value]

        res = pe.fit(llike, true_pars, neg=True)

        compare_pars = [self.x_0_0, self.fwhm_0,
                        self.amplitude_1,
                        model.amplitude_2.value,
                        model.fwhm_2.value]

        assert np.allclose(compare_pars, res.p_opt, rtol=0.5)
Example #25
    def test_find_highest_outlier_works_as_expected(self):

        mp_ind = 5
        max_power = 1000.0

        ps = Powerspectrum()
        ps.freq = np.arange(10)
        ps.power = np.ones_like(ps.freq)
        ps.power[mp_ind] = max_power
        ps.m = 1
        ps.df = ps.freq[1]-ps.freq[0]
        ps.norm = "leahy"

        pe = PSDParEst(ps)

        max_x, max_ind = pe._find_outlier(ps.freq, ps.power, max_power)

        assert np.isclose(max_x, ps.freq[mp_ind])
        assert max_ind == mp_ind
Example #26
    def test_find_highest_outlier_works_as_expected(self):

        mp_ind = 5
        max_power = 1000.0

        ps = Powerspectrum()
        ps.freq = np.arange(10)
        ps.power = np.ones_like(ps.freq)
        ps.power[mp_ind] = max_power
        ps.m = 1
        ps.df = ps.freq[1]-ps.freq[0]
        ps.norm = "leahy"

        pe = PSDParEst(ps)

        max_x, max_ind = pe._find_outlier(ps.freq, ps.power, max_power)

        assert np.isclose(max_x, ps.freq[mp_ind])
        assert max_ind == mp_ind
Example #27
    def test_fitting_with_ties_and_bounds(self, capsys):
        double_f = lambda model: model.x_0_0 * 2
        model = self.model.copy()
        model += models.Lorentz1D(amplitude=model.amplitude_0,
                                  x_0=model.x_0_0 * 2,
                                  fwhm=model.fwhm_0)
        model.x_0_0 = self.model.x_0_0
        model.amplitude_0 = self.model.amplitude_0
        model.amplitude_1 = self.model.amplitude_1
        model.fwhm_0 = self.model.fwhm_0
        model.x_0_2.tied = double_f
        model.fwhm_0.bounds = [0, 10]
        model.amplitude_0.fixed = True

        p = model(self.ps.freq)

        noise = np.random.exponential(size=len(p))
        power = noise*p

        ps = Powerspectrum()
        ps.freq = self.ps.freq
        ps.power = power
        ps.m = self.ps.m
        ps.df = self.ps.df
        ps.norm = "leahy"

        pe = PSDParEst(ps, fitmethod="TNC")
        llike = PSDLogLikelihood(ps.freq, ps.power, model)

        true_pars = [self.x_0_0, self.fwhm_0,
                     self.amplitude_1,
                     model.amplitude_2.value,
                     model.fwhm_2.value]

        res = pe.fit(llike, true_pars, neg=True)

        compare_pars = [self.x_0_0, self.fwhm_0,
                        self.amplitude_1,
                        model.amplitude_2.value,
                        model.fwhm_2.value]

        assert np.allclose(compare_pars, res.p_opt, rtol=0.5)
Example #28
    def test_calibrate_highest_outlier_works_with_sampling(self):
        m = 1
        nfreq = 100
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 5

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        with catch_warnings(RuntimeWarning):
            pval = pe.calibrate_highest_outlier(lpost, [2.0],
                                                sample=None,
                                                max_post=True,
                                                seed=seed,
                                                nsim=nsim,
                                                niter=10,
                                                nwalkers=20,
                                                burnin=10)

        assert pval > 0.001
Example #29
    def test_compute_lrt_works(self):

        m = 1
        nfreq = 100000
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(10) * 2.0).T

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)

        pe = PSDParEst(ps)

        lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0],
                                             loglike2, [2.0, 1.0, 2.0],
                                             neg=True)
        lrt_sim = pe.simulate_lrts(s_all,
                                   loglike, [2.0],
                                   loglike2, [2.0, 1.0, 2.0],
                                   max_post=False,
                                   seed=100)

        assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
        assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)
Example #30
    def test_compute_lrt_works(self):

        m = 1
        nfreq = 100000
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(10) * 2.0).T

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)

        pe = PSDParEst(ps)

        lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0], loglike2,
                                             [2.0, 1.0, 2.0], neg=True)
        lrt_sim = pe.simulate_lrts(s_all, loglike, [2.0], loglike2,
                                   [2.0, 1.0, 2.0],
                                   seed=100)

        assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
        assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)
Example #31
    def test_calibrate_highest_outlier_works_with_sampling(self):
        m = 1
        nfreq = 100000
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 10

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        pval = pe.calibrate_highest_outlier(lpost, [2.0], sample=None,
                                            max_post=True, seed=seed,
                                            nsim=nsim, niter=20, nwalkers=100,
                                            burnin=100)

        assert pval > 0.001
Example #32
    def test_calibrate_lrt_works_as_expected(self):

        m = 1
        df = 0.01
        freq = np.arange(df, 5 + df, df)
        nfreq = freq.size
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = df
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(10) * 2.0).T

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, m=1)

        pe = PSDParEst(ps)

        pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
                                [2.0, 1.0, 2.0], sample=s_all,
                                max_post=False, nsim=5,
                                seed=100)

        assert pval > 0.001
Example #33
# Assumed context for this excerpt: the imports and the two model
# components `pl` (power law, reference frequency held fixed) and
# `c` (constant) that the script combines below.
import numpy as np
import scipy.stats
from astropy.modeling import models
from stingray import Powerspectrum
from stingray.modeling import (PSDLogLikelihood, PSDParEst,
                               PSDPosterior, set_logprior)

pl = models.PowerLaw1D()
pl.x_0.fixed = True
c = models.Const1D()

plc = pl + c

# parameters for fake data.
alpha = 2.0
amplitude = 5.0
white_noise = 2.0
freq = np.linspace(0.01, 10.0, int(10.0 / 0.01))
from astropy.modeling.fitting import _fitter_to_model_params
_fitter_to_model_params(plc, [amplitude, alpha, white_noise])
psd_shape = plc(freq)
powers = psd_shape * np.random.chisquare(2, size=psd_shape.shape[0]) / 2.0

ps = Powerspectrum()
ps.freq = freq
ps.power = powers
ps.df = ps.freq[1] - ps.freq[0]
ps.m = 1
loglike = PSDLogLikelihood(ps.freq, ps.power, plc, m=ps.m)
test_pars = [1, 5, 100]
parest = PSDParEst(ps, fitmethod="L-BFGS-B", max_post=True)

# flat prior for the power law index
p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.))

# flat prior for the power law amplitude
p_amplitude = lambda amplitude: ((0.01 <= amplitude) & (amplitude <= 10.0))

# normal prior for the white noise parameter
p_whitenoise = lambda white_noise: scipy.stats.norm(2.0, 0.1).pdf(white_noise)

priors = {}
# Map each prior to its model parameter name; for plc = pl + c the free
# parameters are amplitude_0, alpha_0 and amplitude_1 (x_0_0 is fixed).
priors["alpha_0"] = p_alpha
priors["amplitude_0"] = p_amplitude
priors["amplitude_1"] = p_whitenoise
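One plausible way to finish this excerpt, assuming the same stingray API used in the other examples (set_logprior attaching the priors to a PSDPosterior, and PSDParEst.fit performing the maximum-a-posteriori fit):

# Sketch of a continuation under the assumptions above: build the
# posterior, attach the priors, and run a MAP fit from test_pars.
lpost = PSDPosterior(ps.freq, ps.power, plc, m=ps.m)
lpost.logprior = set_logprior(lpost, priors)

res = parest.fit(lpost, test_pars, neg=True)
print(res.p_opt)  # best-fit [amplitude, alpha, white_noise]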
Example #34
    def setup_class(cls):
        m = 1
        nfreq = 100000
        freq = np.linspace(1, 1000, nfreq)

        np.random.seed(100)  # set the seed for the random number generator
        noise = np.random.exponential(size=nfreq)

        cls.model = models.PowerLaw1D() + models.Const1D()
        cls.model.x_0_0.fixed = True

        cls.alpha_0 = 2.0
        cls.amplitude_0 = 100.0
        cls.amplitude_1 = 2.0

        cls.model.alpha_0 = cls.alpha_0
        cls.model.amplitude_0 = cls.amplitude_0
        cls.model.amplitude_1 = cls.amplitude_1

        p = cls.model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0
        cls.a2_mean, cls.a2_var = 100.0, 10.0

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(
                amplitude)

        cls.priors = {"amplitude_1": p_amplitude_1,
                      "amplitude_0": p_amplitude_0,
                      "alpha_0": p_alpha_0}

        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                 cls.model, m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [cls.amplitude_0, cls.alpha_0, cls.amplitude_1]
        cls.neg = True
        cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
                                          method=cls.fitmethod,
                                          args=cls.neg, tol=1.e-5)

        cls.optres = OptimizationResultsSubclassDummy(cls.lpost, cls.opt,
                                                      neg=True)
Example #35
    def setup_class(cls):
        m = 1
        nfreq = 100000
        freq = np.linspace(1, 1000, nfreq)

        np.random.seed(100)  # set the seed for the random number generator
        noise = np.random.exponential(size=nfreq)

        cls.model = models.PowerLaw1D() + models.Const1D()
        cls.model.x_0_0.fixed = True

        cls.alpha_0 = 2.0
        cls.amplitude_0 = 100.0
        cls.amplitude_1 = 2.0

        cls.model.alpha_0 = cls.alpha_0
        cls.model.amplitude_0 = cls.amplitude_0
        cls.model.amplitude_1 = cls.amplitude_1

        p = cls.model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0
        cls.a2_mean, cls.a2_var = 100.0, 10.0

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(
                amplitude)

        cls.priors = {
            "amplitude_1": p_amplitude_1,
            "amplitude_0": p_amplitude_0,
            "alpha_0": p_alpha_0
        }

        cls.lpost = PSDPosterior(cls.ps, cls.model)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [cls.amplitude_0, cls.alpha_0, cls.amplitude_1]
        cls.neg = True
        cls.opt = scipy.optimize.minimize(cls.lpost,
                                          cls.t0,
                                          method=cls.fitmethod,
                                          args=cls.neg,
                                          tol=1.e-5)

        cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
                                                      cls.opt,
                                                      neg=True)
Example #36
    def setup_class(cls):

        m = 1
        nfreq = 100000
        freq = np.linspace(1, 10.0, nfreq)

        rng = np.random.RandomState(100)  # set the seed for the random number generator
        noise = rng.exponential(size=nfreq)

        cls.model = models.Lorentz1D() + models.Const1D()

        cls.x_0_0 = 2.0
        cls.fwhm_0 = 0.1
        cls.amplitude_0 = 100.0

        cls.amplitude_1 = 2.0

        cls.model.x_0_0 = cls.x_0_0
        cls.model.fwhm_0 = cls.fwhm_0
        cls.model.amplitude_0 = cls.amplitude_0
        cls.model.amplitude_1 = cls.amplitude_1

        p = cls.model(freq)

        np.random.seed(400)
        power = noise*p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1]-freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0
        cls.a2_mean, cls.a2_var = 100.0, 10.0

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        p_x_0_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_fwhm_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 0.5).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)

        cls.priors = {"amplitude_1": p_amplitude_1,
                      "amplitude_0": p_amplitude_0,
                      "x_0_0": p_x_0_0,
                      "fwhm_0": p_fwhm_0}

        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                 cls.model, m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
        cls.neg = True
Example #37
    def setup_class(cls):

        m = 1
        nfreq = 100
        freq = np.linspace(0, 10.0, nfreq + 1)[1:]


        rng = np.random.RandomState(100)  # set the seed for the random number generator
        noise = rng.exponential(size=nfreq)

        cls.model = models.Lorentz1D() + models.Const1D()

        cls.x_0_0 = 2.0
        cls.fwhm_0 = 0.05
        cls.amplitude_0 = 1000.0

        cls.amplitude_1 = 2.0
        cls.model.x_0_0 = cls.x_0_0
        cls.model.fwhm_0 = cls.fwhm_0
        cls.model.amplitude_0 = cls.amplitude_0
        cls.model.amplitude_1 = cls.amplitude_1

        p = cls.model(freq)

        np.random.seed(400)
        power = noise*p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1]-freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0
        cls.a2_mean, cls.a2_var = 100.0, 10.0

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        p_x_0_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_fwhm_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 0.5).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)

        cls.priors = {"amplitude_1": p_amplitude_1,
                      "amplitude_0": p_amplitude_0,
                      "x_0_0": p_x_0_0,
                      "fwhm_0": p_fwhm_0}

        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                                 cls.model, m=cls.ps.m)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "powell"
        cls.max_post = True
        cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
        cls.neg = True