def test_calibrate_lrt_works_with_sampling(self):
        m = 1
        nfreq = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude_1}

        priors2 = {
            "amplitude_1": p_amplitude_1,
            "amplitude_0": p_amplitude_0,
            "alpha_0": p_alpha_0
        }

        lpost.logprior = set_logprior(lpost, priors)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        lpost2 = PSDPosterior(ps.freq, ps.power, model2, m=1)
        lpost2.logprior = set_logprior(lpost2, priors2)

        pe = PSDParEst(ps)

        with catch_warnings(RuntimeWarning):
            pval = pe.calibrate_lrt(lpost, [2.0],
                                    lpost2, [2.0, 1.0, 2.0],
                                    sample=None,
                                    max_post=True,
                                    nsim=10,
                                    nwalkers=10,
                                    burnin=10,
                                    niter=10,
                                    seed=100)

        assert pval > 0.001
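These snippets exercise Stingray's Bayesian power-spectrum fitting classes (Powerspectrum, PSDPosterior, PSDParEst) together with set_logprior. To run one of them standalone, roughly the following imports are needed; the stingray module paths and the source of catch_warnings are my assumptions about where these names live, not something the snippets themselves state:

import copy

import numpy as np
import scipy.stats
import pytest
from astropy.modeling import models

# Assumed locations within stingray; adjust to the installed version.
from stingray import Powerspectrum
from stingray.modeling import PSDPosterior, PSDParEst, set_logprior
# logmin and IncorrectParameterError are assumed to live in the posterior module.
from stingray.modeling.posterior import logmin, IncorrectParameterError

# catch_warnings is presumably astropy's (now deprecated) test helper:
# from astropy.tests.helper import catch_warnings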
Example #2
    def test_logprior_fails_without_prior(self):
        lpost = PSDPosterior(self.ps.freq,
                             self.ps.power,
                             self.model,
                             m=self.ps.m)

        with pytest.raises(AttributeError):
            lpost.logprior([1])
Example #3
    def test_logprior(self):
        t0 = [2.0]

        lpost = PSDPosterior(self.ps, self.model)
        lpost.logprior = set_logprior(lpost, self.priors)

        lp_test = lpost.logprior(t0)
        lp = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
        assert lp == lp_test
Example #4
    def test_logprior(self):
        t0 = [2.0]

        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        lp_test = lpost.logprior(t0)
        lp = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
        assert lp == lp_test
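Several snippets above and below refer to self.ps, self.model, self.priors, self.m, self.a2_mean and self.a2_var, which are created in a setup fixture the excerpts do not show. A minimal sketch of such a fixture, mirroring the data construction in the first example (the attribute names come from the snippets; the concrete values are placeholders), could look like this:

    @classmethod
    def setup_class(cls):
        cls.m = 1
        nfreq = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)

        # flat mean model with a known amplitude
        cls.model = models.Const1D()
        cls.model.amplitude = 2.0

        cls.ps = Powerspectrum()
        cls.ps.freq = freq
        cls.ps.power = noise * cls.model(freq)
        cls.ps.m = cls.m
        cls.ps.df = freq[1] - freq[0]
        cls.ps.norm = "leahy"

        # Gaussian prior on the Const1D amplitude, as in the tests above
        cls.priors = {"amplitude":
                      lambda amplitude: scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)}

        # hyperparameters used by p_amplitude_0 in the sampling tests
        cls.a2_mean = 2.0
        cls.a2_var = 1.0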
Example #5
    def test_calibrate_lrt_works_with_sampling(self):
        m = 1
        nfreq = 10000
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
                amplitude)


        priors = {"amplitude": p_amplitude_1}

        priors2 = {"amplitude_1": p_amplitude_1,
                   "amplitude_0": p_amplitude_0,
                   "alpha_0": p_alpha_0}

        lpost.logprior = set_logprior(lpost, priors)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        lpost2 = PSDPosterior(ps.freq, ps.power, model2, m=1)
        lpost2.logprior = set_logprior(lpost2, priors2)

        pe = PSDParEst(ps)

        pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
                                [2.0, 1.0, 2.0], sample=None,
                                max_post=True, nsim=10, nwalkers=100,
                                burnin=100, niter=20,
                                seed=100)

        assert pval > 0.001
Example #6
    def test_compute_highest_outlier_works(self):

        mp_ind = 5
        max_power = 1000.0

        ps = Powerspectrum()
        ps.freq = np.arange(10)
        ps.power = np.ones_like(ps.freq)
        ps.power[mp_ind] = max_power
        ps.m = 1
        ps.df = ps.freq[1]-ps.freq[0]
        ps.norm = "leahy"

        model = models.Const1D()
        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        res = pe.fit(lpost, [1.0])

        res.mfit = np.ones_like(ps.freq)

        max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)

        assert np.isclose(max_y[0], 2*max_power)
        assert np.isclose(max_x[0], ps.freq[mp_ind])
        assert max_ind == mp_ind
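The factor of two in the assertions reflects the statistic the test expects for a Leahy-normalised periodogram with m = 1: for pure noise, twice the data-to-model ratio in each bin follows a chi-squared distribution with two degrees of freedom, so the highest outlier is the bin that maximises that ratio. Continuing with the ps and res objects from the test above, a rough reproduction of the expected values (my assumption about the statistic, not the library's exact implementation) is:

        # assumption: outlier statistic ~ 2 * power / model for m = 1
        ratio = 2.0 * ps.power / res.mfit
        max_ind_expected = np.argmax(ratio)          # -> 5 (== mp_ind)
        max_y_expected = ratio[max_ind_expected]     # -> 2000.0 == 2 * max_power
        max_x_expected = ps.freq[max_ind_expected]   # -> ps.freq[mp_ind]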
Example #7
    def test_making_posterior(self):
        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        assert np.allclose(lpost.x, self.ps.freq)
        assert np.allclose(lpost.y, self.ps.power)
Example #8
    def test_correct_number_of_parameters(self):
        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        with pytest.raises(IncorrectParameterError):
            lpost([2,3])
Example #9
    def test_correct_number_of_parameters(self):
        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        with pytest.raises(IncorrectParameterError):
            lpost([2,3])
Example #10
    def test_making_posterior(self):
        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        assert np.allclose(lpost.x, self.ps.freq)
        assert np.allclose(lpost.y, self.ps.power)
Example #11
    def test_compute_highest_outlier_works(self):

        mp_ind = 5
        max_power = 1000.0

        ps = Powerspectrum()
        ps.freq = np.arange(10)
        ps.power = np.ones_like(ps.freq)
        ps.power[mp_ind] = max_power
        ps.m = 1
        ps.df = ps.freq[1]-ps.freq[0]
        ps.norm = "leahy"

        model = models.Const1D()
        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        res = pe.fit(lpost, [1.0])

        res.mfit = np.ones_like(ps.freq)

        max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)

        assert np.isclose(max_y[0], 2*max_power)
        assert np.isclose(max_x[0], ps.freq[mp_ind])
        assert max_ind == mp_ind
Example #12
    def test_negative_loglikelihood(self):
        t0 = [2.0]
        # evaluate the mean model at the test parameters
        self.model.amplitude = t0[0]
        m = self.model(self.ps.freq[1:])
        loglike = np.sum(self.ps.power[1:]/m + np.log(m))

        lpost = PSDPosterior(self.ps, self.model)
        lpost.logprior = set_logprior(lpost, self.priors)

        loglike_test = lpost.loglikelihood(t0, neg=True)

        assert np.isclose(loglike, loglike_test)
Example #13
    def test_counts_are_nan(self):

        ps_nan = copy.copy(self.ps)
        ps_nan.power = np.nan*np.ones_like(self.ps.freq)

        t0 = [2.0]
        lpost = PSDPosterior(ps_nan.freq, ps_nan.power, self.model)
        lpost.logprior = set_logprior(lpost, self.priors)

        assert np.isclose(lpost(t0), logmin, 1e-5)
Example #14
    def test_counts_are_nan(self):

        ps_nan = copy.copy(self.ps)
        ps_nan.power = np.nan * np.ones_like(self.ps.freq)

        t0 = [2.0]
        lpost = PSDPosterior(ps_nan.freq, ps_nan.power, self.model)
        lpost.logprior = set_logprior(lpost, self.priors)

        assert np.isclose(lpost(t0), logmin, 1e-5)
Example #15
    def test_negative_loglikelihood(self):
        t0 = [2.0]
        # evaluate the mean model at the test parameters
        self.model.amplitude = t0[0]
        m = self.model(self.ps.freq[1:])
        loglike = np.sum(self.ps.power[1:]/m + np.log(m))

        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        loglike_test = lpost.loglikelihood(t0, neg=True)

        assert np.isclose(loglike, loglike_test)
Example #16
    def test_loglikelihood(self):
        t0 = [2.0]
        self.model.amplitude = t0[0]
        mean_model = self.model(self.ps.freq)

        loglike = -np.sum(np.log(mean_model)) - np.sum(self.ps.power/mean_model)

        lpost = PSDPosterior(self.ps, self.model)
        lpost.logprior = set_logprior(lpost, self.priors)

        loglike_test = lpost.loglikelihood(t0, neg=False)

        assert np.isclose(loglike, loglike_test)
Example #17
    def test_negative_posterior(self):
        t0 = [2.0]
        # evaluate the mean model at the test parameters
        self.model.amplitude = t0[0]
        m = self.model(self.ps.freq[1:])
        lpost = PSDPosterior(self.ps, self.model)
        lpost.logprior = set_logprior(lpost, self.priors)

        post_test = lpost(t0, neg=True)

        loglike = -np.sum(self.ps.power[1:]/m + np.log(m))
        logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
        post = -loglike - logprior

        assert np.isclose(post_test, post, atol=1.e-10)
Example #18
    def test_negative_posterior(self):
        t0 = [2.0]
        # evaluate the mean model at the test parameters
        self.model.amplitude = t0[0]
        m = self.model(self.ps.freq[1:])
        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        post_test = lpost(t0, neg=True)

        loglike = -np.sum(self.ps.power[1:]/m + np.log(m))
        logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
        post = -loglike - logprior

        assert np.isclose(post_test, post, atol=1.e-10)
Example #19
    def test_loglikelihood(self):
        t0 = [2.0]
        self.model.amplitude = t0[0]
        mean_model = self.model(self.ps.freq)

        loglike = -np.sum(np.log(mean_model)) - np.sum(self.ps.power/mean_model)

        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        loglike_test = lpost.loglikelihood(t0, neg=False)

        assert np.isclose(loglike, loglike_test)
Example #20
    def test_posterior(self):
        t0 = [2.0]
        self.model.amplitude = t0[0]

        m = self.model(self.ps.freq[1:])
        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        post_test = lpost(t0, neg=False)

        loglike = -np.sum(self.ps.power[1:]/m + np.log(m))
        logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
        post = loglike + logprior

        assert np.isclose(post_test, post, atol=1.e-10)
Example #21
    def test_negative_loglikelihood(self):
        t0 = [2.0]
        self.model.amplitude = t0[0]

        mean_model = self.model(self.ps.freq)

        loglike = 2.0 * self.m * (np.sum(np.log(mean_model)) + np.sum(
            self.ps.power / mean_model) + np.sum(
                (2.0 / (2. * self.m) - 1.0) * np.log(self.ps.power)))

        lpost = PSDPosterior(self.ps, self.model)
        lpost.logprior = set_logprior(lpost, self.priors)

        loglike_test = lpost.loglikelihood(t0, neg=True)

        assert np.isclose(loglike, loglike_test)
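Written out, the quantity this test constructs (and compares against lpost.loglikelihood(t0, neg=True)) is the negative log-likelihood for a periodogram averaged over m segments, up to parameter-independent constants:

\[
-\ln L(\theta) = 2m\left[\sum_j \ln S_j(\theta) + \sum_j \frac{P_j}{S_j(\theta)} + \left(\frac{2}{2m} - 1\right)\sum_j \ln P_j\right],
\]

where $P_j$ is ps.power, $S_j(\theta)$ is the mean model evaluated at t0, and $m$ is self.m. The last sum does not depend on $\theta$, but the test's equality assertion implies it is part of the value the library returns, which is why it is included here.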
Example #22
    def test_negative_posterior(self):
        t0 = [2.0]
        self.model.amplitude = t0[0]

        mean_model = self.model(self.ps.freq)
        lpost = PSDPosterior(self.ps, self.model)
        lpost.logprior = set_logprior(lpost, self.priors)

        post_test = lpost(t0, neg=True)

        loglike = -2.0 * self.m * (np.sum(np.log(mean_model)) + np.sum(
            self.ps.power / mean_model) + np.sum(
                (2.0 / (2. * self.m) - 1.0) * np.log(self.ps.power)))

        logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
        post = -loglike - logprior

        assert np.isclose(post_test, post, atol=1.e-10)
Example #23
    def test_negative_loglikelihood(self):
        t0 = [2.0]
        self.model.amplitude = t0[0]

        mean_model = self.model(self.ps.freq)

        loglike = 2.0*self.m*(np.sum(np.log(mean_model)) +
                               np.sum(self.ps.power/mean_model) +
                               np.sum((2.0 / (2. * self.m) - 1.0) *
                                      np.log(self.ps.power)))


        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        loglike_test = lpost.loglikelihood(t0, neg=True)

        assert np.isclose(loglike, loglike_test)
Example #24
    def test_calibrate_highest_outlier_works_with_sampling(self):
        m = 1
        nfreq = 100
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 5

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        with catch_warnings(RuntimeWarning):
            pval = pe.calibrate_highest_outlier(lpost, [2.0],
                                                sample=None,
                                                max_post=True,
                                                seed=seed,
                                                nsim=nsim,
                                                niter=10,
                                                nwalkers=20,
                                                burnin=10)

        assert pval > 0.001
Example #25
    def test_negative_posterior(self):
        t0 = [2.0]
        self.model.amplitude = t0[0]

        mean_model = self.model(self.ps.freq)
        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)
        lpost.logprior = set_logprior(lpost, self.priors)

        post_test = lpost(t0, neg=True)

        loglike = -2.0*self.m*(np.sum(np.log(mean_model)) +
                               np.sum(self.ps.power/mean_model) +
                               np.sum((2.0 / (2. * self.m) - 1.0) *
                                      np.log(self.ps.power)))

        logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
        post = -loglike - logprior

        assert np.isclose(post_test, post, atol=1.e-10)
Example #26
    def test_calibrate_highest_outlier_works_with_sampling(self):
        m = 1
        nfreq = 100000
        seed = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(seed)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        nsim = 10

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
        p_amplitude = lambda amplitude: \
            scipy.stats.norm(loc=1.0, scale=1.0).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude}
        lpost.logprior = set_logprior(lpost, priors)

        pe = PSDParEst(ps)

        pval = pe.calibrate_highest_outlier(lpost, [2.0], sample=None,
                                            max_post=True, seed=seed,
                                            nsim=nsim, niter=20, nwalkers=100,
                                            burnin=100)

        assert pval > 0.001
Example #27
    def test_logprior_fails_without_prior(self):
        lpost = PSDPosterior(self.ps.freq, self.ps.power,
                             self.model, m=self.ps.m)

        with pytest.raises(AttributeError):
            lpost.logprior([1])
Example #28
    def test_logprior_fails_without_prior(self):
        lpost = PSDPosterior(self.ps, self.model)

        with pytest.raises(AttributeError):
            lpost.logprior([1])