def test_calibrate_lrt_works_with_sampling(self):
    m = 1
    nfreq = 100
    freq = np.linspace(1, 10, nfreq)

    rng = np.random.RandomState(100)
    noise = rng.exponential(size=nfreq)

    model = models.Const1D()
    model.amplitude = 2.0
    p = model(freq)
    power = noise * p

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

    p_amplitude_1 = lambda amplitude: \
        scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)

    p_alpha_0 = lambda alpha: \
        scipy.stats.uniform(0.0, 5.0).pdf(alpha)

    p_amplitude_0 = lambda amplitude: \
        scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(amplitude)

    priors = {"amplitude": p_amplitude_1}

    priors2 = {"amplitude_1": p_amplitude_1,
               "amplitude_0": p_amplitude_0,
               "alpha_0": p_alpha_0}

    lpost.logprior = set_logprior(lpost, priors)

    model2 = models.PowerLaw1D() + models.Const1D()
    model2.x_0_0.fixed = True
    lpost2 = PSDPosterior(ps.freq, ps.power, model2, m=1)
    lpost2.logprior = set_logprior(lpost2, priors2)

    pe = PSDParEst(ps)

    with catch_warnings(RuntimeWarning):
        pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
                                [2.0, 1.0, 2.0], sample=None,
                                max_post=True, nsim=10, nwalkers=10,
                                burnin=10, niter=10, seed=100)

    assert pval > 0.001
def test_calibrate_lrt_works_with_sampling(self):
    m = 1
    nfreq = 10000
    freq = np.linspace(1, 10, nfreq)

    rng = np.random.RandomState(100)
    noise = rng.exponential(size=nfreq)

    model = models.Const1D()
    model.amplitude = 2.0
    p = model(freq)
    power = noise * p

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

    p_amplitude_1 = lambda amplitude: \
        scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)

    p_alpha_0 = lambda alpha: \
        scipy.stats.uniform(0.0, 5.0).pdf(alpha)

    p_amplitude_0 = lambda amplitude: \
        scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(amplitude)

    priors = {"amplitude": p_amplitude_1}

    priors2 = {"amplitude_1": p_amplitude_1,
               "amplitude_0": p_amplitude_0,
               "alpha_0": p_alpha_0}

    lpost.logprior = set_logprior(lpost, priors)

    model2 = models.PowerLaw1D() + models.Const1D()
    model2.x_0_0.fixed = True
    lpost2 = PSDPosterior(ps.freq, ps.power, model2, m=1)
    lpost2.logprior = set_logprior(lpost2, priors2)

    pe = PSDParEst(ps)

    pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
                            [2.0, 1.0, 2.0], sample=None,
                            max_post=True, nsim=10, nwalkers=100,
                            burnin=100, niter=20, seed=100)

    assert pval > 0.001
def test_correct_number_of_parameters(self):
    lpost = PSDPosterior(self.ps.freq, self.ps.power,
                         self.model, m=self.ps.m)
    lpost.logprior = set_logprior(lpost, self.priors)

    with pytest.raises(IncorrectParameterError):
        lpost([2, 3])
def test_compute_highest_outlier_works(self):
    mp_ind = 5
    max_power = 1000.0

    ps = Powerspectrum()
    ps.freq = np.arange(10)
    ps.power = np.ones_like(ps.freq)
    ps.power[mp_ind] = max_power
    ps.m = 1
    ps.df = ps.freq[1] - ps.freq[0]
    ps.norm = "leahy"

    model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=1.0, scale=1.0).pdf(amplitude)

    priors = {"amplitude": p_amplitude}

    lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
    lpost.logprior = set_logprior(lpost, priors)

    pe = PSDParEst(ps)

    res = pe.fit(lpost, [1.0])
    res.mfit = np.ones_like(ps.freq)

    max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)

    assert np.isclose(max_y[0], 2 * max_power)
    assert np.isclose(max_x[0], ps.freq[mp_ind])
    assert max_ind == mp_ind
def setup_class(cls):
    m = 1
    nfreq = 100000
    freq = np.arange(nfreq)
    noise = np.random.exponential(size=nfreq)
    power = noise * 2.0

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
    cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                             cls.model, m=cls.ps.m)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

    cls.fitmethod = "BFGS"
    cls.max_post = True
    cls.t0 = [2.0]
    cls.neg = True
    cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
                                      method=cls.fitmethod,
                                      args=(cls.neg,), tol=1.e-10)
def setup_class(cls):
    m = 1
    nfreq = 100000
    freq = np.arange(nfreq)
    noise = np.random.exponential(size=nfreq)
    power = noise * 2.0

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
    cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                             cls.model, m=cls.ps.m)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
def test_set_prior_runs(self):
    p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.)) / 6.0
    p_amplitude = lambda amplitude: \
        ((-10 <= np.log(amplitude)) & (np.log(amplitude) <= 10.0)) / 20.0

    priors = {"alpha": p_alpha, "amplitude": p_amplitude}

    self.lpost.logprior = set_logprior(self.lpost, priors)
def setup_class(cls):
    m = 1
    nfreq = 100000
    freq = np.arange(nfreq)
    noise = np.random.exponential(size=nfreq)
    power = noise * 2.0

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
    cls.lpost = PSDPosterior(cls.ps, cls.model)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

    cls.fitmethod = "BFGS"
    cls.max_post = True
    cls.t0 = [2.0]
    cls.neg = True
    cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
                                      method=cls.fitmethod,
                                      args=(cls.neg,), tol=1.e-10)
def setup_class(cls):
    np.random.seed(100)
    m = 1
    nfreq = 100
    freq = np.arange(nfreq)
    noise = np.random.exponential(size=nfreq)
    power = noise * 2.0

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
    cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                             cls.model, m=cls.ps.m)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
def test_making_posterior(self):
    lpost = PSDPosterior(self.ps.freq, self.ps.power,
                         self.model, m=self.ps.m)
    lpost.logprior = set_logprior(lpost, self.priors)

    assert lpost.x.all() == self.ps.freq.all()
    assert lpost.y.all() == self.ps.power.all()
def test_set_prior_runs(self):
    p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.)) / 6.0
    p_amplitude = lambda amplitude: \
        ((-10 <= np.log(amplitude)) & (np.log(amplitude) <= 10.0)) / 20.0

    priors = {"alpha": p_alpha, "amplitude": p_amplitude}

    self.lpost.logprior = set_logprior(self.lpost, priors)
def test_prior_returns_logmin_outside_prior_range(self):
    p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.)) / 6.0
    p_amplitude = lambda amplitude: \
        ((-10 <= np.log(amplitude)) & (np.log(amplitude) <= 10.0)) / 20.0

    priors = {"alpha": p_alpha, "amplitude": p_amplitude}

    self.lpost.logprior = set_logprior(self.lpost, priors)

    assert self.lpost.logprior([-2.0, np.exp(11.0)]) == logmin
def test_prior_returns_logmin_outside_prior_range(self):
    p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.)) / 6.0
    p_amplitude = lambda amplitude: \
        ((-10 <= np.log(amplitude)) & (np.log(amplitude) <= 10.0)) / 20.0

    priors = {"alpha": p_alpha, "amplitude": p_amplitude}

    self.lpost.logprior = set_logprior(self.lpost, priors)

    assert self.lpost.logprior([-2.0, np.exp(11.0)]) == logmin
def test_logprior(self):
    t0 = [10.0]

    lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    lp_test = lpost.logprior(t0)
    lp = np.log(scipy.stats.norm(self.countrate, self.cerr).pdf(t0))
    assert lp == lp_test
def test_prior_executes_correctly(self):
    p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.)) / 6.0
    p_amplitude = lambda amplitude: \
        ((-10 <= np.log(amplitude)) & (np.log(amplitude) <= 10.0)) / 20.0

    priors = {"alpha": p_alpha, "amplitude": p_amplitude}

    self.lpost.logprior = set_logprior(self.lpost, priors)

    true_logprior = np.log(1. / 6.) + np.log(1. / 20.0)
    assert self.lpost.logprior([np.exp(0.0), np.exp(0.0)]) == true_logprior
def setup_class(cls):
    cls.x = np.arange(100)
    cls.y = np.ones(cls.x.shape[0])

    cls.model = models.Const1D()
    cls.p = PosteriorClassDummy(cls.x, cls.y, cls.model)

    p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.)) / 6.0
    priors = {"amplitude": p_alpha}
    cls.p.logprior = set_logprior(cls.p, priors)
def setup_class(cls):
    cls.x = np.arange(100)
    cls.y = np.ones(cls.x.shape[0])

    cls.model = models.Const1D()
    cls.p = PosteriorClassDummy(cls.x, cls.y, cls.model)

    p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.)) / 6.0
    priors = {"amplitude": p_alpha}
    cls.p.logprior = set_logprior(cls.p, priors)
def test_logprior(self):
    t0 = [2.0]

    lpost = PSDPosterior(self.ps, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    lp_test = lpost.logprior(t0)
    lp = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
    assert lp == lp_test
def test_prior_executes_correctly(self):
    p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.)) / 6.0
    p_amplitude = lambda amplitude: \
        ((-10 <= np.log(amplitude)) & (np.log(amplitude) <= 10.0)) / 20.0

    priors = {"alpha": p_alpha, "amplitude": p_amplitude}

    self.lpost.logprior = set_logprior(self.lpost, priors)

    true_logprior = np.log(1. / 6.) + np.log(1. / 20.0)
    assert self.lpost.logprior([np.exp(0.0), np.exp(0.0)]) == true_logprior
def test_logprior(self):
    t0 = [10.0]

    lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    lp_test = lpost.logprior(t0)
    lp = np.log(scipy.stats.norm(self.countrate, self.cerr).pdf(t0))
    assert lp == lp_test
def test_counts_are_nan(self):
    y = np.nan * np.ones(self.x.shape[0])
    t0 = [10.0]
    self.model.amplitude = t0[0]

    lpost = GaussianPosterior(self.x, y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    assert np.isclose(lpost(t0), logmin, 1e-5)
def test_counts_are_nan(self):
    y = np.nan * np.ones(self.x.shape[0])
    t0 = [10.0]
    self.model.amplitude = t0[0]

    lpost = LaplacePosterior(self.x, y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    assert np.isclose(lpost(t0), logmin, 1e-5)
def test_logprior(self):
    t0 = [2.0]

    lpost = PSDPosterior(self.ps.freq, self.ps.power,
                         self.model, m=self.ps.m)
    lpost.logprior = set_logprior(lpost, self.priors)

    lp_test = lpost.logprior(t0)
    lp = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
    assert lp == lp_test
def setup_class(cls):
    m = 1
    nfreq = 100
    freq = np.arange(nfreq)
    noise = np.random.exponential(size=nfreq)
    power = noise * 2.0

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
    cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                             cls.model, m=cls.ps.m)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

    cls.fitmethod = "BFGS"
    cls.max_post = True
    cls.t0 = [2.0]
    cls.neg = True

    pe = ParameterEstimation()
    res = pe.fit(cls.lpost, cls.t0)

    cls.nwalkers = 50
    cls.niter = 100

    np.random.seed(200)
    p0 = np.array([np.random.multivariate_normal(res.p_opt, res.cov)
                   for i in range(cls.nwalkers)])

    cls.sampler = emcee.EnsembleSampler(cls.nwalkers, len(res.p_opt),
                                        cls.lpost, args=[False], threads=1)

    with catch_warnings(RuntimeWarning):
        _, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
def test_negative_loglikelihood(self):
    t0 = [2.0]
    m = self.model(self.ps.freq[1:], t0)
    loglike = np.sum(self.ps.power[1:] / m + np.log(m))

    lpost = PSDPosterior(self.ps, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    loglike_test = lpost.loglikelihood(t0, neg=True)

    assert np.isclose(loglike, loglike_test)
def test_negative_loglikelihood(self):
    t0 = [2.0]
    m = self.model(self.ps.freq[1:], t0)
    loglike = np.sum(self.ps.power[1:] / m + np.log(m))

    lpost = PSDPosterior(self.ps.freq, self.ps.power,
                         self.model, m=self.ps.m)
    lpost.logprior = set_logprior(lpost, self.priors)

    loglike_test = lpost.loglikelihood(t0, neg=True)

    assert np.isclose(loglike, loglike_test)
def test_counts_are_nan(self):
    y = np.nan * np.ones_like(self.ps.freq)

    ps_nan = copy.copy(self.ps)
    ps_nan.power = np.nan * np.ones_like(self.ps.freq)

    t0 = [2.0]
    m = self.model(self.ps.freq[1:], t0)

    lpost = PSDPosterior(ps_nan.freq, ps_nan.power, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    assert np.isclose(lpost(t0), logmin, 1e-5)
def test_counts_are_nan(self):
    y = np.nan * np.ones_like(self.ps.freq)

    ps_nan = copy.copy(self.ps)
    ps_nan.power = np.nan * np.ones_like(self.ps.freq)

    t0 = [2.0]
    m = self.model(self.ps.freq[1:], t0)

    lpost = PSDPosterior(ps_nan.freq, ps_nan.power, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    assert np.isclose(lpost(t0), logmin, 1e-5)
def test_negative_loglikelihood(self):
    t0 = [10.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.x)

    loglike = -np.sum(-mean_model + self.y * np.log(mean_model)
                      - scipy_gammaln(self.y + 1))

    lpost = PoissonPosterior(self.x, self.y, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    loglike_test = lpost.loglikelihood(t0, neg=True)

    assert np.isclose(loglike, loglike_test)
def test_negative_posterior(self):
    t0 = [2.0]
    m = self.model(self.ps.freq[1:], t0)

    lpost = PSDPosterior(self.ps, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    post_test = lpost(t0, neg=True)

    loglike = -np.sum(self.ps.power[1:] / m + np.log(m))
    logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
    post = -loglike - logprior

    assert np.isclose(post_test, post, atol=1.e-10)
def test_loglikelihood(self):
    t0 = [2.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.ps.freq)

    loglike = -np.sum(np.log(mean_model)) - np.sum(self.ps.power / mean_model)

    lpost = PSDPosterior(self.ps, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    loglike_test = lpost.loglikelihood(t0, neg=False)

    assert np.isclose(loglike, loglike_test)
def test_negative_loglikelihood(self):
    t0 = [10.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.x)

    loglike = -np.sum(-np.log(2.0 * self.yerr)
                      - np.abs(self.y - mean_model) / self.yerr)

    lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    loglike_test = lpost.loglikelihood(t0, neg=True)

    assert np.isclose(loglike, loglike_test)
def test_negative_loglikelihood(self):
    t0 = [10.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.x)

    loglike = -np.sum(-0.5 * np.log(2. * np.pi) - np.log(self.yerr)
                      - 0.5 * ((self.y - mean_model) / self.yerr) ** 2.0)

    lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    loglike_test = lpost.loglikelihood(t0, neg=True)

    assert np.isclose(loglike, loglike_test)
def test_negative_posterior(self):
    t0 = [2.0]
    m = self.model(self.ps.freq[1:], t0)

    lpost = PSDPosterior(self.ps.freq, self.ps.power,
                         self.model, m=self.ps.m)
    lpost.logprior = set_logprior(lpost, self.priors)

    post_test = lpost(t0, neg=True)

    loglike = -np.sum(self.ps.power[1:] / m + np.log(m))
    logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
    post = -loglike - logprior

    assert np.isclose(post_test, post, atol=1.e-10)
def test_negative_loglikelihood(self):
    t0 = [10.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.x)

    loglike = -np.sum(-np.log(2.0 * self.yerr)
                      - np.abs(self.y - mean_model) / self.yerr)

    lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    loglike_test = lpost.loglikelihood(t0, neg=True)

    assert np.isclose(loglike, loglike_test)
def test_loglikelihood(self):
    t0 = [2.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.ps.freq)

    loglike = -np.sum(np.log(mean_model)) - np.sum(self.ps.power / mean_model)

    lpost = PSDPosterior(self.ps.freq, self.ps.power,
                         self.model, m=self.ps.m)
    lpost.logprior = set_logprior(lpost, self.priors)

    loglike_test = lpost.loglikelihood(t0, neg=False)

    assert np.isclose(loglike, loglike_test)
def setup_class(cls):
    m = 1
    nfreq = 100000
    freq = np.arange(nfreq)
    noise = np.random.exponential(size=nfreq)
    power = noise * 2.0

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
    cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                             cls.model, m=cls.ps.m)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

    cls.fitmethod = "BFGS"
    cls.max_post = True
    cls.t0 = [2.0]
    cls.neg = True

    pe = ParameterEstimation()
    res = pe.fit(cls.lpost, cls.t0)

    cls.nwalkers = 100
    cls.niter = 200

    np.random.seed(200)
    p0 = np.array([np.random.multivariate_normal(res.p_opt, res.cov)
                   for i in range(cls.nwalkers)])

    cls.sampler = emcee.EnsembleSampler(cls.nwalkers, len(res.p_opt),
                                        cls.lpost, args=[False], threads=1)

    _, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
def test_posterior(self):
    t0 = [2.0]
    self.model.amplitude = t0[0]
    m = self.model(self.ps.freq[1:])

    lpost = PSDPosterior(self.ps.freq, self.ps.power,
                         self.model, m=self.ps.m)
    lpost.logprior = set_logprior(lpost, self.priors)

    post_test = lpost(t0, neg=False)

    loglike = -np.sum(self.ps.power[1:] / m + np.log(m))
    logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
    post = loglike + logprior

    assert np.isclose(post_test, post, atol=1.e-10)
def test_negative_posterior(self):
    t0 = [10.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.x)

    lpost = PoissonPosterior(self.x, self.y, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    post_test = lpost(t0, neg=True)

    loglike = np.sum(-mean_model + self.y * np.log(mean_model)
                     - scipy_gammaln(self.y + 1))
    logprior = np.log(scipy.stats.norm(self.countrate,
                                       self.countrate).pdf(t0))
    post = -loglike - logprior

    assert np.isclose(post_test, post, atol=1.e-10)
def test_negative_loglikelihood(self):
    t0 = [2.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.ps.freq)

    loglike = 2.0 * self.m * (np.sum(np.log(mean_model))
                              + np.sum(self.ps.power / mean_model)
                              + np.sum((2.0 / (2. * self.m) - 1.0)
                                       * np.log(self.ps.power)))

    lpost = PSDPosterior(self.ps, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    loglike_test = lpost.loglikelihood(t0, neg=True)

    assert np.isclose(loglike, loglike_test)
def setup_class(cls):
    np.random.seed(1000)
    m = 1
    nfreq = 100
    freq = np.arange(nfreq)
    noise = np.random.exponential(size=nfreq)
    power = noise * 2.0

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.n = freq.shape[0]
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
    cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                             cls.model, m=cls.ps.m)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

    cls.fitmethod = "powell"
    cls.max_post = True
    cls.t0 = np.array([2.0])
    cls.neg = True

    cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
                                      method=cls.fitmethod,
                                      args=(cls.neg,), tol=1.e-10)
    cls.opt.x = np.atleast_1d(cls.opt.x)
    cls.optres = OptimizationResultsSubclassDummy(cls.lpost, cls.opt,
                                                  neg=True)
def test_negative_posterior(self):
    t0 = [10.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.x)

    lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    post_test = lpost(t0, neg=True)

    loglike = np.sum(-0.5 * np.log(2. * np.pi) - np.log(self.yerr)
                     - 0.5 * ((self.y - mean_model) / self.yerr) ** 2.0)
    logprior = np.log(scipy.stats.norm(self.countrate, self.cerr).pdf(t0))
    post = -loglike - logprior

    assert np.isclose(post_test, post, atol=1.e-10)
def test_posterior(self):
    t0 = [10.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.x)

    lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    post_test = lpost(t0, neg=False)

    loglike = np.sum(-np.log(2.0 * self.yerr)
                     - np.abs(self.y - mean_model) / self.yerr)
    logprior = np.log(scipy.stats.norm(self.countrate, self.cerr).pdf(t0))
    post = loglike + logprior

    assert np.isclose(post_test, post, atol=1.e-10)
def test_negative_posterior(self):
    t0 = [2.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.ps.freq)

    lpost = PSDPosterior(self.ps, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    post_test = lpost(t0, neg=True)

    loglike = -2.0 * self.m * (np.sum(np.log(mean_model))
                               + np.sum(self.ps.power / mean_model)
                               + np.sum((2.0 / (2. * self.m) - 1.0)
                                        * np.log(self.ps.power)))
    logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
    post = -loglike - logprior

    assert np.isclose(post_test, post, atol=1.e-10)
def test_posterior(self):
    t0 = [10.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.x)

    lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    post_test = lpost(t0, neg=False)

    loglike = np.sum(-np.log(2.0 * self.yerr)
                     - np.abs(self.y - mean_model) / self.yerr)
    logprior = np.log(scipy.stats.norm(self.countrate, self.cerr).pdf(t0))
    post = loglike + logprior

    assert np.isclose(post_test, post, atol=1.e-10)
def test_negative_loglikelihood(self):
    t0 = [2.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.ps.freq)

    loglike = 2.0 * self.m * (np.sum(np.log(mean_model))
                              + np.sum(self.ps.power / mean_model)
                              + np.sum((2.0 / (2. * self.m) - 1.0)
                                       * np.log(self.ps.power)))

    lpost = PSDPosterior(self.ps.freq, self.ps.power,
                         self.model, m=self.ps.m)
    lpost.logprior = set_logprior(lpost, self.priors)

    loglike_test = lpost.loglikelihood(t0, neg=True)

    assert np.isclose(loglike, loglike_test)
def test_calibrate_highest_outlier_works_with_sampling(self):
    m = 1
    nfreq = 100
    seed = 100
    freq = np.linspace(1, 10, nfreq)
    rng = np.random.RandomState(seed)
    noise = rng.exponential(size=nfreq)

    model = models.Const1D()
    model.amplitude = 2.0
    p = model(freq)
    power = noise * p

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    nsim = 5

    lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=1.0, scale=1.0).pdf(amplitude)

    priors = {"amplitude": p_amplitude}
    lpost.logprior = set_logprior(lpost, priors)

    pe = PSDParEst(ps)

    with catch_warnings(RuntimeWarning):
        pval = pe.calibrate_highest_outlier(lpost, [2.0], sample=None,
                                            max_post=True, seed=seed,
                                            nsim=nsim, niter=10,
                                            nwalkers=20, burnin=10)

    assert pval > 0.001
def test_negative_posterior(self):
    t0 = [2.0]
    self.model.amplitude = t0[0]
    mean_model = self.model(self.ps.freq)

    lpost = PSDPosterior(self.ps.freq, self.ps.power,
                         self.model, m=self.ps.m)
    lpost.logprior = set_logprior(lpost, self.priors)

    post_test = lpost(t0, neg=True)

    loglike = -2.0 * self.m * (np.sum(np.log(mean_model))
                               + np.sum(self.ps.power / mean_model)
                               + np.sum((2.0 / (2. * self.m) - 1.0)
                                        * np.log(self.ps.power)))
    logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
    post = -loglike - logprior

    assert np.isclose(post_test, post, atol=1.e-10)
def test_calibrate_highest_outlier_works_with_sampling(self):
    m = 1
    nfreq = 100000
    seed = 100
    freq = np.linspace(1, 10, nfreq)
    rng = np.random.RandomState(seed)
    noise = rng.exponential(size=nfreq)

    model = models.Const1D()
    model.amplitude = 2.0
    p = model(freq)
    power = noise * p

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    nsim = 10

    lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=1.0, scale=1.0).pdf(amplitude)

    priors = {"amplitude": p_amplitude}
    lpost.logprior = set_logprior(lpost, priors)

    pe = PSDParEst(ps)

    pval = pe.calibrate_highest_outlier(lpost, [2.0], sample=None,
                                        max_post=True, seed=seed,
                                        nsim=nsim, niter=20,
                                        nwalkers=100, burnin=100)

    assert pval > 0.001
def setup_class(cls):
    m = 1
    nfreq = 100000
    freq = np.linspace(1, 1000, nfreq)

    np.random.seed(100)  # set the seed for the random number generator
    noise = np.random.exponential(size=nfreq)

    cls.model = models.PowerLaw1D() + models.Const1D()
    cls.model.x_0_0.fixed = True

    cls.alpha_0 = 2.0
    cls.amplitude_0 = 100.0
    cls.amplitude_1 = 2.0

    cls.model.alpha_0 = cls.alpha_0
    cls.model.amplitude_0 = cls.amplitude_0
    cls.model.amplitude_1 = cls.amplitude_1

    p = cls.model(freq)
    power = noise * p

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0
    cls.a2_mean, cls.a2_var = 100.0, 10.0

    p_amplitude_1 = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    p_alpha_0 = lambda alpha: \
        scipy.stats.uniform(0.0, 5.0).pdf(alpha)

    p_amplitude_0 = lambda amplitude: \
        scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)

    cls.priors = {"amplitude_1": p_amplitude_1,
                  "amplitude_0": p_amplitude_0,
                  "alpha_0": p_alpha_0}

    cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                             cls.model, m=cls.ps.m)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

    cls.fitmethod = "BFGS"
    cls.max_post = True
    cls.t0 = [cls.amplitude_0, cls.alpha_0, cls.amplitude_1]
    cls.neg = True

    cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
                                      method=cls.fitmethod,
                                      args=(cls.neg,), tol=1.e-5)

    cls.optres = OptimizationResultsSubclassDummy(cls.lpost, cls.opt,
                                                  neg=True)
def setup_class(cls):
    m = 1
    nfreq = 100000
    freq = np.linspace(1, 10.0, nfreq)

    rng = np.random.RandomState(100)  # set the seed for the random number generator
    noise = rng.exponential(size=nfreq)

    cls.model = models.Lorentz1D() + models.Const1D()

    cls.x_0_0 = 2.0
    cls.fwhm_0 = 0.1
    cls.amplitude_0 = 100.0
    cls.amplitude_1 = 2.0

    cls.model.x_0_0 = cls.x_0_0
    cls.model.fwhm_0 = cls.fwhm_0
    cls.model.amplitude_0 = cls.amplitude_0
    cls.model.amplitude_1 = cls.amplitude_1

    p = cls.model(freq)

    np.random.seed(400)
    power = noise * p

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0
    cls.a2_mean, cls.a2_var = 100.0, 10.0

    p_amplitude_1 = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    p_x_0_0 = lambda alpha: \
        scipy.stats.uniform(0.0, 5.0).pdf(alpha)

    p_fwhm_0 = lambda alpha: \
        scipy.stats.uniform(0.0, 0.5).pdf(alpha)

    p_amplitude_0 = lambda amplitude: \
        scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)

    cls.priors = {"amplitude_1": p_amplitude_1,
                  "amplitude_0": p_amplitude_0,
                  "x_0_0": p_x_0_0,
                  "fwhm_0": p_fwhm_0}

    cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                             cls.model, m=cls.ps.m)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

    cls.fitmethod = "BFGS"
    cls.max_post = True
    cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
    cls.neg = True
def test_correct_number_of_parameters(self):
    lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    with pytest.raises(IncorrectParameterError):
        lpost([2, 3])
def test_correct_number_of_parameters(self):
    lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
    lpost.logprior = set_logprior(lpost, self.priors)

    with pytest.raises(IncorrectParameterError):
        lpost([2, 3])