class TestDoublePowerLaw(unittest.TestCase):
    def setUp(self):
        """Build mass/mass-ratio grids and priors for the broken power law."""
        self.m1s = np.linspace(3, 100, 1000)
        self.qs = np.linspace(0.01, 1, 500)
        m1s_grid, qs_grid = xp.meshgrid(self.m1s, self.qs)
        self.dataset = dict(mass_1=m1s_grid, mass_ratio=qs_grid)
        self.power_prior = PriorDict()
        self.power_prior["alpha_1"] = Uniform(minimum=-4, maximum=12)
        self.power_prior["alpha_2"] = Uniform(minimum=-4, maximum=12)
        self.power_prior["beta"] = Uniform(minimum=-4, maximum=12)
        self.power_prior["mmin"] = Uniform(minimum=3, maximum=10)
        self.power_prior["mmax"] = Uniform(minimum=40, maximum=100)
        # FIX: break_fraction locates the break as a fraction of the
        # (mmin, mmax) interval, so it must be sampled in [0, 1]; the
        # previous Uniform(40, 100) prior produced unphysical values.
        self.power_prior["break_fraction"] = Uniform(minimum=0, maximum=1)
        self.n_test = 10

    def test_double_power_law_zero_below_mmin(self):
        """The primary-mass distribution must vanish below mmin."""
        for ii in range(self.n_test):
            parameters = self.power_prior.sample()
            # the primary-mass-only model takes no mass-ratio slope
            del parameters["beta"]
            p_m = mass.double_power_law_primary_mass(self.m1s, **parameters)
            self.assertEqual(xp.max(p_m[self.m1s <= parameters["mmin"]]), 0.0)

    def test_power_law_primary_mass_ratio_zero_above_mmax(self):
        """The joint mass/mass-ratio distribution must vanish above mmax."""
        for ii in range(self.n_test):
            parameters = self.power_prior.sample()
            p_m = mass.double_power_law_primary_power_law_mass_ratio(
                self.dataset, **parameters)
            self.assertEqual(
                xp.max(p_m[self.dataset["mass_1"] >= parameters["mmax"]]),
                0.0)
def test_includephase_likelihood(self):
    """Enabling ``include_phase`` must leave the log-likelihood unchanged."""
    het = HeterodynedData(
        self.data,
        times=self.times,
        detector=self.detector,
        par=self.parfile,
    )
    priors = {"h0": Uniform(0.0, 1.0e-23, "h0")}

    # baseline: include_phase defaults to False
    baseline = TargetedPulsarLikelihood(
        het, PriorDict(priors), likelihood="studentst"
    )
    baseline.parameters = {"h0": 1e-24}
    logl_nophase = baseline.log_likelihood()

    # identical model, but with the phase term switched on
    phased = TargetedPulsarLikelihood(
        het, PriorDict(priors), likelihood="studentst"
    )
    phased.parameters = {"h0": 1e-24}
    phased.include_phase = True
    logl_phase = phased.log_likelihood()

    print(f"{logl_nophase:.15f} {logl_phase:.15f}")
    assert np.allclose([logl_nophase], [logl_phase], atol=1e-10, rtol=0.0)
def test_numba_likelihood(self):
    """The numba-accelerated likelihood must match the standard one."""
    het = HeterodynedData(
        self.data,
        times=self.times,
        detector=self.detector,
        par=self.parfile,
    )
    priors = {"h0": Uniform(0.0, 1.0e-23, "h0")}

    for method in ("gaussian", "studentst"):
        plain = TargetedPulsarLikelihood(
            het, PriorDict(priors), likelihood=method
        )
        plain.parameters = {"h0": 1e-24}

        fast = TargetedPulsarLikelihood(
            het, PriorDict(priors), likelihood=method, numba=True
        )
        fast.parameters = {"h0": 1e-24}

        assert plain.log_likelihood() == fast.log_likelihood()
def __init__(self, name):
    """Set up the source-population prior for the named source class.

    Parameters
    ----------
    name: str
        One of 'BBH-powerlaw', 'BNS', 'NSBH' or 'BBH-constant'.

    Raises
    ------
    ValueError
        If ``name`` is not one of the recognised population names.
        (Previously an unknown name fell through all the ``if`` branches
        and raised a confusing ``NameError`` on the unbound ``dist``.)
    """
    self.name = name
    if self.name == 'BBH-powerlaw':
        dist = PriorDict(conversion_function=constrain_m1m2)
        dist['luminosity_distance'] = PowerLaw(alpha=2, minimum=1, maximum=15000)
    elif self.name == 'BNS':
        dist = PriorDict(conversion_function=constrain_m1m2)
        dist['luminosity_distance'] = PowerLaw(alpha=2, minimum=1, maximum=1000)
    elif self.name == 'NSBH':
        dist = PriorDict(conversion_function=constrain_m1m2)
        dist['luminosity_distance'] = PowerLaw(alpha=2, minimum=1, maximum=1000)
    elif self.name == 'BBH-constant':
        # no mass-constraint conversion for the constant-rate BBH model
        dist = PriorDict()
        dist['luminosity_distance'] = PowerLaw(alpha=2, minimum=1, maximum=15000)
    else:
        raise ValueError(
            f"Unknown population name: {name!r}; expected one of "
            "'BBH-powerlaw', 'BNS', 'NSBH', 'BBH-constant'"
        )
    self.dist = dist
class TestPrimaryMassRatio(unittest.TestCase):
    # NOTE(review): a second class with this same name appears later in the
    # file; the later definition shadows this one under unittest discovery.
    def setUp(self):
        """Build mass/mass-ratio grids and priors for the mixture model."""
        self.m1s = np.linspace(3, 100, 1000)
        self.qs = np.linspace(0.01, 1, 500)
        m1s_grid, qs_grid = xp.meshgrid(self.m1s, self.qs)
        self.dataset = dict(mass_1=m1s_grid, mass_ratio=qs_grid)
        self.power_prior = PriorDict()
        self.power_prior['alpha'] = Uniform(minimum=-4, maximum=12)
        self.power_prior['beta'] = Uniform(minimum=-4, maximum=12)
        self.power_prior['mmin'] = Uniform(minimum=3, maximum=10)
        self.power_prior['mmax'] = Uniform(minimum=40, maximum=100)
        self.gauss_prior = PriorDict()
        self.gauss_prior['lam'] = Uniform(minimum=0, maximum=1)
        self.gauss_prior['mpp'] = Uniform(minimum=20, maximum=60)
        self.gauss_prior['sigpp'] = Uniform(minimum=0, maximum=10)
        self.n_test = 10

    def test_dynamic(self):
        """The dynamical model vanishes where the secondary mass < mmin.

        FIX: removed two dead statements that sampled the priors and then
        immediately overwrote the result with the fixed parameter dict.
        """
        parameters = dict(alpha=2.0, mmin=5.0, mmax=45.0, lam=0.1, mpp=35.0,
                          sigpp=1.0, beta=1.0, branch_1=0.12, branch_2=0.01)
        prob = two_component_primary_mass_ratio_dynamical_without_spins(
            dataset=self.dataset, **parameters)
        self.assertTrue(
            all(prob[self.dataset["mass_1"] * self.dataset["mass_ratio"]
                     <= parameters["mmin"]] == 0))
def simulate_population_posteriors(sig1=5, sig12=5, number_events=10,
                                   n_samp=50000, fractional_sigma=1):
    """Simulate per-event posteriors drawn around population-prior truths.

    Parameters
    ----------
    sig1, sig12: float
        Widths of the truncated-normal population priors on ``cos_theta_1``
        and ``cos_theta_12``.
    number_events: int
        Number of events to simulate.
    n_samp: int
        Number of posterior samples per event.
    fractional_sigma: float
        Fractional posterior width passed to ``simulate_posterior``.
        FIX: this argument was previously ignored — the inner call
        hard-coded ``fractional_sigma=1``.

    Returns
    -------
    dict
        ``{"trues": {param: array}, "posteriors": {param: array}}``.
    """
    pop_prior = PriorDict(
        dict(cos_theta_1=TruncatedNormal(mu=1, sigma=sig1,
                                         minimum=-1, maximum=1),
             cos_theta_12=TruncatedNormal(mu=1, sigma=sig12,
                                          minimum=-1, maximum=1)))
    params = pop_prior.keys()
    posteriors = {p: [] for p in params}
    trues = {p: [] for p in params}
    for _ in range(number_events):
        true = pop_prior.sample()
        posterior = simulate_posterior(
            true, n_samples=n_samp, fractional_sigma=fractional_sigma)
        for p in params:
            posteriors[p].append(posterior[p].values)
            trues[p].append(true[p])
    for p in params:
        posteriors[p] = np.array(posteriors[p])
        trues[p] = np.array(trues[p])
    return dict(trues=trues, posteriors=posteriors)
def setUp(self):
    """Build a 2-D cos-tilt grid and priors for the orientation model."""
    grid = xp.linspace(-1, 1, 1000)
    ones = xp.ones_like(grid)
    self.costilts = grid
    self.test_data = dict(
        cos_tilt_1=xp.einsum("i,j->ij", grid, ones),
        cos_tilt_2=xp.einsum("i,j->ji", grid, ones),
    )
    self.prior = PriorDict(
        dict(xi_spin=Uniform(0, 1), sigma_spin=Uniform(0, 4))
    )
    self.n_test = 100
def setUp(self):
    """Build 2-D spin-magnitude grids and a beta-distribution prior."""
    mags = xp.linspace(0, 1, 1000)
    ones = xp.ones_like(mags)
    self.a_array = mags
    self.test_data = dict(
        a_1=xp.einsum('i,j->ij', mags, ones),
        a_2=xp.einsum('i,j->ji', mags, ones),
    )
    self.prior = PriorDict(
        dict(
            amax=Uniform(0.3, 1),
            alpha_chi=Uniform(1, 4),
            beta_chi=Uniform(1, 4),
        )
    )
    self.n_test = 100
def setUp(self):
    """Build mass/mass-ratio grids and priors for the broken power law."""
    self.m1s = np.linspace(3, 100, 1000)
    self.qs = np.linspace(0.01, 1, 500)
    m1s_grid, qs_grid = xp.meshgrid(self.m1s, self.qs)
    self.dataset = dict(mass_1=m1s_grid, mass_ratio=qs_grid)
    self.power_prior = PriorDict()
    self.power_prior["alpha_1"] = Uniform(minimum=-4, maximum=12)
    self.power_prior["alpha_2"] = Uniform(minimum=-4, maximum=12)
    self.power_prior["beta"] = Uniform(minimum=-4, maximum=12)
    self.power_prior["mmin"] = Uniform(minimum=3, maximum=10)
    self.power_prior["mmax"] = Uniform(minimum=40, maximum=100)
    # FIX: break_fraction locates the break as a fraction of the
    # (mmin, mmax) interval, so it must be sampled in [0, 1]; the
    # previous Uniform(40, 100) prior produced unphysical values.
    self.power_prior["break_fraction"] = Uniform(minimum=0, maximum=1)
    self.n_test = 10
def test_resampling_posteriors(self):
    """Resampled posteriors keep the shape of the stored data arrays."""
    priors = PriorDict({key: Uniform(0, 2) for key in "abc"})
    samples = priors.sample(100)
    likelihood = HyperparameterLikelihood(
        posteriors=self.data,
        hyper_prior=self.model,
        selection_function=self.selection_function,
        ln_evidences=self.ln_evidences,
    )
    resampled = likelihood.posterior_predictive_resample(samples=samples)
    for key in resampled:
        self.assertEqual(resampled[key].shape, likelihood.data[key].shape)
def setUp(self):
    """Grids over primary mass and mass ratio plus priors for each model."""
    self.m1s = np.linspace(3, 100, 1000)
    self.qs = np.linspace(0.01, 1, 500)
    self.dm = self.m1s[1] - self.m1s[0]
    self.dq = self.qs[1] - self.qs[0]
    grid_m1, grid_q = xp.meshgrid(self.m1s, self.qs)
    self.dataset = dict(mass_1=grid_m1, mass_ratio=grid_q)

    def _uniform_priors(bounds):
        # build a PriorDict of Uniform priors from {name: (lo, hi)}
        pd = PriorDict()
        for key, (lo, hi) in bounds.items():
            pd[key] = Uniform(minimum=lo, maximum=hi)
        return pd

    self.power_prior = _uniform_priors(
        dict(alpha=(-4, 12), beta=(-4, 12), mmin=(3, 10), mmax=(30, 100))
    )
    self.gauss_prior = _uniform_priors(
        dict(lam=(0, 1), mpp=(20, 60), sigpp=(0, 10))
    )
    self.double_gauss_prior = _uniform_priors(
        dict(
            lam=(0, 1),
            lam_1=(0, 1),
            mpp_1=(20, 60),
            mpp_2=(20, 60),
            sigpp_1=(0, 10),
            sigpp_2=(0, 10),
        )
    )
    self.smooth_prior = _uniform_priors(dict(delta_m=(0, 10)))
    self.n_test = 10
def setUp(self):
    """Primary-mass/mass-ratio grids plus power-law and Gaussian priors."""
    self.m1s = np.linspace(3, 100, 1000)
    self.qs = np.linspace(0.01, 1, 500)
    grid_m1, grid_q = xp.meshgrid(self.m1s, self.qs)
    self.dataset = dict(mass_1=grid_m1, mass_ratio=grid_q)
    self.power_prior = PriorDict()
    for key, (lo, hi) in dict(
        alpha=(-4, 12), beta=(-4, 12), mmin=(3, 10), mmax=(40, 100)
    ).items():
        self.power_prior[key] = Uniform(minimum=lo, maximum=hi)
    self.gauss_prior = PriorDict()
    for key, (lo, hi) in dict(
        lam=(0, 1), mpp=(20, 60), sigpp=(0, 10)
    ).items():
        self.gauss_prior[key] = Uniform(minimum=lo, maximum=hi)
    self.n_test = 10
def setUp(self):
    """Square component-mass grids plus power-law and Gaussian priors."""
    masses = np.linspace(3, 100, 1000)
    self.ms = masses
    self.dm = masses[1] - masses[0]
    grid_m1, grid_m2 = xp.meshgrid(masses, masses)
    self.dataset = dict(mass_1=grid_m1, mass_2=grid_m2)
    self.power_prior = PriorDict()
    for key, (lo, hi) in dict(
        alpha=(-4, 12), beta=(-4, 12), mmin=(3, 10), mmax=(40, 100)
    ).items():
        self.power_prior[key] = Uniform(minimum=lo, maximum=hi)
    self.gauss_prior = PriorDict()
    for key, (lo, hi) in dict(
        lam=(0, 1), mpp=(20, 60), sigpp=(0, 10)
    ).items():
        self.gauss_prior[key] = Uniform(minimum=lo, maximum=hi)
    self.n_test = 10
def test_madau_dickinson_normalised(self):
    """The Madau-Dickinson redshift model must be normalised."""
    priors = PriorDict(
        dict(
            gamma=Uniform(-15, 15),
            kappa=Uniform(-15, 15),
            z_peak=Uniform(0, 5),
        )
    )
    self._run_model_normalisation(
        model=redshift.MadauDickinsonRedshift(), priors=priors
    )
def test_wrong_inputs(self):
    """Incorrect TargetedPulsarLikelihood inputs must raise exceptions."""
    with pytest.raises(TypeError):
        TargetedPulsarLikelihood(None, None)

    # heterodyned data without an associated pulsar par file
    nopar = HeterodynedData(
        self.data, times=self.times, detector=self.detector
    )
    priors = {"h0": Uniform(0.0, 1.0e-23, "h0")}

    # a par file is required, so this must fail
    with pytest.raises(ValueError):
        TargetedPulsarLikelihood(nopar, PriorDict(priors))

    het = HeterodynedData(
        self.data,
        times=self.times,
        detector=self.detector,
        par=self.parfile,
    )
    mhet = MultiHeterodynedData(het)  # multihet object for testing

    with pytest.raises(TypeError):
        TargetedPulsarLikelihood(het, None)

    with pytest.raises(TypeError):
        TargetedPulsarLikelihood(mhet, None)
class TestIIDSpin(unittest.TestCase):
    def setUp(self):
        """2-D grids over spin magnitude and tilt plus a joint spin prior."""
        mags = xp.linspace(0, 1, 1000)
        tilts = xp.linspace(-1, 1, 1000)
        self.a_array = mags
        self.costilts = tilts
        self.test_data = dict(
            a_1=xp.einsum("i,j->ij", mags, xp.ones_like(mags)),
            a_2=xp.einsum("i,j->ji", mags, xp.ones_like(mags)),
            cos_tilt_1=xp.einsum("i,j->ij", tilts, xp.ones_like(tilts)),
            cos_tilt_2=xp.einsum("i,j->ji", tilts, xp.ones_like(tilts)),
        )
        self.prior = PriorDict(
            dict(
                amax=Uniform(0.3, 1),
                alpha_chi=Uniform(1, 4),
                beta_chi=Uniform(1, 4),
                xi_spin=Uniform(0, 1),
                sigma_spin=Uniform(0, 4),
            )
        )
        self.n_test = 100

    def test_iid_matches_independent(self):
        """iid_spin factorises into its magnitude and orientation pieces."""
        params = self.prior.sample()
        magnitude = {k: params[k] for k in ("amax", "alpha_chi", "beta_chi")}
        orientation = {k: params[k] for k in ("xi_spin", "sigma_spin")}
        combined = spin.iid_spin(self.test_data, **params)
        factored = spin.iid_spin_magnitude_beta(
            self.test_data, **magnitude
        ) * spin.iid_spin_orientation_gaussian_isotropic(
            self.test_data, **orientation
        )
        self.assertEqual(0.0, xp.max(combined - factored))
def setUp(self):
    """Instantiate a Ptemcee sampler with default options for testing."""
    self.likelihood = GaussianLikelihood(
        x=np.linspace(0, 1, 2),
        y=np.linspace(0, 1, 2),
        func=lambda x, **kwargs: x,
        sigma=1,
    )
    self.priors = PriorDict({"a": Uniform(0, 1), "b": Uniform(0, 1)})
    self._args = (self.likelihood, self.priors)
    self._kwargs = {
        "outdir": "outdir",
        "label": "label",
        "use_ratio": False,
        "plot": False,
        "skip_import_verification": True,
    }
    self.sampler = Ptemcee(*self._args, **self._kwargs)
    # expected default sampler configuration
    self.expected = {
        "ntemps": 10,
        "nwalkers": 100,
        "Tmax": None,
        "betas": None,
        "a": 2.0,
        "adaptation_lag": 10000,
        "adaptation_time": 100,
        "random": None,
        "adapt": False,
        "swap_ratios": False,
    }
class TestPrimarySecondary(unittest.TestCase):
    def setUp(self):
        """Square component-mass grids and sampling priors."""
        masses = np.linspace(3, 100, 1000)
        self.ms = masses
        self.dm = masses[1] - masses[0]
        grid_m1, grid_m2 = xp.meshgrid(masses, masses)
        self.dataset = dict(mass_1=grid_m1, mass_2=grid_m2)
        self.power_prior = PriorDict()
        for key, (lo, hi) in dict(
            alpha=(-4, 12), beta=(-4, 12), mmin=(3, 10), mmax=(40, 100)
        ).items():
            self.power_prior[key] = Uniform(minimum=lo, maximum=hi)
        self.gauss_prior = PriorDict()
        for key, (lo, hi) in dict(
            lam=(0, 1), mpp=(20, 60), sigpp=(0, 10)
        ).items():
            self.gauss_prior[key] = Uniform(minimum=lo, maximum=hi)
        self.n_test = 10

    def test_power_law_primary_secondary_zero_below_mmin(self):
        """Independent power-law model vanishes where mass_2 <= mmin."""
        for _ in range(self.n_test):
            params = self.power_prior.sample()
            prob = mass.power_law_primary_secondary_independent(
                self.dataset, **params
            )
            below = self.dataset["mass_2"] <= params["mmin"]
            self.assertEqual(xp.max(prob[below]), 0.0)

    def test_power_law_primary_secondary_zero_above_mmax(self):
        """Identical power-law model vanishes where mass_1 >= mmax."""
        for _ in range(self.n_test):
            params = self.power_prior.sample()
            del params["beta"]  # identical model takes no mass-ratio slope
            prob = mass.power_law_primary_secondary_identical(
                self.dataset, **params
            )
            above = self.dataset["mass_1"] >= params["mmax"]
            self.assertEqual(xp.max(prob[above]), 0.0)

    def test_two_component_primary_secondary_zero_below_mmin(self):
        """Two-component identical model vanishes where mass_2 <= mmin."""
        for _ in range(self.n_test):
            params = self.power_prior.sample()
            params.update(self.gauss_prior.sample())
            del params["beta"]  # identical model takes no mass-ratio slope
            prob = mass.two_component_primary_secondary_identical(
                self.dataset, **params
            )
            below = self.dataset["mass_2"] <= params["mmin"]
            self.assertEqual(xp.max(prob[below]), 0.0)
class TestSpinMagnitude(unittest.TestCase):
    def setUp(self):
        """2-D spin-magnitude grids and a beta-distribution prior."""
        mags = xp.linspace(0, 1, 1000)
        ones = xp.ones_like(mags)
        self.a_array = mags
        self.test_data = dict(
            a_1=xp.einsum("i,j->ij", mags, ones),
            a_2=xp.einsum("i,j->ji", mags, ones),
        )
        self.prior = PriorDict(
            dict(
                amax=Uniform(0.3, 1),
                alpha_chi=Uniform(1, 4),
                beta_chi=Uniform(1, 4),
            )
        )
        self.n_test = 100

    def tearDown(self):
        """Drop the fixtures created in setUp."""
        del self.test_data, self.prior, self.a_array, self.n_test

    def test_spin_magnitude_normalised(self):
        """The magnitude distribution integrates to one."""
        norms = []
        for _ in range(self.n_test):
            density = spin.iid_spin_magnitude_beta(
                self.test_data, **self.prior.sample()
            )
            norms.append(trapz(trapz(density, self.a_array), self.a_array))
        worst = float(xp.max(xp.abs(1 - xp.asarray(norms))))
        self.assertAlmostEqual(worst, 0, 1)

    def test_returns_zero_alpha_beta_less_zero(self):
        """Negative beta-distribution shape parameters give zero density."""
        params = self.prior.sample()
        params["alpha_chi"] = -1
        params["beta_chi"] = -1
        self.assertEqual(
            spin.iid_spin_magnitude_beta(self.test_data, **params), 0
        )

    def test_iid_matches_independent_magnitudes(self):
        """The iid model equals the independent model with tied parameters."""
        shared = self.prior.sample()
        separate = {}
        separate.update({f"{key}_1": val for key, val in shared.items()})
        separate.update({f"{key}_2": val for key, val in shared.items()})
        diff = spin.iid_spin_magnitude_beta(
            self.test_data, **shared
        ) - spin.independent_spin_magnitude_beta(self.test_data, **separate)
        self.assertEqual(0.0, xp.max(diff))
def load_priors(prior_files):
    """Return a dict mapping each prior file's basename to its PriorDict."""
    return {
        os.path.basename(path): PriorDict(filename=path)
        for path in prior_files
    }
class TestSmoothedMassDistribution(unittest.TestCase):
    def setUp(self):
        """Primary-mass/mass-ratio grids and priors, including smoothing."""
        self.m1s = np.linspace(3, 100, 1000)
        self.qs = np.linspace(0.01, 1, 500)
        self.dm = self.m1s[1] - self.m1s[0]
        self.dq = self.qs[1] - self.qs[0]
        grid_m1, grid_q = xp.meshgrid(self.m1s, self.qs)
        self.dataset = dict(mass_1=grid_m1, mass_ratio=grid_q)
        self.power_prior = PriorDict()
        for key, (lo, hi) in dict(
            alpha=(-4, 12), beta=(-4, 12), mmin=(3, 10), mmax=(30, 100)
        ).items():
            self.power_prior[key] = Uniform(minimum=lo, maximum=hi)
        self.gauss_prior = PriorDict()
        for key, (lo, hi) in dict(
            lam=(0, 1), mpp=(20, 60), sigpp=(0, 10)
        ).items():
            self.gauss_prior[key] = Uniform(minimum=lo, maximum=hi)
        self.smooth_prior = PriorDict()
        self.smooth_prior['delta_m'] = Uniform(minimum=0, maximum=10)
        self.n_test = 10

    def test_delta_m_zero_matches_two_component_primary_mass_ratio(self):
        """With delta_m = 0 the smoothed model equals the unsmoothed one."""
        diffs = []
        for _ in range(self.n_test):
            params = self.power_prior.sample()
            params.update(self.gauss_prior.sample())
            unsmoothed = mass.two_component_primary_mass_ratio(
                self.dataset, **params)
            params['delta_m'] = 0
            smoothed = mass.smoothed_two_component_primary_mass_ratio(
                self.dataset, **params)
            diffs.append(_max_abs_difference(unsmoothed, smoothed))
        self.assertAlmostEqual(max(diffs), 0.0)

    def test_normalised(self):
        """The smoothed distribution integrates to one."""
        norms = []
        for _ in range(self.n_test):
            params = self.power_prior.sample()
            params.update(self.gauss_prior.sample())
            params.update(self.smooth_prior.sample())
            prob = mass.smoothed_two_component_primary_mass_ratio(
                self.dataset, **params)
            norms.append(trapz(trapz(prob, self.m1s), self.qs))
        self.assertAlmostEqual(_max_abs_difference(norms, 1.0), 0.0, 2)
def simulate_exact_population_posteriors(sig1=5, sig12=5, number_events=10,
                                         n_samp=10000):
    """Draw exact posterior samples directly from the population prior.

    NOTE(review): the keys here are ``cos_tilt_1``/``cos_theta_12`` while the
    approximate simulator in this file uses ``cos_theta_1``/``cos_theta_12``
    — confirm the naming mismatch is intentional.
    """
    pop_prior = PriorDict(
        dict(
            cos_tilt_1=TruncatedNormal(
                mu=1, sigma=sig1, minimum=-1, maximum=1),
            cos_theta_12=TruncatedNormal(
                mu=1, sigma=sig12, minimum=-1, maximum=1),
        )
    )
    samples = ld_to_dl(
        [pop_prior.sample(n_samp) for _ in range(number_events)]
    )
    return dict(
        trues=[],
        posteriors={key: np.array(val) for key, val in samples.items()},
    )
def setUp(self):
    """2-D grids over spin magnitude and tilt plus a joint spin prior."""
    mags = xp.linspace(0, 1, 1000)
    tilts = xp.linspace(-1, 1, 1000)
    self.a_array = mags
    self.costilts = tilts
    self.test_data = dict(
        a_1=xp.einsum("i,j->ij", mags, xp.ones_like(mags)),
        a_2=xp.einsum("i,j->ji", mags, xp.ones_like(mags)),
        cos_tilt_1=xp.einsum("i,j->ij", tilts, xp.ones_like(tilts)),
        cos_tilt_2=xp.einsum("i,j->ji", tilts, xp.ones_like(tilts)),
    )
    self.prior = PriorDict(
        dict(
            amax=Uniform(0.3, 1),
            alpha_chi=Uniform(1, 4),
            beta_chi=Uniform(1, 4),
            xi_spin=Uniform(0, 1),
            sigma_spin=Uniform(0, 4),
        )
    )
    self.n_test = 100
class TestPrimaryMassRatio(unittest.TestCase):
    def setUp(self):
        """Primary-mass/mass-ratio grids plus power-law and Gaussian priors."""
        self.m1s = np.linspace(3, 100, 1000)
        self.qs = np.linspace(0.01, 1, 500)
        grid_m1, grid_q = xp.meshgrid(self.m1s, self.qs)
        self.dataset = dict(mass_1=grid_m1, mass_ratio=grid_q)
        self.power_prior = PriorDict()
        for key, (lo, hi) in dict(
            alpha=(-4, 12), beta=(-4, 12), mmin=(3, 10), mmax=(40, 100)
        ).items():
            self.power_prior[key] = Uniform(minimum=lo, maximum=hi)
        self.gauss_prior = PriorDict()
        for key, (lo, hi) in dict(
            lam=(0, 1), mpp=(20, 60), sigpp=(0, 10)
        ).items():
            self.gauss_prior[key] = Uniform(minimum=lo, maximum=hi)
        self.n_test = 10

    def test_power_law_primary_mass_ratio_zero_below_mmin(self):
        """Power-law model vanishes where the secondary mass is below mmin."""
        secondary = self.dataset['mass_1'] * self.dataset['mass_ratio']
        for _ in range(self.n_test):
            params = self.power_prior.sample()
            prob = mass.power_law_primary_mass_ratio(self.dataset, **params)
            self.assertEqual(xp.max(prob[secondary <= params['mmin']]), 0.0)

    def test_power_law_primary_mass_ratio_zero_above_mmax(self):
        """Power-law model vanishes where the primary mass exceeds mmax."""
        for _ in range(self.n_test):
            params = self.power_prior.sample()
            prob = mass.power_law_primary_mass_ratio(self.dataset, **params)
            above = self.dataset['mass_1'] >= params['mmax']
            self.assertEqual(xp.max(prob[above]), 0.0)

    def test_two_component_primary_mass_ratio_zero_below_mmin(self):
        """Two-component model vanishes where the secondary mass < mmin."""
        secondary = self.dataset['mass_1'] * self.dataset['mass_ratio']
        for _ in range(self.n_test):
            params = self.power_prior.sample()
            params.update(self.gauss_prior.sample())
            prob = mass.two_component_primary_mass_ratio(
                self.dataset, **params)
            self.assertEqual(xp.max(prob[secondary <= params['mmin']]), 0.0)
def get_priors(self, data):
    """Build priors for the polynomial background coefficients.

    The zeroth coefficient gets a uniform prior up to the maximum flux;
    higher-order coefficients get normal priors whose widths shrink with
    the polynomial order.

    Parameters
    ----------
    data:
        Object providing ``max_flux``, ``range_flux`` and ``duration``.

    Returns
    -------
    PriorDict
    """
    # FIX: use math.factorial directly — the np.math alias was removed
    # in NumPy 2.0, so np.math.factorial raises AttributeError there.
    from math import factorial

    priors = PriorDict()
    name = f'{self.basename}0'
    priors[name] = Uniform(
        0, data.max_flux, name, latex_label='$B_{0}$')
    for ii in range(1, self.n_polynomials):
        name = f'{self.basename}{ii}'
        # width ~ range_flux / (duration^ii * ii!) so higher orders are
        # increasingly tightly constrained
        priors[name] = Normal(
            0, data.range_flux / data.duration ** ii / factorial(ii),
            name, latex_label=f'$B_{{{ii}}}$')
    return priors
def test_wrong_likelihood(self):
    """An unrecognised likelihood name must raise a ValueError."""
    het = HeterodynedData(
        self.data,
        times=self.times,
        detector=self.detector,
        par=self.parfile,
    )
    priors = {"h0": Uniform(0.0, 1.0e-23, "h0")}

    with pytest.raises(ValueError):
        _ = TargetedPulsarLikelihood(
            het, PriorDict(priors), likelihood="blah"
        )
def test_priors(self):
    """Priors with unexpected parameter names must be rejected."""
    # neither "a" nor "b" is a recognised signal parameter
    bad_priors = {"a": Uniform(0.0, 1.0, "blah"), "b": 2.0}
    het = HeterodynedData(
        self.data,
        times=self.times,
        detector=self.detector,
        par=self.parfile,
    )
    with pytest.raises(ValueError):
        _ = TargetedPulsarLikelihood(het, PriorDict(bad_priors))
def convert_to_bilby_res(dat):
    """Convert simulated posteriors and truths into a list of Result objects."""
    posteriors = dl_to_ld(dat['posteriors'])
    trues = dl_to_ld(dat['trues'])
    shared_priors = PriorDict(
        dict(
            cos_theta_12=Uniform(-1, 1),
            cos_tilt_1=Uniform(-1, 1),
            weights=Uniform(0, 1),
        )
    )
    results = []
    for ii in tqdm(range(len(posteriors)), desc="Converting to Results"):
        res = Result()
        res.search_parameter_keys = ['cos_tilt_1', 'cos_theta_12', 'weights']
        res.injection_parameters = trues[ii]
        res.priors = shared_priors
        res.label = dat['labels'][ii]
        res.outdir = "plots"
        res.posterior = pd.DataFrame(posteriors[ii])
        results.append(res)
    return results
def test_likelihood_null_likelihood(self):
    """At h0 = 0 the likelihood must equal the noise-only likelihood."""
    het = HeterodynedData(
        self.data,
        times=self.times,
        detector=self.detector,
        par=self.parfile,
    )
    priors = {"h0": Uniform(0.0, 1.0e-23, "h0")}

    for method in ("gaussian", "studentst"):
        like = TargetedPulsarLikelihood(
            het, PriorDict(priors), likelihood=method
        )
        like.parameters = {"h0": 0.0}
        assert like.log_likelihood() == like.noise_log_likelihood()
def get_priors(self, data):
    """Build the prior dictionary for the pulse model.

    Parameters
    ----------
    data:
        Object providing ``start``, ``end``, ``duration``, ``time_step``,
        ``max_flux``, ``range_flux`` and ``estimate_pulse_time()``.

    Returns
    -------
    PriorDict

    Raises
    ------
    ValueError
        If ``self.beta_type`` is not 'uniform' or 'log-uniform'.

    NOTE(review): this method mutates ``self.beta_min``/``self.beta_max``
    when they are None, so repeated calls with different ``data`` reuse
    the first call's values — confirm this caching is intentional.
    """
    priors = PriorDict()

    # Time-of-arrival prior: a window around an estimated (or fixed)
    # pulse time when the width is fractional, otherwise the full span.
    if self.toa_prior_width < 1:
        if self.toa_prior_time == "auto":
            t0 = data.estimate_pulse_time()
        else:
            t0 = data.start + float(self.toa_prior_time) * data.duration
        dt = data.duration * self.toa_prior_width
        priors[self.toa_key] = Uniform(
            t0 - dt, t0 + dt, self.toa_key,
            latex_label=self.toa_latex_label)
    else:
        priors[self.toa_key] = Uniform(
            data.start, data.end, self.toa_key,
            latex_label=self.toa_latex_label)

    # Width (beta) prior, bounded by the sampling step and the duration.
    if self.beta_min is None:
        self.beta_min = data.time_step
    if self.beta_max is None:
        self.beta_max = data.duration
    if self.beta_type == "uniform":
        priors[self.beta_key] = Uniform(
            self.beta_min, self.beta_max, self.beta_key,
            latex_label=self.beta_latex_label
        )
    elif self.beta_type == "log-uniform":
        priors[self.beta_key] = LogUniform(
            self.beta_min, self.beta_max, self.beta_key,
            latex_label=self.beta_latex_label
        )
    else:
        # FIX: previously raised a bare ValueError() with no message,
        # giving the user no hint about what was wrong.
        raise ValueError(
            f"Unknown beta_type {self.beta_type!r}; "
            "expected 'uniform' or 'log-uniform'"
        )

    # Spike-and-slab priors on the pulse coefficients: a point mass at
    # zero mixed with a broad uniform slab.
    for key in self.coef_keys:
        priors[key] = SpikeAndSlab(
            slab=Uniform(1e-20 * data.max_flux,
                         self.c_max_multiplier * data.range_flux),
            name=key,
            mix=self.c_mix,
        )
    return priors