def __init__(self, name):
    """Initialise a source-type luminosity-distance prior.

    Parameters
    ----------
    name: str
        One of 'BBH-powerlaw', 'BNS', 'NSBH' or 'BBH-constant'.

    Raises
    ------
    ValueError
        If ``name`` is not a recognised source type.  (The original code
        silently fell through and crashed later with an UnboundLocalError
        on ``dist``.)
    """
    self.name = name

    # maximum luminosity distance per source type -- presumably Mpc,
    # TODO confirm units against the rest of the project
    max_distance = {
        'BBH-powerlaw': 15000,
        'BNS': 1000,
        'NSBH': 1000,
        'BBH-constant': 15000,
    }
    if name not in max_distance:
        raise ValueError(f"Unknown source type: {name!r}")

    # 'BBH-constant' uses an unconstrained prior dictionary; all other
    # source types constrain the component masses via constrain_m1m2
    if name == 'BBH-constant':
        dist = PriorDict()
    else:
        dist = PriorDict(conversion_function=constrain_m1m2)
    dist['luminosity_distance'] = PowerLaw(
        alpha=2, minimum=1, maximum=max_distance[name])
    self.dist = dist
def test_includephase_likelihood(self):
    """
    Test the likelihood when include_phase is set to True.

    With no phase parameters being sampled the likelihood should be
    identical whether or not the phase is included.
    """
    het = HeterodynedData(
        self.data, times=self.times, detector=self.detector, par=self.parfile)

    priors = dict()
    priors["h0"] = Uniform(0.0, 1.0e-23, "h0")

    # run with include_phase as False (the default)
    like1 = TargetedPulsarLikelihood(
        het, PriorDict(priors), likelihood="studentst")
    like1.parameters = {"h0": 1e-24}
    logl1 = like1.log_likelihood()

    # set include_phase to True
    like2 = TargetedPulsarLikelihood(
        het, PriorDict(priors), likelihood="studentst")
    like2.parameters = {"h0": 1e-24}
    like2.include_phase = True
    logl2 = like2.log_likelihood()

    # NOTE: a leftover debugging print() of the two values was removed
    assert np.allclose([logl1], [logl2], atol=1e-10, rtol=0.0)
def test_numba_likelihood(self):
    """
    Check the numba-compiled likelihood matches the standard one exactly.
    """
    het = HeterodynedData(
        self.data, times=self.times, detector=self.detector, par=self.parfile)

    priors = {"h0": Uniform(0.0, 1.0e-23, "h0")}

    for likelihood in ["gaussian", "studentst"]:
        standard = TargetedPulsarLikelihood(
            het, PriorDict(priors), likelihood=likelihood)
        standard.parameters = {"h0": 1e-24}

        jitted = TargetedPulsarLikelihood(
            het, PriorDict(priors), likelihood=likelihood, numba=True)
        jitted.parameters = {"h0": 1e-24}

        # both code paths must produce bit-identical log-likelihoods
        assert standard.log_likelihood() == jitted.log_likelihood()
def setUp(self):
    """Build mass/mass-ratio grids and hyper-priors for the mass models."""
    self.m1s = np.linspace(3, 100, 1000)
    self.qs = np.linspace(0.01, 1, 500)
    # grid spacings for numerical integration of the models
    self.dm = self.m1s[1] - self.m1s[0]
    self.dq = self.qs[1] - self.qs[0]
    m1s_grid, qs_grid = xp.meshgrid(self.m1s, self.qs)
    self.dataset = dict(mass_1=m1s_grid, mass_ratio=qs_grid)

    # power-law mass model hyper-priors
    self.power_prior = PriorDict(dict(
        alpha=Uniform(minimum=-4, maximum=12),
        beta=Uniform(minimum=-4, maximum=12),
        mmin=Uniform(minimum=3, maximum=10),
        mmax=Uniform(minimum=30, maximum=100),
    ))

    # single Gaussian peak hyper-priors
    self.gauss_prior = PriorDict(dict(
        lam=Uniform(minimum=0, maximum=1),
        mpp=Uniform(minimum=20, maximum=60),
        sigpp=Uniform(minimum=0, maximum=10),
    ))

    # double Gaussian peak hyper-priors
    self.double_gauss_prior = PriorDict(dict(
        lam=Uniform(minimum=0, maximum=1),
        lam_1=Uniform(minimum=0, maximum=1),
        mpp_1=Uniform(minimum=20, maximum=60),
        mpp_2=Uniform(minimum=20, maximum=60),
        sigpp_1=Uniform(minimum=0, maximum=10),
        sigpp_2=Uniform(minimum=0, maximum=10),
    ))

    # low-mass smoothing hyper-prior
    self.smooth_prior = PriorDict(dict(
        delta_m=Uniform(minimum=0, maximum=10),
    ))

    self.n_test = 10
def setUp(self):
    """Build mass/mass-ratio grids and hyper-priors for the mass models."""
    self.m1s = np.linspace(3, 100, 1000)
    self.qs = np.linspace(0.01, 1, 500)
    m1s_grid, qs_grid = xp.meshgrid(self.m1s, self.qs)
    self.dataset = dict(mass_1=m1s_grid, mass_ratio=qs_grid)

    # power-law mass model hyper-priors
    self.power_prior = PriorDict(dict(
        alpha=Uniform(minimum=-4, maximum=12),
        beta=Uniform(minimum=-4, maximum=12),
        mmin=Uniform(minimum=3, maximum=10),
        mmax=Uniform(minimum=40, maximum=100),
    ))

    # Gaussian peak hyper-priors
    self.gauss_prior = PriorDict(dict(
        lam=Uniform(minimum=0, maximum=1),
        mpp=Uniform(minimum=20, maximum=60),
        sigpp=Uniform(minimum=0, maximum=10),
    ))

    self.n_test = 10
def setUp(self):
    """Build a component-mass grid and hyper-priors for the mass models."""
    self.ms = np.linspace(3, 100, 1000)
    # grid spacing for numerical integration
    self.dm = self.ms[1] - self.ms[0]
    m1s_grid, m2s_grid = xp.meshgrid(self.ms, self.ms)
    self.dataset = dict(mass_1=m1s_grid, mass_2=m2s_grid)

    # power-law mass model hyper-priors
    self.power_prior = PriorDict(dict(
        alpha=Uniform(minimum=-4, maximum=12),
        beta=Uniform(minimum=-4, maximum=12),
        mmin=Uniform(minimum=3, maximum=10),
        mmax=Uniform(minimum=40, maximum=100),
    ))

    # Gaussian peak hyper-priors
    self.gauss_prior = PriorDict(dict(
        lam=Uniform(minimum=0, maximum=1),
        mpp=Uniform(minimum=20, maximum=60),
        sigpp=Uniform(minimum=0, maximum=10),
    ))

    self.n_test = 10
def simulate_population_posteriors(
        sig1=5, sig12=5, number_events=10, n_samp=50000, fractional_sigma=1):
    """Simulate single-event posteriors for a population of events drawn
    from truncated-normal spin-orientation priors.

    Parameters
    ----------
    sig1: float
        Width of the truncated normal on ``cos_theta_1``.
    sig12: float
        Width of the truncated normal on ``cos_theta_12``.
    number_events: int
        Number of events to simulate.
    n_samp: int
        Number of posterior samples per event.
    fractional_sigma: float
        Fractional posterior width passed through to ``simulate_posterior``.

    Returns
    -------
    dict
        ``trues`` (injected values) and ``posteriors`` (sample arrays),
        each a dict of numpy arrays keyed by parameter name.
    """
    pop_prior = PriorDict(dict(
        cos_theta_1=TruncatedNormal(mu=1, sigma=sig1, minimum=-1, maximum=1),
        cos_theta_12=TruncatedNormal(mu=1, sigma=sig12, minimum=-1, maximum=1),
    ))
    params = pop_prior.keys()
    posteriors = {p: [] for p in params}
    trues = {p: [] for p in params}
    for _ in range(number_events):
        true = pop_prior.sample()
        # BUG FIX: fractional_sigma was previously hard-coded to 1 here,
        # silently ignoring the function argument
        posterior = simulate_posterior(
            true, n_samples=n_samp, fractional_sigma=fractional_sigma)
        for p in params:
            posteriors[p].append(posterior[p].values)
            trues[p].append(true[p])
    for p in params:
        posteriors[p] = np.array(posteriors[p])
        trues[p] = np.array(trues[p])
    return dict(trues=trues, posteriors=posteriors)
def test_wrong_inputs(self):
    """
    Check TargetedPulsarLikelihood raises for invalid constructor inputs.
    """
    # neither data nor priors supplied
    with pytest.raises(TypeError):
        TargetedPulsarLikelihood(None, None)

    # HeterodynedData with no pulsar parameter file attached
    het = HeterodynedData(self.data, times=self.times, detector=self.detector)

    priors = {"h0": Uniform(0.0, 1.0e-23, "h0")}

    # a missing par file must be rejected
    with pytest.raises(ValueError):
        TargetedPulsarLikelihood(het, PriorDict(priors))

    het = HeterodynedData(
        self.data, times=self.times, detector=self.detector, par=self.parfile)
    mhet = MultiHeterodynedData(het)  # multihet object for testing

    # invalid (None) priors for both data container types
    with pytest.raises(TypeError):
        TargetedPulsarLikelihood(het, None)

    with pytest.raises(TypeError):
        TargetedPulsarLikelihood(mhet, None)
def setUp(self):
    """Create a trivial likelihood/prior pair and a Ptemcee sampler."""
    self.likelihood = GaussianLikelihood(
        x=np.linspace(0, 1, 2),
        y=np.linspace(0, 1, 2),
        func=lambda x, **kwargs: x,
        sigma=1,
    )
    self.priors = PriorDict({"a": Uniform(0, 1), "b": Uniform(0, 1)})
    self._args = (self.likelihood, self.priors)
    self._kwargs = {
        "outdir": "outdir",
        "label": "label",
        "use_ratio": False,
        "plot": False,
        "skip_import_verification": True,
    }
    self.sampler = Ptemcee(*self._args, **self._kwargs)
    # default keyword arguments expected of the Ptemcee wrapper
    self.expected = {
        "ntemps": 10,
        "nwalkers": 100,
        "Tmax": None,
        "betas": None,
        "a": 2.0,
        "adaptation_lag": 10000,
        "adaptation_time": 100,
        "random": None,
        "adapt": False,
        "swap_ratios": False,
    }
def test_madau_dickinson_normalised(self):
    """Check the Madau-Dickinson redshift model is correctly normalised."""
    priors = PriorDict(dict(
        gamma=Uniform(-15, 15),
        kappa=Uniform(-15, 15),
        z_peak=Uniform(0, 5),
    ))
    self._run_model_normalisation(
        model=redshift.MadauDickinsonRedshift(), priors=priors)
def setUp(self):
    """Build 2-D tilt-cosine grids and priors for the spin model."""
    self.costilts = xp.linspace(-1, 1, 1000)
    ones = xp.ones_like(self.costilts)
    # cos_tilt_1 varies along one axis, cos_tilt_2 along the other
    self.test_data = dict(
        cos_tilt_1=xp.einsum("i,j->ij", self.costilts, ones),
        cos_tilt_2=xp.einsum("i,j->ji", self.costilts, ones),
    )
    self.prior = PriorDict(dict(
        xi_spin=Uniform(0, 1),
        sigma_spin=Uniform(0, 4),
    ))
    self.n_test = 100
def load_priors(prior_files):
    """Return a dict of the {prior_file_name: PriorDict}.

    Parameters
    ----------
    prior_files: iterable of str
        Paths to bilby-style prior files.

    Returns
    -------
    dict
        Mapping from each file's basename to its loaded ``PriorDict``.
    """
    # dict comprehension replaces the manual loop with
    # ``loaded_priors.update({...})`` for each entry
    return {
        os.path.basename(prior_file): PriorDict(filename=prior_file)
        for prior_file in prior_files
    }
def setUp(self):
    """Build spin-magnitude grids and priors for the spin-magnitude model."""
    self.a_array = xp.linspace(0, 1, 1000)
    ones = xp.ones_like(self.a_array)
    # a_1 varies along one axis, a_2 along the other
    self.test_data = dict(
        a_1=xp.einsum('i,j->ij', self.a_array, ones),
        a_2=xp.einsum('i,j->ji', self.a_array, ones),
    )
    self.prior = PriorDict(dict(
        amax=Uniform(0.3, 1),
        alpha_chi=Uniform(1, 4),
        beta_chi=Uniform(1, 4),
    ))
    self.n_test = 100
def setUp(self):
    """Build mass/mass-ratio grids and hyper-priors for the mass models."""
    self.m1s = np.linspace(3, 100, 1000)
    self.qs = np.linspace(0.01, 1, 500)
    # grid spacings for numerical integration of the models
    self.dm = self.m1s[1] - self.m1s[0]
    self.dq = self.qs[1] - self.qs[0]
    m1s_grid, qs_grid = xp.meshgrid(self.m1s, self.qs)
    self.dataset = dict(mass_1=m1s_grid, mass_ratio=qs_grid)

    # power-law mass model hyper-priors
    self.power_prior = PriorDict(dict(
        alpha=Uniform(minimum=-4, maximum=12),
        beta=Uniform(minimum=-4, maximum=12),
        mmin=Uniform(minimum=3, maximum=10),
        mmax=Uniform(minimum=30, maximum=100),
    ))

    # Gaussian peak hyper-priors
    self.gauss_prior = PriorDict(dict(
        lam=Uniform(minimum=0, maximum=1),
        mpp=Uniform(minimum=20, maximum=60),
        sigpp=Uniform(minimum=0, maximum=10),
    ))

    # low-mass smoothing hyper-prior
    self.smooth_prior = PriorDict(dict(
        delta_m=Uniform(minimum=0, maximum=10),
    ))

    self.n_test = 10
def setUp(self):
    """Build grids and hyper-priors for the broken power-law mass model."""
    self.m1s = np.linspace(3, 100, 1000)
    self.qs = np.linspace(0.01, 1, 500)
    m1s_grid, qs_grid = xp.meshgrid(self.m1s, self.qs)
    self.dataset = dict(mass_1=m1s_grid, mass_ratio=qs_grid)
    self.power_prior = PriorDict()
    self.power_prior["alpha_1"] = Uniform(minimum=-4, maximum=12)
    self.power_prior["alpha_2"] = Uniform(minimum=-4, maximum=12)
    self.power_prior["beta"] = Uniform(minimum=-4, maximum=12)
    self.power_prior["mmin"] = Uniform(minimum=3, maximum=10)
    self.power_prior["mmax"] = Uniform(minimum=40, maximum=100)
    # BUG FIX: break_fraction is a fraction of the (mmin, mmax) interval and
    # must lie in [0, 1]; the previous bounds (40, 100) appear copy-pasted
    # from the mmax prior
    self.power_prior["break_fraction"] = Uniform(minimum=0, maximum=1)
    self.n_test = 10
def test_resampling_posteriors(self):
    """Resampled posteriors must keep the shape of the stored data."""
    priors = PriorDict(dict(a=Uniform(0, 2), b=Uniform(0, 2), c=Uniform(0, 2)))
    samples = priors.sample(100)
    like = HyperparameterLikelihood(
        posteriors=self.data,
        hyper_prior=self.model,
        selection_function=self.selection_function,
        ln_evidences=self.ln_evidences,
    )
    resampled = like.posterior_predictive_resample(samples=samples)
    for key, values in resampled.items():
        self.assertEqual(values.shape, like.data[key].shape)
def get_priors(self, data):
    """Build priors for the polynomial coefficients.

    The zeroth-order coefficient gets a uniform prior up to the maximum
    flux; higher orders get zero-mean normal priors with widths scaled by
    the data duration and a factorial term (Taylor-like scaling).

    Parameters
    ----------
    data:
        Object exposing ``max_flux``, ``range_flux`` and ``duration``.

    Returns
    -------
    PriorDict
        Priors keyed by ``{basename}{order}``.
    """
    # np.math.factorial relied on the accidentally-exposed np.math alias,
    # which was removed in NumPy 1.25 -- use the stdlib instead
    import math

    priors = PriorDict()
    name = f'{self.basename}0'
    priors[name] = Uniform(
        0, data.max_flux, name, latex_label='$B_{0}$')
    for ii in range(1, self.n_polynomials):
        name = f'{self.basename}{ii}'
        priors[name] = Normal(
            0,
            data.range_flux / data.duration ** ii / math.factorial(ii),
            name,
            latex_label=f'$B_{{{ii}}}$')
    return priors
def test_priors(self):
    """
    Test the parsed priors.
    """
    # bad priors: an unexpected parameter name plus a non-prior value
    priors = {"a": Uniform(0.0, 1.0, "blah"), "b": 2.0}

    het = HeterodynedData(
        self.data, times=self.times, detector=self.detector, par=self.parfile)

    with pytest.raises(ValueError):
        _ = TargetedPulsarLikelihood(het, PriorDict(priors))
def test_wrong_likelihood(self):
    """
    An unrecognised likelihood name must raise a ValueError.
    """
    het = HeterodynedData(
        self.data, times=self.times, detector=self.detector, par=self.parfile)

    priors = PriorDict({"h0": Uniform(0.0, 1.0e-23, "h0")})

    with pytest.raises(ValueError):
        _ = TargetedPulsarLikelihood(het, priors, likelihood="blah")
def simulate_exact_population_posteriors(
        sig1=5, sig12=5, number_events=10, n_samp=10000):
    """Draw 'exact' posteriors directly from the population prior, i.e.
    with no single-event measurement uncertainty.

    Returns
    -------
    dict
        ``trues`` (empty list) and ``posteriors`` (stacked sample arrays
        keyed by parameter name).
    """
    pop_prior = PriorDict(dict(
        cos_tilt_1=TruncatedNormal(mu=1, sigma=sig1, minimum=-1, maximum=1),
        cos_theta_12=TruncatedNormal(mu=1, sigma=sig12, minimum=-1, maximum=1),
    ))
    per_event = [pop_prior.sample(n_samp) for _ in range(number_events)]
    stacked = ld_to_dl(per_event)
    posteriors = {key: np.array(values) for key, values in stacked.items()}
    return dict(trues=[], posteriors=posteriors)
def convert_to_bilby_res(dat):
    """Package simulated posteriors and truths into bilby Result objects."""
    posteriors = dl_to_ld(dat['posteriors'])
    trues = dl_to_ld(dat['trues'])
    shared_priors = PriorDict(dict(
        cos_theta_12=Uniform(-1, 1),
        cos_tilt_1=Uniform(-1, 1),
        weights=Uniform(0, 1),
    ))
    results = []
    for idx in tqdm(range(len(posteriors)), desc="Converting to Results"):
        res = Result()
        res.search_parameter_keys = ['cos_tilt_1', 'cos_theta_12', 'weights']
        res.injection_parameters = trues[idx]
        res.priors = shared_priors
        res.label = dat['labels'][idx]
        res.outdir = "plots"
        res.posterior = pd.DataFrame(posteriors[idx])
        results.append(res)
    return results
def setUp(self):
    """Build spin-magnitude and tilt-cosine grids plus model priors."""
    self.a_array = xp.linspace(0, 1, 1000)
    self.costilts = xp.linspace(-1, 1, 1000)
    ones_a = xp.ones_like(self.a_array)
    ones_ct = xp.ones_like(self.costilts)
    # first component varies along one axis, second along the other
    self.test_data = dict(
        a_1=xp.einsum("i,j->ij", self.a_array, ones_a),
        a_2=xp.einsum("i,j->ji", self.a_array, ones_a),
        cos_tilt_1=xp.einsum("i,j->ij", self.costilts, ones_ct),
        cos_tilt_2=xp.einsum("i,j->ji", self.costilts, ones_ct),
    )
    prior = PriorDict()
    prior["amax"] = Uniform(0.3, 1)
    prior["alpha_chi"] = Uniform(1, 4)
    prior["beta_chi"] = Uniform(1, 4)
    prior["xi_spin"] = Uniform(0, 1)
    prior["sigma_spin"] = Uniform(0, 4)
    self.prior = prior
    self.n_test = 100
def test_likelihood_null_likelihood(self):
    """
    The likelihood at zero signal amplitude must equal the noise likelihood.
    """
    het = HeterodynedData(
        self.data, times=self.times, detector=self.detector, par=self.parfile)

    priors = {"h0": Uniform(0.0, 1.0e-23, "h0")}

    for likelihood in ("gaussian", "studentst"):
        like = TargetedPulsarLikelihood(
            het, PriorDict(priors), likelihood=likelihood)
        like.parameters = {"h0": 0.0}

        assert like.log_likelihood() == like.noise_log_likelihood()
def get_priors(self, data):
    """Construct priors for the pulse time-of-arrival, width (beta) and
    flux coefficients.

    Parameters
    ----------
    data:
        Object exposing ``start``, ``end``, ``duration``, ``time_step``,
        ``max_flux``, ``range_flux`` and ``estimate_pulse_time()``.

    Returns
    -------
    PriorDict

    Raises
    ------
    ValueError
        If ``self.beta_type`` is not 'uniform' or 'log-uniform'.
    """
    priors = PriorDict()

    # --- TOA prior ---
    # a width < 1 means a window around an estimated/specified pulse time;
    # otherwise the whole observation span is used
    if self.toa_prior_width < 1:
        if self.toa_prior_time == "auto":
            t0 = data.estimate_pulse_time()
        else:
            t0 = data.start + float(self.toa_prior_time) * data.duration
        dt = data.duration * self.toa_prior_width
        priors[self.toa_key] = Uniform(
            t0 - dt, t0 + dt, self.toa_key, latex_label=self.toa_latex_label)
    else:
        priors[self.toa_key] = Uniform(
            data.start, data.end, self.toa_key,
            latex_label=self.toa_latex_label)

    # --- beta prior ---
    # NOTE: unset bounds are filled from the data and cached on self
    # (intentional side effect retained from the original implementation)
    if self.beta_min is None:
        self.beta_min = data.time_step
    if self.beta_max is None:
        self.beta_max = data.duration
    if self.beta_type == "uniform":
        priors[self.beta_key] = Uniform(
            self.beta_min,
            self.beta_max,
            self.beta_key,
            latex_label=self.beta_latex_label
        )
    elif self.beta_type == "log-uniform":
        priors[self.beta_key] = LogUniform(
            self.beta_min,
            self.beta_max,
            self.beta_key,
            latex_label=self.beta_latex_label
        )
    else:
        # previously raised a bare ValueError() with no message
        raise ValueError(
            "Unknown beta_type {!r}; expected 'uniform' or "
            "'log-uniform'".format(self.beta_type))

    # --- coefficient priors ---
    for key in self.coef_keys:
        priors[key] = SpikeAndSlab(
            slab=Uniform(1e-20 * data.max_flux,
                         self.c_max_multiplier * data.range_flux),
            name=key,
            mix=self.c_mix,
        )
    return priors
def test_data_input(self):
    """
    Test passing heterodyned input data to ``pe`` in every supported way:
    keyword arguments (detector as keyword, detector embedded in the file
    string, detector as a dict key), the ``data_file_1f``/``data_file_2f``
    variants, and via a written config file; for one and two detectors and
    for one and two frequency harmonics.  All routes must yield equivalent
    data and priors.
    """
    # single detector and single data file
    config = "par-file = {}\ndata-file = {}\nprior = {}\ndata-kwargs={}"
    configfile = "config_test.ini"
    datafile = self.H1file[1]
    datakwargs = {"remove_outliers": False}
    with open(configfile, "w") as fp:
        fp.write(
            config.format(self.parfile, datafile, self.priorfile, datakwargs))

    # no detector specified
    with pytest.raises(ValueError):
        pe(config=configfile)

    with pytest.raises(ValueError):
        pe(par_file=self.parfile, data_file=datafile)

    # not prior file specified
    with pytest.raises(ValueError):
        pe(par_file=self.parfile, data_file=datafile, detector="H1")

    # comparisons
    # pass as keyword arguments (detector as keyword)
    t1kw1 = pe(
        par_file=self.parfile,
        data_file=datafile,
        detector="H1",
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file string)
    t1kw2 = pe(
        par_file=self.parfile,
        data_file="{}:{}".format("H1", datafile),
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file dict)
    t1kw3 = pe(
        par_file=self.parfile,
        data_file={"H1": datafile},
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as config file
    config = (
        "par-file = {}\ndata-file = {}\nprior = {}\ndetector = H1\ndata-kwargs = {}"
    )
    with open(configfile, "w") as fp:
        fp.write(
            config.format(self.parfile, datafile, self.priorfile, datakwargs))
    t1c1 = pe(config=configfile)

    # use the data_file_2f option instead
    t1kw4 = pe(
        par_file=self.parfile,
        data_file_2f=datafile,
        detector="H1",
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file string)
    t1kw5 = pe(
        par_file=self.parfile,
        data_file_2f="{}:{}".format("H1", datafile),
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file dict)
    t1kw6 = pe(
        par_file=self.parfile,
        data_file_2f={"H1": datafile},
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as config file
    config = "par-file = {}\ndata-file-2f = {}\nprior = {}\ndetector = H1\ndata-kwargs = {}"
    with open(configfile, "w") as fp:
        fp.write(
            config.format(self.parfile, datafile, self.priorfile, datakwargs))
    t1c2 = pe(config=configfile)

    # perform consistency checks
    for tv in [t1kw1, t1kw2, t1kw3, t1c1, t1kw4, t1kw5, t1kw6, t1c2]:
        assert len(tv.hetdata) == 1
        assert tv.hetdata["H1"][0].par["F"][0] == self.f0
        assert tv.hetdata.detectors[0] == "H1"
        assert tv.hetdata.freq_factors[0] == 2
        assert np.allclose(tv.hetdata["H1"][0].data.real, self.H1data[1][:, 1])
        assert np.allclose(tv.hetdata["H1"][0].data.imag, self.H1data[1][:, 2])
        assert np.allclose(tv.hetdata["H1"][0].times.value, self.times)
        assert PriorDict(tv.prior) == self.priorbilby

    # now pass two detectors
    # pass as keyword arguments (detector as keyword)
    t2kw1 = pe(
        par_file=self.parfile,
        data_file=[self.H1file[1], self.L1file[1]],
        detector=["H1", "L1"],
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file string)
    t2kw2 = pe(
        par_file=self.parfile,
        data_file=[
            "{}:{}".format("H1", self.H1file[1]),
            "{}:{}".format("L1", self.L1file[1]),
        ],
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file dict)
    t2kw3 = pe(
        par_file=self.parfile,
        data_file={
            "H1": self.H1file[1],
            "L1": self.L1file[1]
        },
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as config file
    config = ("par-file = {}\n"
              "data-file = [{}, {}]\n"
              "prior = {}\n"
              "detector = [H1, L1]\n"
              "data-kwargs = {}")
    with open(configfile, "w") as fp:
        fp.write(
            config.format(
                self.parfile,
                self.H1file[1],
                self.L1file[1],
                self.priorfile,
                datakwargs,
            ))
    t2c1 = pe(config=configfile)

    # use the data_file_2f option instead
    t2kw4 = pe(
        par_file=self.parfile,
        data_file_2f=[self.H1file[1], self.L1file[1]],
        detector=["H1", "L1"],
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file string)
    t2kw5 = pe(
        par_file=self.parfile,
        data_file_2f=[
            "{}:{}".format("H1", self.H1file[1]),
            "{}:{}".format("L1", self.L1file[1]),
        ],
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file dict)
    t2kw6 = pe(
        par_file=self.parfile,
        data_file_2f={
            "H1": self.H1file[1],
            "L1": self.L1file[1]
        },
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as config file
    config = ("par-file = {}\n"
              "data-file-2f = [{}, {}]\n"
              "prior = {}\n"
              "detector = [H1, L1]\n"
              "data-kwargs = {}")
    with open(configfile, "w") as fp:
        fp.write(
            config.format(
                self.parfile,
                self.H1file[1],
                self.L1file[1],
                self.priorfile,
                datakwargs,
            ))
    t2c2 = pe(config=configfile)

    # perform consistency checks
    for tv in [t2kw1, t2kw2, t2kw3, t2c1, t2kw4, t2kw5, t2kw6, t2c2]:
        assert len(tv.hetdata) == 2
        for i, det, data in zip(range(2), ["H1", "L1"],
                                [self.H1data[1], self.L1data[1]]):
            assert tv.hetdata.detectors[i] == det
            assert tv.hetdata.freq_factors[0] == 2
            assert tv.hetdata[det][0].par["F"][0] == self.f0
            assert np.allclose(tv.hetdata[det][0].data.real, data[:, 1])
            assert np.allclose(tv.hetdata[det][0].data.imag, data[:, 2])
            assert np.allclose(tv.hetdata[det][0].times.value, self.times)
        assert PriorDict(tv.prior) == self.priorbilby

    # pass data at 1f
    datafile = self.H1file[0]

    t3kw1 = pe(
        par_file=self.parfile,
        data_file_1f=datafile,
        detector="H1",
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file string)
    t3kw2 = pe(
        par_file=self.parfile,
        data_file_1f="{}:{}".format("H1", datafile),
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file dict)
    t3kw3 = pe(
        par_file=self.parfile,
        data_file_1f={"H1": datafile},
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as config file
    config = "par-file = {}\ndata-file-1f = {}\nprior = {}\ndetector = H1\ndata-kwargs = {}"
    with open(configfile, "w") as fp:
        fp.write(
            config.format(self.parfile, datafile, self.priorfile, datakwargs))
    t3c1 = pe(config=configfile)

    # perform consistency checks
    for tv in [t3kw1, t3kw2, t3kw3, t3c1]:
        assert len(tv.hetdata) == 1
        assert tv.hetdata.detectors[0] == "H1"
        assert tv.hetdata.freq_factors[0] == 1
        assert tv.hetdata["H1"][0].par["F"][0] == self.f0
        assert np.allclose(tv.hetdata["H1"][0].data.real, self.H1data[0][:, 1])
        assert np.allclose(tv.hetdata["H1"][0].data.imag, self.H1data[0][:, 2])
        assert np.allclose(tv.hetdata["H1"][0].times.value, self.times)
        assert PriorDict(tv.prior) == self.priorbilby

    # test with two detectors and two frequencies
    # pass as keyword arguments (detector as keyword)
    t4kw1 = pe(
        par_file=self.parfile,
        data_file_1f=[self.H1file[0], self.L1file[0]],
        data_file_2f=[self.H1file[1], self.L1file[1]],
        detector=["H1", "L1"],
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file string)
    t4kw2 = pe(
        par_file=self.parfile,
        data_file_1f=[
            "{}:{}".format("H1", self.H1file[0]),
            "{}:{}".format("L1", self.L1file[0]),
        ],
        data_file_2f=[
            "{}:{}".format("H1", self.H1file[1]),
            "{}:{}".format("L1", self.L1file[1]),
        ],
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as keyword arguments (detector in data file dict)
    t4kw3 = pe(
        par_file=self.parfile,
        data_file_1f={
            "H1": self.H1file[0],
            "L1": self.L1file[0]
        },
        data_file_2f={
            "H1": self.H1file[1],
            "L1": self.L1file[1]
        },
        prior=self.priorbilby,
        data_kwargs=datakwargs,
    )

    # pass as config file
    config = ("par-file = {}\n"
              "data-file-1f = [{}, {}]\n"
              "data-file-2f = [{}, {}]\n"
              "prior = {}\n"
              "detector = [H1, L1]\n"
              "data-kwargs = {}")
    with open(configfile, "w") as fp:
        fp.write(
            config.format(
                self.parfile,
                self.H1file[0],
                self.L1file[0],
                self.H1file[1],
                self.L1file[1],
                self.priorfile,
                datakwargs,
            ))
    t4c1 = pe(config=configfile)

    # perform consistency checks
    for tv in [t4kw1, t4kw2, t4kw3, t4c1]:
        assert len(tv.hetdata) == 4
        for i, det, data1f, data2f in zip(
                range(2),
                ["H1", "L1"],
                [self.H1data[0], self.L1data[0]],
                [self.H1data[1], self.L1data[1]],
        ):
            assert tv.hetdata.detectors[i] == det
            # index 0 holds the 1f dataset, index 1 the 2f dataset
            assert tv.hetdata[det][0].freq_factor == 1.0
            assert tv.hetdata[det][1].freq_factor == 2.0
            assert tv.hetdata[det][0].par["F"][0] == self.f0
            assert np.allclose(tv.hetdata[det][0].data.real, data1f[:, 1])
            assert np.allclose(tv.hetdata[det][0].data.imag, data1f[:, 2])
            assert np.allclose(tv.hetdata[det][0].times.value, self.times)
            assert tv.hetdata[det][1].par["F"][0] == self.f0
            assert np.allclose(tv.hetdata[det][1].data.real, data2f[:, 1])
            assert np.allclose(tv.hetdata[det][1].data.imag, data2f[:, 2])
            assert np.allclose(tv.hetdata[det][1].times.value, self.times)
        assert PriorDict(tv.prior) == self.priorbilby

    os.remove(configfile)
def setup_class(cls):
    """
    Create data set files for use: simulated H1/L1 heterodyned data at the
    1f and 2f harmonics (text and HDF5 versions), pulsar parameter files
    (noise-only and with signal parameters), and a bilby prior file.
    """
    seed = 88523  # random seed
    start = 1000000000  # GPS start
    end = 1000086400  # GPS end
    step = 60  # time step size

    # time stamp array
    cls.times = np.arange(start, end, step)
    size = len(cls.times)

    # create pulsar parameter file
    cls.f0 = 100.1  # frequency
    parcontent = ("PSRJ J0341-1253\n"
                  "F0 {}\n"
                  "F1 6.5e-12\n"
                  "RAJ 03:41:00.0\n"
                  "DECJ -12:53:00.0\n"
                  "PEPOCH 56789")
    cls.parfile = "pe_test.par"
    with open(cls.parfile, "w") as fp:
        fp.write(parcontent.format(cls.f0))

    # set random seed
    np.random.seed(seed)

    # create simulated H1 data (at 1 and 2f)
    cls.H1data = []
    cls.H1file = []
    cls.H1fileh5 = []
    H1sigma = 1e-24  # noise standard deviation for H1
    for i in [1, 2]:
        # columns: time, real part, imaginary part
        cls.H1data.append(
            np.vstack((
                cls.times,
                H1sigma * np.random.randn(size),
                H1sigma * np.random.randn(size),
            )).T)
        cls.H1file.append("H1data{}f.txt".format(i))
        np.savetxt(cls.H1file[-1], cls.H1data[-1])

        # create HDF5 version of data
        cls.H1fileh5.append("H1data{}f.hdf5".format(i))
        hd = HeterodynedData(data=cls.H1data[-1], detector="H1", par=cls.parfile)
        hd.write(cls.H1fileh5[-1])

    # create simulated L1 data
    cls.L1data = []
    cls.L1file = []
    cls.L1fileh5 = []
    L1sigma = 0.7e-24  # noise standard deviation for L1
    for i in [1, 2]:
        cls.L1data.append(
            np.vstack((
                cls.times,
                L1sigma * np.random.randn(size),
                L1sigma * np.random.randn(size),
            )).T)
        cls.L1file.append("L1data{}f.txt".format(i))
        np.savetxt(cls.L1file[-1], cls.L1data[-1])

        # create HDF5 version of data
        cls.L1fileh5.append("L1data{}f.hdf5".format(i))
        hd = HeterodynedData(data=cls.L1data[-1], detector="L1", par=cls.parfile)
        hd.write(cls.L1fileh5[-1])

    # create a pulsar parameter file containing GW signal parameters
    # (for comparison with lalapps_pulsar_parameter_estimation_nested)
    parcontent = ("PSRJ J0341-1253\n"
                  "F0 {}\n"
                  "F1 6.5e-12\n"
                  "RAJ 03:41:00.0\n"
                  "DECJ -12:53:00.0\n"
                  "PEPOCH 56789\n"
                  "C21 6.2e-24\n"
                  "C22 3.4e-25\n"
                  "PHI21 0.4\n"
                  "PHI22 1.3\n"
                  "PSI 1.1\n"
                  "IOTA 0.9\n"
                  "UNITS TCB")
    cls.parfilesig = "pe_test_sig.par"
    with open(cls.parfilesig, "w") as fp:
        fp.write(parcontent.format(cls.f0))

    # set data pre-produced using lalapps_pulsar_parameter_estimation_nested
    # with the same parameter file
    cls.sigH11f = np.loadtxt(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "data",
            "inj_test.txt_H1_1.0_signal_only",
        ))
    cls.sigL11f = np.loadtxt(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "data",
            "inj_test.txt_L1_1.0_signal_only",
        ))
    cls.sigH12f = np.loadtxt(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "data",
            "inj_test.txt_H1_2.0_signal_only",
        ))
    cls.sigL12f = np.loadtxt(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "data",
            "inj_test.txt_L1_2.0_signal_only",
        ))

    # create a prior file
    cls.priorfile = "pe_test.prior"
    cls.priormin = 0.0
    cls.priormax = 1e-22
    priorcontent = "h0 = Uniform(name='h0', minimum={}, maximum={})"
    with open(cls.priorfile, "w") as fp:
        fp.write(priorcontent.format(cls.priormin, cls.priormax))
    cls.priorbilby = PriorDict(cls.priorfile)
def get_prior():
    """Return uniform priors on the two spin-orientation cosines."""
    return PriorDict({
        "cos_tilt_1": Uniform(-1, 1, "cos_tilt_1", r"$\cos\theta_{1}$"),
        "cos_theta_12": Uniform(-1, 1, "cos_theta_12", r"$\cos\theta_{12}$"),
    })
def test_powerlaw_normalised(self):
    """Check the power-law redshift model is correctly normalised."""
    priors = PriorDict(dict(lamb=Uniform(-15, 15)))
    self._run_model_normalisation(
        model=redshift.PowerLawRedshift(), priors=priors)
priors["q22"] = Uniform(q22range[0], q22range[1], "q22", latex_label=r"$Q_{22}$") priors["phi0"] = Uniform( phi0range[0], phi0range[1], "phi0", latex_label=r"$\phi_0$", unit="rad" ) priors["psi"] = Uniform( psirange[0], psirange[1], "psi", latex_label=r"$\psi$", unit="rad" ) priors["cosiota"] = Uniform( cosiotarange[0], cosiotarange[1], "cosiota", latex_label=r"$\cos{\iota}$" ) Nlive = 1024 # number of nested sampling live points # run bilby for i, het in enumerate(hets): # set the likelihood for bilby likelihood = TargetedPulsarLikelihood(het, PriorDict(priors)) thislabel = "{}_{}".format(label, i) # run bilby result = bilby.run_sampler( likelihood=likelihood, priors=priors, sampler="cpnest", nlive=Nlive, outdir=outdir, label=thislabel, use_ratio=False, )
Dictionary containing sampled parameter values, 'RM1_radm2', 'RM1_radm2', 'fracPol1', 'fracPol2' Returns ------- dict: Dictionary with constraint parameter 'delta_RM1_RM2_radm2' and 'sum_p1_p2' added. """ converted_parameters = parameters.copy() converted_parameters['delta_RM1_RM2_radm2'] = parameters[ 'RM1_radm2'] - parameters['RM2_radm2'] converted_parameters[ 'sum_p1_p2'] = parameters['fracPol1'] + parameters['fracPol2'] return converted_parameters priors = PriorDict(conversion_function=converter) priors['fracPol1'] = bilby.prior.Uniform( minimum=0.001, maximum=1.0, name='fracPol1', latex_label='$p_1$', ) priors['fracPol2'] = bilby.prior.Uniform( minimum=0.001, maximum=1.0, name='fracPol2', latex_label='$p_2$', ) priors['psi01_deg'] = bilby.prior.Uniform(