Example 1
 def test_madau_dickinson_normalised(self):
     model = redshift.MadauDickinsonRedshift()
     priors = PriorDict()
     priors["gamma"] = Uniform(-15, 15)
     priors["kappa"] = Uniform(-15, 15)
     priors["z_peak"] = Uniform(0, 5)
     self._run_model_normalisation(model=model, priors=priors)
Example 2
    def test_absolute_overlap(self):
        priors = BBHPriorDict(aligned_spin=True)
        del priors["mass_1"], priors["mass_2"]
        priors["total_mass"] = Uniform(5, 50)
        priors["mass_ratio"] = Uniform(0.5, 1)
        priors["geocent_time"] = Uniform(-10, 10)

        n_samples = 100
        all_parameters = pd.DataFrame(priors.sample(n_samples))
        overlaps = list()

        for ii in range(n_samples):
            parameters = dict(all_parameters.iloc[ii])
            bilby_pols = self.bilby_wfg.frequency_domain_strain(parameters)
            gpu_pols = self.gpu_wfg.frequency_domain_strain(parameters)
            bilby_strain = self.ifo.get_detector_response(
                waveform_polarizations=bilby_pols, parameters=parameters)
            gpu_strain = self.ifo.get_detector_response(
                waveform_polarizations=gpu_pols, parameters=parameters)
            inner_product = noise_weighted_inner_product(
                aa=bilby_strain,
                bb=gpu_strain,
                power_spectral_density=self.ifo.power_spectral_density_array,
                duration=self.duration)
            overlap = (inner_product /
                       self.ifo.optimal_snr_squared(signal=bilby_strain)**0.5 /
                       self.ifo.optimal_snr_squared(signal=gpu_strain)**0.5)
            overlaps.append(overlap)
        self.assertTrue(min(np.abs(overlaps)) > 0.995)
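The quantity being checked above is the standard noise-weighted overlap: the noise-weighted inner product of the two strains normalised by each strain's optimal SNR. A self-contained numpy sketch of that quantity (the PSD array and duration are hypothetical inputs, not taken from the test fixtures):

import numpy as np

def noise_weighted_overlap(a, b, psd, duration):
    """Overlap of two frequency-domain strains for a given one-sided PSD."""
    def inner(x, y):
        # discrete approximation of 4 Re \int x(f) y*(f) / S_n(f) df
        return 4 / duration * np.real(np.sum(x * np.conj(y) / psd))
    return inner(a, b) / np.sqrt(inner(a, a) * inner(b, b))

An overlap close to 1 indicates the two waveforms agree in shape and phase; the normalisation makes it insensitive to an overall amplitude rescaling.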
Example 3
 def setUp(self):
     self.likelihood = GaussianLikelihood(
         x=np.linspace(0, 1, 2),
         y=np.linspace(0, 1, 2),
         func=lambda x, **kwargs: x,
         sigma=1,
     )
     self.priors = PriorDict(dict(a=Uniform(0, 1), b=Uniform(0, 1)))
     self._args = (self.likelihood, self.priors)
     self._kwargs = dict(
         outdir="outdir",
         label="label",
         use_ratio=False,
         plot=False,
         skip_import_verification=True,
     )
     self.sampler = Ptemcee(*self._args, **self._kwargs)
     self.expected = dict(
         ntemps=10,
         nwalkers=100,
         Tmax=None,
         betas=None,
         a=2.0,
         adaptation_lag=10000,
         adaptation_time=100,
         random=None,
         adapt=False,
         swap_ratios=False,
     )
Example 4
 def setUp(self):
     self.costilts = xp.linspace(-1, 1, 1000)
     self.test_data = dict(
         cos_tilt_1=xp.einsum("i,j->ij", self.costilts, xp.ones_like(self.costilts)),
         cos_tilt_2=xp.einsum("i,j->ji", self.costilts, xp.ones_like(self.costilts)),
     )
     self.prior = PriorDict(dict(xi_spin=Uniform(0, 1), sigma_spin=Uniform(0, 4)))
     self.n_test = 100
Example 5
    def test_bilby_to_lalinference(self):
        mass_1 = [1, 20]
        mass_2 = [1, 20]
        chirp_mass = [1, 5]
        mass_ratio = [0.125, 1]

        bilby_prior = BBHPriorDict(dictionary=dict(
            chirp_mass=Uniform(name='chirp_mass', minimum=chirp_mass[0], maximum=chirp_mass[1]),
            mass_ratio=Uniform(name='mass_ratio', minimum=mass_ratio[0], maximum=mass_ratio[1]),
            mass_2=Constraint(name='mass_2', minimum=mass_1[0], maximum=mass_1[1]),
            mass_1=Constraint(name='mass_1', minimum=mass_2[0], maximum=mass_2[1])))

        lalinf_prior = BBHPriorDict(dictionary=dict(
            mass_ratio=Constraint(name='mass_ratio', minimum=mass_ratio[0], maximum=mass_ratio[1]),
            chirp_mass=Constraint(name='chirp_mass', minimum=chirp_mass[0], maximum=chirp_mass[1]),
            mass_2=Uniform(name='mass_2', minimum=mass_1[0], maximum=mass_1[1]),
            mass_1=Uniform(name='mass_1', minimum=mass_2[0], maximum=mass_2[1])))

        nsamples = 5000
        bilby_samples = bilby_prior.sample(nsamples)
        bilby_samples, _ = conversion.convert_to_lal_binary_black_hole_parameters(
            bilby_samples)

        # Quicker way to generate LALInference prior samples (rather than specifying Constraint)
        lalinf_samples = []
        while len(lalinf_samples) < nsamples:
            s = lalinf_prior.sample()
            if s["mass_1"] < s["mass_2"]:
                s["mass_1"], s["mass_2"] = s["mass_2"], s["mass_1"]
            if s["mass_2"] / s["mass_1"] > 0.125:
                lalinf_samples.append(s)
        lalinf_samples = pd.DataFrame(lalinf_samples)
        lalinf_samples["mass_ratio"] = lalinf_samples["mass_2"] / lalinf_samples["mass_1"]

        # Construct fake result object
        result = bilby.core.result.Result()
        result.search_parameter_keys = ["mass_ratio", "chirp_mass"]
        result.meta_data = dict()
        result.priors = bilby_prior
        result.posterior = pd.DataFrame(bilby_samples)
        result_converted = bilby.gw.prior.convert_to_flat_in_component_mass_prior(result)

        if "plot" in sys.argv:
            # Useful for debugging
            plt.hist(bilby_samples["mass_ratio"], bins=50, density=True, alpha=0.5)
            plt.hist(result_converted.posterior["mass_ratio"], bins=50, density=True, alpha=0.5)
            plt.hist(lalinf_samples["mass_ratio"], bins=50, alpha=0.5, density=True)
            plt.show()

        # Check that the non-reweighted posteriors fail a KS test
        ks = ks_2samp(bilby_samples["mass_ratio"], lalinf_samples["mass_ratio"])
        print("Non-reweighted KS test = ", ks)
        self.assertFalse(ks.pvalue > 0.05)

        # Check that the reweighted posteriors pass a KS test
        ks = ks_2samp(result_converted.posterior["mass_ratio"], lalinf_samples["mass_ratio"])
        print("Reweighted KS test = ", ks)
        self.assertTrue(ks.pvalue > 0.001)
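As a sanity check on the KS-test logic above, two independent draws from the same bilby prior should pass the same test; a minimal sketch using the same 0.001 p-value threshold (the prior bounds here are chosen only for illustration):

from scipy.stats import ks_2samp
from bilby.core.prior import Uniform

prior = Uniform(0.125, 1, name="mass_ratio")
samples_a = prior.sample(5000)
samples_b = prior.sample(5000)
# draws from identical distributions should (almost always) pass the two-sample KS test
assert ks_2samp(samples_a, samples_b).pvalue > 0.001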
Example 6
 def setUp(self):
     self.a_array = xp.linspace(0, 1, 1000)
     self.test_data = dict(
         a_1=xp.einsum('i,j->ij', self.a_array, xp.ones_like(self.a_array)),
         a_2=xp.einsum('i,j->ji', self.a_array, xp.ones_like(self.a_array)))
     self.prior = PriorDict(
         dict(amax=Uniform(0.3, 1), alpha_chi=Uniform(1, 4),
              beta_chi=Uniform(1, 4)))
     self.n_test = 100
Example 7
    def test_base_distribution(self):
        """
        Test the BaseDistribution object.
        """

        name = "test"

        # test failure for unknown distribution
        with pytest.raises(ValueError):
            BaseDistribution(name, "kjsgdkdgkjgsda")

        # test failure for inappropriate bounds
        with pytest.raises(ValueError):
            BaseDistribution(name, "gaussian", low=0.0, high=-1.0)

        # test failure for unknown hyperparameter name
        with pytest.raises(KeyError):
            hyper = {"mu": [1], "dkgwkufd": [2]}
            BaseDistribution(name, "gaussian", hyperparameters=hyper)

        # test failure with invalid hyperparameter type
        with pytest.raises(TypeError):
            BaseDistribution(name, "gaussian", hyperparameters="blah")

        with pytest.raises(TypeError):
            BaseDistribution(name, "exponential", hyperparameters="blah")

        # test default log_pdf is NaN
        hyper = {"mu": 2.0}
        dist = BaseDistribution(name, "exponential", hyperparameters=hyper)

        assert dist["mu"] == hyper["mu"]
        assert np.isnan(dist.log_pdf({}, 0))
        assert dist.sample({}) is None

        # test failure when getting unknown item
        with pytest.raises(KeyError):
            _ = dist["kgksda"]

        del dist

        # test setter failure
        dist = BaseDistribution(name, "exponential")

        with pytest.raises(KeyError):
            dist["madbks"] = Uniform(0.0, 1.0, "mu")

        # test setter
        dist["mu"] = Uniform(0.0, 1.0, "mu")
        assert isinstance(dist["mu"], Uniform)

        # test failure with invalid hyperparameter type
        with pytest.raises(TypeError):
            hyper = "blah"
            BaseDistribution(name, "deltafunction", hyperparameters=hyper)
Example 8
 def test_resampling_posteriors(self):
     priors = PriorDict(
         dict(a=Uniform(0, 2), b=Uniform(0, 2), c=Uniform(0, 2)))
     samples = priors.sample(100)
     like = HyperparameterLikelihood(
         posteriors=self.data,
         hyper_prior=self.model,
         selection_function=self.selection_function,
         ln_evidences=self.ln_evidences,
     )
     new_samples = like.posterior_predictive_resample(samples=samples)
     for key in new_samples:
         self.assertEqual(new_samples[key].shape, like.data[key].shape)
Example 9
    def test_numba_likelihood(self):
        """
        Test likelihood using numba against the standard likelihood.
        """

        het = HeterodynedData(self.data,
                              times=self.times,
                              detector=self.detector,
                              par=self.parfile)

        priors = dict()
        priors["h0"] = Uniform(0.0, 1.0e-23, "h0")

        for likelihood in ["gaussian", "studentst"]:
            like1 = TargetedPulsarLikelihood(het,
                                             PriorDict(priors),
                                             likelihood=likelihood)
            like1.parameters = {"h0": 1e-24}

            like2 = TargetedPulsarLikelihood(het,
                                             PriorDict(priors),
                                             likelihood=likelihood,
                                             numba=True)
            like2.parameters = {"h0": 1e-24}

            assert like1.log_likelihood() == like2.log_likelihood()
Example 10
    def test_exponential(self):
        """
        Test the ExponentialDistribution class.
        """

        name = "test"

        dist = ExponentialDistribution(name, mu=1.0)
        assert dist["mu"] == 1.0
        assert dist.fixed["mu"] is True

        dist = ExponentialDistribution(name, mu=Uniform(0.0, 1.0, "mu"))

        value = -1.0
        hyper = {"mu": 0.5}
        assert isinstance(dist["mu"], Uniform)
        assert dist.fixed["mu"] is False
        assert dist.log_pdf(value, hyper) == -np.inf
        assert np.exp(dist.log_pdf(value, hyper)) == dist.pdf(value, hyper)

        # check drawn sample is within bounds
        assert dist.low < dist.sample(hyper) < dist.high

        # draw multiple samples
        N = 100
        samples = dist.sample(hyper, size=N)
        assert len(samples) == N
        assert np.all((samples > dist.low) & (samples < dist.high))

        value = 1.0
        hyper = {"kgsdg": 0.5}
        with pytest.raises(KeyError):
            dist.log_pdf(value, hyper)
Example 11
    def test_wrong_inputs(self):
        """
        Test that exceptions are raised for incorrect inputs to the
        TargetedPulsarLikelihood.
        """

        with pytest.raises(TypeError):
            TargetedPulsarLikelihood(None, None)

        # create HeterodynedData object (no par file)
        het = HeterodynedData(self.data,
                              times=self.times,
                              detector=self.detector)

        priors = dict()
        priors["h0"] = Uniform(0.0, 1.0e-23, "h0")

        # error with no par file
        with pytest.raises(ValueError):
            TargetedPulsarLikelihood(het, PriorDict(priors))

        het = HeterodynedData(self.data,
                              times=self.times,
                              detector=self.detector,
                              par=self.parfile)
        mhet = MultiHeterodynedData(het)  # multihet object for testing

        with pytest.raises(TypeError):
            TargetedPulsarLikelihood(het, None)

        with pytest.raises(TypeError):
            TargetedPulsarLikelihood(mhet, None)
Example 12
    def test_includephase_likelihood(self):
        """
        Test the likelihood when include phase is set to True.
        """

        het = HeterodynedData(self.data,
                              times=self.times,
                              detector=self.detector,
                              par=self.parfile)

        priors = dict()
        priors["h0"] = Uniform(0.0, 1.0e-23, "h0")

        # run with includephase as False
        like1 = TargetedPulsarLikelihood(het,
                                         PriorDict(priors),
                                         likelihood="studentst")
        like1.parameters = {"h0": 1e-24}

        logl1 = like1.log_likelihood()

        # set includephase to True
        like2 = TargetedPulsarLikelihood(het,
                                         PriorDict(priors),
                                         likelihood="studentst")
        like2.parameters = {"h0": 1e-24}
        like2.include_phase = True

        logl2 = like2.log_likelihood()

        print(f"{logl1:.15f} {logl2:.15f}")

        assert np.allclose([logl1], [logl2], atol=1e-10, rtol=0.0)
Example 13
    def test_create_distribution(self):
        """
        Test the create_distribution() function.
        """

        name = "test"
        with pytest.raises(ValueError):
            create_distribution(name, "kjbskdvakvkd")

        with pytest.raises(TypeError):
            create_distribution(name, 2.3)

        gausskwargs = {"mus": [1.0, 2.0], "sigmas": [1.0, 2.0]}
        dist = create_distribution(name, "Gaussian", gausskwargs)

        assert isinstance(dist, BoundedGaussianDistribution)
        assert (dist["mu0"] == gausskwargs["mus"][0]
                and dist["mu1"] == gausskwargs["mus"][1])
        assert (dist["sigma0"] == gausskwargs["sigmas"][0]
                and dist["sigma1"] == gausskwargs["sigmas"][1])
        del dist

        expkwargs = {"mu": Uniform(0.0, 1.0, "mu")}
        dist = create_distribution(name, "Exponential", expkwargs)
        assert isinstance(dist, ExponentialDistribution)
        assert dist["mu"] == expkwargs["mu"]

        newdist = create_distribution(name, dist)
        assert isinstance(newdist, ExponentialDistribution)
        assert newdist["mu"] == dist["mu"]

        deltakwargs = {"peak": Uniform(0.0, 1.0, "peak")}
        dist = create_distribution(name, "DeltaFunction", deltakwargs)
        assert isinstance(dist, DeltaFunctionDistribution)
        assert dist["peak"] == deltakwargs["peak"]

        powerlawkwargs = {
            "alpha": Uniform(-1, 1, name="alpha"),
            "minimum": 0.00001,
            "maximum": 1000.0,
        }
        dist = create_distribution(name, "PowerLaw", powerlawkwargs)
        assert isinstance(dist, PowerLawDistribution)
        assert dist["alpha"] == powerlawkwargs["alpha"]
        assert dist["minimum"] == powerlawkwargs["minimum"]
        assert dist["maximum"] == powerlawkwargs["maximum"]
Example 14
    def test_powerlaw(self):
        """
        Test the PowerLawDistribution class.
        """

        name = "test"

        dist = PowerLawDistribution(name, alpha=1.0, minimum=0.1, maximum=10.0)
        assert dist["alpha"] == 1.0
        assert dist.fixed["alpha"] is True
        assert dist["minimum"] == 0.1
        assert dist.fixed["minimum"] is True
        assert dist["maximum"] == 10.0
        assert dist.fixed["maximum"] is True

        # test out of bounds
        with pytest.raises(ValueError):
            PowerLawDistribution(name, alpha=1.0, minimum=-1.0, maximum=10.0)

        with pytest.raises(ValueError):
            PowerLawDistribution(name,
                                 alpha=1.0,
                                 minimum=-np.inf,
                                 maximum=10.0)

        with pytest.raises(ValueError):
            PowerLawDistribution(name, alpha=1.0, minimum=1.0, maximum=0.5)

        with pytest.raises(ValueError):
            PowerLawDistribution(name, alpha=1.0, minimum=1.0, maximum=-np.inf)

        minimum = 0.001
        maximum = 10.0
        dist = PowerLawDistribution(name,
                                    alpha=Uniform(0.0, 1.0, "alpha"),
                                    minimum=minimum,
                                    maximum=maximum)

        value = -1.0
        hyper = {"alpha": 0.5}
        assert isinstance(dist["alpha"], Uniform)
        assert dist.fixed["alpha"] is False
        assert dist.log_pdf(value, hyper) == -np.inf
        assert np.exp(dist.log_pdf(value, hyper)) == dist.pdf(value, hyper)

        # check drawn sample is within bounds
        assert minimum < dist.sample(hyper) < maximum

        # draw multiple samples
        N = 100
        samples = dist.sample(hyper, size=N)
        assert len(samples) == N
        assert np.all((samples > minimum) & (samples < maximum))

        value = 1.0
        hyper = {"kgsdg": 0.5}
        with pytest.raises(KeyError):
            dist.log_pdf(value, hyper)
Example 15
def convert_to_bilby_res(dat):
    posteriors = dl_to_ld(dat['posteriors'])
    trues = dl_to_ld(dat['trues'])
    results = []
    p = PriorDict(
        dict(cos_theta_12=Uniform(-1, 1),
             cos_tilt_1=Uniform(-1, 1),
             weights=Uniform(0, 1)))
    for i in tqdm(range(len(posteriors)), desc="Converting to Results"):
        r = Result()
        r.search_parameter_keys = ['cos_tilt_1', 'cos_theta_12', 'weights']
        r.injection_parameters = trues[i]
        r.priors = p
        r.label = dat['labels'][i]
        r.outdir = "plots"
        r.posterior = pd.DataFrame(posteriors[i])
        results.append(r)
    return results
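convert_to_bilby_res relies on a helper dl_to_ld that is not shown; from the way it is used it presumably converts a dict of lists into a list of dicts, one per event. A minimal sketch under that assumption (hypothetical, not the original helper):

def dl_to_ld(dl):
    # assumed behaviour: dict of equal-length lists -> list of dicts, one per index
    keys = list(dl.keys())
    return [dict(zip(keys, row)) for row in zip(*(dl[key] for key in keys))]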
Example 16
 def setUp(self):
     self.a_array = xp.linspace(0, 1, 1000)
     self.costilts = xp.linspace(-1, 1, 1000)
     self.test_data = dict(
         a_1=xp.einsum("i,j->ij", self.a_array, xp.ones_like(self.a_array)),
         a_2=xp.einsum("i,j->ji", self.a_array, xp.ones_like(self.a_array)),
         cos_tilt_1=xp.einsum("i,j->ij", self.costilts, xp.ones_like(self.costilts)),
         cos_tilt_2=xp.einsum("i,j->ji", self.costilts, xp.ones_like(self.costilts)),
     )
     self.prior = PriorDict(
         dict(
             amax=Uniform(0.3, 1),
             alpha_chi=Uniform(1, 4),
             beta_chi=Uniform(1, 4),
             xi_spin=Uniform(0, 1),
             sigma_spin=Uniform(0, 4),
         )
     )
     self.n_test = 100
Example 17
    def get_priors(self, data):
        priors = PriorDict()

        # Set up the TOA prior
        if self.toa_prior_width < 1:
            if self.toa_prior_time == "auto":
                t0 = data.estimate_pulse_time()
            else:
                t0 = data.start + float(self.toa_prior_time) * data.duration
            dt = data.duration * self.toa_prior_width
            priors[self.toa_key] = Uniform(
                t0 - dt, t0 + dt, self.toa_key, latex_label=self.toa_latex_label)
        else:
            priors[self.toa_key] = Uniform(
                data.start, data.end, self.toa_key, latex_label=self.toa_latex_label)

        # Set up the beta prior
        if self.beta_min is None:
            self.beta_min = data.time_step
        if self.beta_max is None:
            self.beta_max = data.duration

        if self.beta_type == "uniform":
            priors[self.beta_key] = Uniform(
                self.beta_min, self.beta_max, self.beta_key, latex_label=self.beta_latex_label
            )
        elif self.beta_type == "log-uniform":
            priors[self.beta_key] = LogUniform(
                self.beta_min, self.beta_max, self.beta_key, latex_label=self.beta_latex_label
            )
        else:
            raise ValueError()

        # Set up the coefficient prior
        for key in self.coef_keys:
            priors[key] = SpikeAndSlab(
                slab=Uniform(1e-20 * data.max_flux, self.c_max_multiplier * data.range_flux),
                name=key,
                mix=self.c_mix,
            )
        return priors
Example 18
def create_population_prior(pop_parameters, prior_path):
    """Remove tilt angle priors, replace with population priors"""
    prior = bilby.prior.PriorDict(filename=prior_path)
    for param in ['cos_tilt_1', 'cos_tilt_2', 'phi_12', 'theta_jn', 'phi_jl']:
        prior.pop(param)
    for i in [1, 12]:
        kwargs = dict(mu=1, minimum=-1, maximum=1)
        prior[f'cos_theta_{i}'] = TruncatedNormal(
            sigma=pop_parameters[f'sigma_{i}'], **kwargs)
    prior[f'phi_1'] = Uniform(name='phi_1',
                              minimum=0,
                              maximum=2 * np.pi,
                              boundary='periodic')
    prior[f'phi_z_s12'] = Uniform(name='phi_z_s12',
                                  minimum=0,
                                  maximum=2 * np.pi,
                                  boundary='periodic')
    prior[f'incl'] = Uniform(name='incl',
                             minimum=0,
                             maximum=2 * np.pi,
                             boundary='periodic')
    return prior
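A hypothetical usage sketch of create_population_prior; the prior file path and sigma values below are placeholders, and the file is assumed to define the spin-angle priors that the function removes:

# placeholders: "precessing_bbh.prior" and the sigma values are not from the original code
pop_parameters = {"sigma_1": 0.5, "sigma_12": 1.0}
prior = create_population_prior(pop_parameters, "precessing_bbh.prior")
samples = prior.sample(1000)  # dict of arrays, including cos_theta_1, cos_theta_12, phi_1, ...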
Example 19
    def get_priors(self, data):
        priors = PriorDict()

        name = f'{self.basename}0'
        priors[name] = Uniform(
            0, data.max_flux, name, latex_label='$B_{0}$')
        for ii in range(1, self.n_polynomials):
            name = f'{self.basename}{ii}'
            priors[name] = Normal(
                0,
                data.range_flux / data.duration ** ii / np.math.factorial(ii),
                name,
                latex_label=f'$B_{{{ii}}}$')
        return priors
Example 20
 def setUp(self):
     self.ms = np.linspace(3, 100, 1000)
     self.dm = self.ms[1] - self.ms[0]
     m1s_grid, m2s_grid = xp.meshgrid(self.ms, self.ms)
     self.dataset = dict(mass_1=m1s_grid, mass_2=m2s_grid)
     self.power_prior = PriorDict()
     self.power_prior["alpha"] = Uniform(minimum=-4, maximum=12)
     self.power_prior["beta"] = Uniform(minimum=-4, maximum=12)
     self.power_prior["mmin"] = Uniform(minimum=3, maximum=10)
     self.power_prior["mmax"] = Uniform(minimum=40, maximum=100)
     self.gauss_prior = PriorDict()
     self.gauss_prior["lam"] = Uniform(minimum=0, maximum=1)
     self.gauss_prior["mpp"] = Uniform(minimum=20, maximum=60)
     self.gauss_prior["sigpp"] = Uniform(minimum=0, maximum=10)
     self.n_test = 10
Example 21
 def setUp(self):
     self.m1s = np.linspace(3, 100, 1000)
     self.qs = np.linspace(0.01, 1, 500)
     m1s_grid, qs_grid = xp.meshgrid(self.m1s, self.qs)
     self.dataset = dict(mass_1=m1s_grid, mass_ratio=qs_grid)
     self.power_prior = PriorDict()
     self.power_prior["alpha"] = Uniform(minimum=-4, maximum=12)
     self.power_prior["beta"] = Uniform(minimum=-4, maximum=12)
     self.power_prior["mmin"] = Uniform(minimum=3, maximum=10)
     self.power_prior["mmax"] = Uniform(minimum=40, maximum=100)
     self.gauss_prior = PriorDict()
     self.gauss_prior["lam"] = Uniform(minimum=0, maximum=1)
     self.gauss_prior["mpp"] = Uniform(minimum=20, maximum=60)
     self.gauss_prior["sigpp"] = Uniform(minimum=0, maximum=10)
     self.n_test = 10
Example 22
    def test_wrong_likelihood(self):
        """
        Test with a bad likelihood name.
        """

        het = HeterodynedData(self.data,
                              times=self.times,
                              detector=self.detector,
                              par=self.parfile)

        priors = dict()
        priors["h0"] = Uniform(0.0, 1.0e-23, "h0")

        with pytest.raises(ValueError):
            _ = TargetedPulsarLikelihood(het,
                                         PriorDict(priors),
                                         likelihood="blah")
Example 23
    def test_priors(self):
        """
        Test the parsed priors.
        """

        # bad priors (unexpected parameter names)
        priors = dict()
        priors["a"] = Uniform(0.0, 1.0, "blah")
        priors["b"] = 2.0

        het = HeterodynedData(self.data,
                              times=self.times,
                              detector=self.detector,
                              par=self.parfile)

        with pytest.raises(ValueError):
            _ = TargetedPulsarLikelihood(het, PriorDict(priors))
Example 24
 def setUp(self):
     self.m1s = np.linspace(3, 100, 1000)
     self.qs = np.linspace(0.01, 1, 500)
     self.dm = self.m1s[1] - self.m1s[0]
     self.dq = self.qs[1] - self.qs[0]
     m1s_grid, qs_grid = xp.meshgrid(self.m1s, self.qs)
     self.dataset = dict(mass_1=m1s_grid, mass_ratio=qs_grid)
     self.power_prior = PriorDict()
     self.power_prior['alpha'] = Uniform(minimum=-4, maximum=12)
     self.power_prior['beta'] = Uniform(minimum=-4, maximum=12)
     self.power_prior['mmin'] = Uniform(minimum=3, maximum=10)
     self.power_prior['mmax'] = Uniform(minimum=30, maximum=100)
     self.gauss_prior = PriorDict()
     self.gauss_prior['lam'] = Uniform(minimum=0, maximum=1)
     self.gauss_prior['mpp'] = Uniform(minimum=20, maximum=60)
     self.gauss_prior['sigpp'] = Uniform(minimum=0, maximum=10)
     self.smooth_prior = PriorDict()
     self.smooth_prior['delta_m'] = Uniform(minimum=0, maximum=10)
     self.n_test = 10
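This setUp provides the (mass_1, mass_ratio) grid together with the spacings dm and dq that a normalisation check can integrate over. A schematic sketch of that kind of check, where model stands in for any mass model with signature model(dataset, **parameters) (a placeholder, not a specific gwpopulation function):

import numpy as np

def check_normalisation(model, dataset, prior, dm, dq, n_test=10, tolerance=1e-2):
    # draw hyper-parameters from the prior and confirm the model integrates to ~1 on the grid
    for _ in range(n_test):
        parameters = prior.sample()
        probability = model(dataset, **parameters)
        norm = float(np.sum(probability) * dm * dq)  # Riemann sum over the (m1, q) grid
        assert abs(norm - 1) < tolerance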
Example 25
 def setUp(self):
     self.m1s = np.linspace(3, 100, 1000)
     self.qs = np.linspace(0.01, 1, 500)
     m1s_grid, qs_grid = xp.meshgrid(self.m1s, self.qs)
     self.dataset = dict(mass_1=m1s_grid, mass_ratio=qs_grid)
     self.power_prior = PriorDict()
     self.power_prior["alpha_1"] = Uniform(minimum=-4, maximum=12)
     self.power_prior["alpha_2"] = Uniform(minimum=-4, maximum=12)
     self.power_prior["beta"] = Uniform(minimum=-4, maximum=12)
     self.power_prior["mmin"] = Uniform(minimum=3, maximum=10)
     self.power_prior["mmax"] = Uniform(minimum=40, maximum=100)
     self.power_prior["break_fraction"] = Uniform(minimum=40, maximum=100)
     self.n_test = 10
Example 26
    def test_likelihood_null_likelihood(self):
        """
        Test likelihood and null likelihood.
        """

        het = HeterodynedData(self.data,
                              times=self.times,
                              detector=self.detector,
                              par=self.parfile)

        priors = dict()
        priors["h0"] = Uniform(0.0, 1.0e-23, "h0")

        for likelihood in ["gaussian", "studentst"]:
            like = TargetedPulsarLikelihood(het,
                                            PriorDict(priors),
                                            likelihood=likelihood)
            like.parameters = {"h0": 0.0}

            assert like.log_likelihood() == like.noise_log_likelihood()
Example 27
    def test_deltafunction(self):
        """
        Test the DeltaFunctionDistribution class.
        """

        name = "test"

        dist = DeltaFunctionDistribution(name, peak=1.0)
        assert dist["peak"] == 1.0
        assert dist.fixed["peak"] is True
        assert np.all(dist.sample(size=10) == 1.0)

        dist = DeltaFunctionDistribution(name, peak=Uniform(0.0, 1.0, "peak"))

        value = 0.1
        hyper = {"peak": 0.5}
        assert isinstance(dist["peak"], Uniform)
        assert dist.fixed["peak"] is False
        assert dist.log_pdf(value, hyper) == -np.inf
        assert np.exp(dist.log_pdf(value, hyper)) == dist.pdf(value, hyper)

        value = 0.5
        assert dist.log_pdf(value, hyper) == 0.0
        assert np.exp(dist.log_pdf(value, hyper)) == dist.pdf(value, hyper)

        # check drawn sample is within bounds
        assert dist.low < dist.sample(hyper) < dist.high

        # draw multiple samples
        N = 100
        samples = dist.sample(hyper, size=N)
        assert len(samples) == N
        assert np.all((samples > dist.low) & (samples < dist.high))

        value = 1.0
        hyper = {"kgsdg": 0.5}
        with pytest.raises(KeyError):
            dist.log_pdf(value, hyper)
Example 28
h0range = [0.0, 1e-23]

# set prior for lalapps_pulsar_parameter_estimation_nested
priorfile = os.path.join(outdir, "{}_prior.txt".format(label))
priorcontent = """H0 uniform {} {}
PHI0 uniform {} {}
PSI uniform {} {}
COSIOTA uniform {} {}
"""
with open(priorfile, "w") as fp:
    fp.write(
        priorcontent.format(*(h0range + phi0range + psirange + cosiotarange)))

# set prior for bilby
priors = OrderedDict()
priors["h0"] = Uniform(h0range[0], h0range[1], "h0", latex_label=r"$h_0$")
priors["phi0"] = Uniform(phi0range[0],
                         phi0range[1],
                         "phi0",
                         latex_label=r"$\phi_0$",
                         unit="rad")
priors["psi"] = Uniform(psirange[0],
                        psirange[1],
                        "psi",
                        latex_label=r"$\psi$",
                        unit="rad")
priors["cosiota"] = Uniform(cosiotarange[0],
                            cosiotarange[1],
                            "cosiota",
                            latex_label=r"$\cos{\iota}$")
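For reference, a bilby Uniform prior has constant density 1/(maximum - minimum) on its support, and a PriorDict draws independent samples for each key; a minimal sketch of both behaviours:

import numpy as np
from bilby.core.prior import PriorDict, Uniform

h0 = Uniform(0.0, 1.0e-23, "h0", latex_label=r"$h_0$")
assert np.isclose(h0.prob(5.0e-24), 1.0 / 1.0e-23)  # constant density inside the support
assert h0.prob(2.0e-23) == 0.0                      # zero density outside the support

priors = PriorDict(dict(h0=h0, cosiota=Uniform(-1, 1, "cosiota")))
samples = priors.sample(100)  # dict of length-100 arrays, one entry per parameter
assert len(samples["h0"]) == 100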
Example 29
def get_prior():
    return PriorDict(
        dict(cos_tilt_1=Uniform(-1, 1, "cos_tilt_1", r"$\cos\theta_{1}$"),
             cos_theta_12=Uniform(-1, 1, "cos_theta_12",
                                  r"$\cos\theta_{12}$")))
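A brief usage sketch of the prior dictionary returned by get_prior above:

priors = get_prior()
samples = priors.sample(500)            # dict of length-500 arrays for both tilt parameters
ln_p = priors.ln_prob(priors.sample())  # log prior density of one draw; -ln(4) for these two Uniform(-1, 1) priors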
Example 30
priorcontent = """C21 uniform {} {}
C22 uniform {} {}
PHI21 uniform {} {}
PHI22 uniform {} {}
PSI uniform {} {}
COSIOTA uniform {} {}
"""
with open(priorfile, "w") as fp:
    fp.write(
        priorcontent.format(
            *(c21range + c22range + phi21range + phi22range + psirange + cosiotarange)
        )
    )

# set prior for bilby
priors = OrderedDict()
priors["c21"] = Uniform(c21range[0], c21range[1], "c21", latex_label=r"$C_{21}$")
priors["c22"] = Uniform(c22range[0], c22range[1], "c22", latex_label=r"$C_{22}$")
priors["phi21"] = Uniform(
    phi21range[0], phi21range[1], "phi21", latex_label=r"$\Phi_{21}$", unit="rad"
)
priors["phi22"] = Uniform(
    phi22range[0], phi22range[1], "phi22", latex_label=r"$\Phi_{22}$", unit="rad"
)
priors["psi"] = Uniform(
    psirange[0], psirange[1], "psi", latex_label=r"$\psi$", unit="rad"
)
priors["cosiota"] = Uniform(
    cosiotarange[0], cosiotarange[1], "cosiota", latex_label=r"$\cos{\iota}$"
)

# run lalapps_pulsar_parameter_estimation_nested