# Imports assumed for this test module (standard bilby test-suite imports).
import unittest

import numpy as np

from bilby.core.likelihood import Likelihood


class TestLikelihoodBase(unittest.TestCase):

    def setUp(self):
        self.likelihood = Likelihood()

    def tearDown(self):
        del self.likelihood

    def test_repr(self):
        self.likelihood = Likelihood(parameters=["a", "b"])
        expected = "Likelihood(parameters=['a', 'b'])"
        self.assertEqual(expected, repr(self.likelihood))

    def test_base_log_likelihood(self):
        self.assertTrue(np.isnan(self.likelihood.log_likelihood()))

    def test_base_noise_log_likelihood(self):
        self.assertTrue(np.isnan(self.likelihood.noise_log_likelihood()))

    def test_base_log_likelihood_ratio(self):
        self.assertTrue(np.isnan(self.likelihood.log_likelihood_ratio()))

    def test_meta_data_unset(self):
        self.assertEqual(self.likelihood.meta_data, None)

    def test_meta_data_set_fail(self):
        with self.assertRaises(ValueError):
            self.likelihood.meta_data = 10

    def test_meta_data(self):
        meta_data = dict(x=1, y=2)
        self.likelihood.meta_data = meta_data
        self.assertEqual(self.likelihood.meta_data, meta_data)
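# The tests above pin down the base-class contract: log_likelihood,
# noise_log_likelihood and log_likelihood_ratio all return np.nan on the bare
# class, meta_data defaults to None and rejects non-dict values with a
# ValueError, and repr echoes the constructor arguments. The class below is a
# minimal illustrative sketch consistent with that contract, not the actual
# bilby implementation.


class MinimalLikelihood(object):

    def __init__(self, parameters=None):
        self.parameters = parameters
        self._meta_data = None

    def __repr__(self):
        # Matches the string checked in test_repr above.
        return "Likelihood(parameters={})".format(self.parameters)

    def log_likelihood(self):
        return np.nan

    def noise_log_likelihood(self):
        return np.nan

    def log_likelihood_ratio(self):
        # nan - nan is still nan, as test_base_log_likelihood_ratio expects.
        return self.log_likelihood() - self.noise_log_likelihood()

    @property
    def meta_data(self):
        return self._meta_data

    @meta_data.setter
    def meta_data(self, meta_data):
        if isinstance(meta_data, dict):
            self._meta_data = meta_data
        else:
            raise ValueError("meta_data must be a dict")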
def __init__(
    self,
    interferometers,
    waveform_generator,
    priors=None,
    distance_marginalization=True,
    phase_marginalization=True,
    time_marginalization=False,
):
    """
    A likelihood object, able to compute the likelihood of the data given
    some model parameters.

    A frequency-domain gravitational-wave transient likelihood with optional
    distance and phase marginalization. Time marginalization is not
    implemented and is always disabled.

    Parameters
    ----------
    interferometers: list
        A list of `bilby.gw.detector.Interferometer` instances - contains
        the detector data and power spectral densities
    waveform_generator: bilby.gw.waveform_generator.WaveformGenerator
        An object which computes the frequency-domain strain of the signal,
        given some set of parameters
    priors: dict, optional
        A dictionary of priors; required when distance or phase
        marginalization is used, as the corresponding entries are modified
        in place.
    distance_marginalization: bool
        If True, marginalize over luminosity distance; the distance prior is
        replaced by its minimum value.
    phase_marginalization: bool
        If True, marginalize over phase; the phase prior is replaced by 0.0.
    time_marginalization: bool
        Accepted for API compatibility; time marginalization is not
        supported and is always set to False.
    """
    Likelihood.__init__(self, dict())
    self.interferometers = interferometers
    self.waveform_generator = waveform_generator
    self._noise_log_l = np.nan
    self.psds = dict()
    self.strain = dict()
    self._data_to_gpu()
    if priors is None:
        self.priors = priors
    else:
        self.priors = priors.copy()
    self.distance_marginalization = distance_marginalization
    self.phase_marginalization = phase_marginalization
    if self.distance_marginalization:
        self._setup_distance_marginalization()
        priors["luminosity_distance"] = priors["luminosity_distance"].minimum
    if self.phase_marginalization:
        priors["phase"] = 0.0
    self.time_marginalization = False
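# Rough usage sketch for the __init__ above. The detector and waveform set-up
# uses standard bilby calls; the class name GPULikelihood is a placeholder for
# whichever class actually owns this constructor, so this is illustrative
# rather than a verbatim example from the source.

import bilby

interferometers = bilby.gw.detector.InterferometerList(["H1", "L1"])
interferometers.set_strain_data_from_power_spectral_densities(
    sampling_frequency=2048, duration=4, start_time=0
)

waveform_generator = bilby.gw.WaveformGenerator(
    duration=4,
    sampling_frequency=2048,
    frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
)

priors = bilby.gw.prior.BBHPriorDict()

# Distance and phase marginalization also overwrite the corresponding entries
# in `priors` (distance is pinned to its minimum, phase to 0.0), exactly as
# done in the constructor above.
likelihood = GPULikelihood(
    interferometers=interferometers,
    waveform_generator=waveform_generator,
    priors=priors,
    distance_marginalization=True,
    phase_marginalization=True,
)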
def __init__(self, posteriors, hyper_prior, sampling_prior=None,
             ln_evidences=None, max_samples=1e100,
             selection_function=lambda args: 1,
             conversion_function=lambda args: (args, None), cupy=True):
    """
    Parameters
    ----------
    posteriors: list
        A list of pandas data frames, each containing a set of posterior
        samples. Each set may have a different size. These can contain a
        `prior` column containing the original prior values.
    hyper_prior: `bilby.hyper.model.Model`
        The population model, this can alternatively be a function.
    sampling_prior: `bilby.hyper.model.Model`
        *DEPRECATED* The sampling prior, this can alternatively be a
        function.
    ln_evidences: list, optional
        Log evidences for single runs to ensure proper normalisation of the
        hyperparameter likelihood. If not provided, the original evidences
        will be set to 0. This produces a Bayes factor between the sampling
        prior and the hyperparameterised model.
    selection_function: func
        Function which evaluates your population selection function.
    conversion_function: func
        Function which converts a dictionary of sampled parameters to a
        dictionary of parameters of the population model.
    max_samples: int, optional
        Maximum number of samples to use from each set.
    cupy: bool
        If True and a compatible CUDA environment is available, cupy will be
        used for performance. Note: this requires setting up your
        hyper_prior properly.
    """
    if cupy and not CUPY_LOADED:
        logger.warning('Cannot import cupy, falling back to numpy.')
    self.samples_per_posterior = max_samples
    self.data = self.resample_posteriors(posteriors, max_samples=max_samples)
    if not isinstance(hyper_prior, Model):
        hyper_prior = Model([hyper_prior])
    self.hyper_prior = hyper_prior
    Likelihood.__init__(self, hyper_prior.parameters)
    if sampling_prior is not None:
        logger.warning('Passing a sampling_prior is deprecated. This '
                       'should be passed as a column in the posteriors.')
        if not isinstance(sampling_prior, Model):
            sampling_prior = Model([sampling_prior])
        self.sampling_prior = sampling_prior.prob(self.data)
    elif 'prior' in self.data:
        self.sampling_prior = self.data.pop('prior')
    else:
        logger.info('No prior values provided, defaulting to 1.')
        self.sampling_prior = 1
    if ln_evidences is not None:
        self.total_noise_evidence = np.sum(ln_evidences)
    else:
        self.total_noise_evidence = np.nan
    self.conversion_function = conversion_function
    self.selection_function = selection_function
    self.n_posteriors = len(posteriors)
    self.samples_factor = \
        -self.n_posteriors * np.log(self.samples_per_posterior)
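# The samples_factor stored at the end of the constructor above is the
# constant sum_i ln(1 / n_samples) = -n_posteriors * ln(n_samples) that turns
# per-event sums over posterior samples into Monte Carlo averages in the
# hierarchical ("recycling") likelihood. The function below is a schematic
# numpy-only version of that estimator for illustration; it is not the class
# method from the source.


def _sketch_log_likelihood_ratio(hyper_prior_prob, sampling_prior,
                                 n_posteriors, samples_per_posterior):
    """Schematic importance-sampling estimator.

    hyper_prior_prob: array, shape (n_posteriors, samples_per_posterior)
        Population model evaluated at every posterior sample.
    sampling_prior: array of the same shape (or scalar 1)
        Prior used in the original single-event analyses.
    """
    weights = hyper_prior_prob / sampling_prior
    # Sum the weights over samples for each event, take logs, sum over events,
    # then add the constant that converts each sum into an average.
    per_event = np.log(np.sum(weights, axis=-1))
    samples_factor = -n_posteriors * np.log(samples_per_posterior)
    return np.sum(per_event) + samples_factor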
def __init__(
    self,
    posteriors,
    hyper_prior,
    sampling_prior=None,
    ln_evidences=None,
    max_samples=1e100,
    selection_function=lambda args: 1,
    conversion_function=lambda args: (args, None),
    cupy=True,
):
    """
    Parameters
    ----------
    posteriors: list
        A list of pandas data frames, each containing a set of posterior
        samples. Each set may have a different size. These can contain a
        `prior` column containing the original prior values.
    hyper_prior: `bilby.hyper.model.Model`
        The population model, this can alternatively be a function.
    sampling_prior: array-like
        *DEPRECATED* The sampling prior, this can alternatively be a
        function. THIS WILL BE REMOVED IN THE NEXT RELEASE.
    ln_evidences: list, optional
        Log evidences for single runs to ensure proper normalisation of the
        hyperparameter likelihood. If not provided, the original evidences
        will be set to 0. This produces a Bayes factor between the sampling
        prior and the hyperparameterised model.
    selection_function: func
        Function which evaluates your population selection function.
    conversion_function: func
        Function which converts a dictionary of sampled parameters to a
        dictionary of parameters of the population model.
    max_samples: int, optional
        Maximum number of samples to use from each set.
    cupy: bool
        If True and a compatible CUDA environment is available, cupy will be
        used for performance. Note: this requires setting up your
        hyper_prior properly.
    """
    if cupy and not CUPY_LOADED:
        logger.warning("Cannot import cupy, falling back to numpy.")

    self.samples_per_posterior = max_samples
    self.data = self.resample_posteriors(posteriors, max_samples=max_samples)

    if isinstance(hyper_prior, types.FunctionType):
        hyper_prior = Model([hyper_prior])
    elif not (
        hasattr(hyper_prior, "parameters")
        and callable(getattr(hyper_prior, "prob"))
    ):
        raise AttributeError(
            "hyper_prior must either be a function, "
            "or a class with attribute 'parameters' and method 'prob'")
    self.hyper_prior = hyper_prior
    Likelihood.__init__(self, hyper_prior.parameters)

    if sampling_prior is not None:
        raise ValueError(
            "Passing a sampling_prior is deprecated and will be removed "
            "in the next release. This should be passed as a 'prior' "
            "column in the posteriors.")
    elif "prior" in self.data:
        self.sampling_prior = self.data.pop("prior")
    else:
        logger.info("No prior values provided, defaulting to 1.")
        self.sampling_prior = 1

    if ln_evidences is not None:
        self.total_noise_evidence = np.sum(ln_evidences)
    else:
        self.total_noise_evidence = np.nan

    self.conversion_function = conversion_function
    self.selection_function = selection_function

    self.n_posteriors = len(posteriors)
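# Hedged usage sketch for the cupy-aware __init__ above. The import path
# (gwpopulation.hyperpe.HyperparameterLikelihood), the toy Gaussian population
# model and the fake posterior samples are all assumptions made for
# illustration; only the keyword arguments mirror the signature above.

import pandas as pd

from gwpopulation.hyperpe import HyperparameterLikelihood


def gaussian_population(dataset, mu, sigma):
    # Toy population model over a single event-level parameter `x`.
    return np.exp(-((dataset["x"] - mu) ** 2) / (2 * sigma ** 2)) / (
        (2 * np.pi) ** 0.5 * sigma
    )


# Fake single-event posteriors; each carries a `prior` column so the
# deprecated sampling_prior argument is not needed.
posteriors = [
    pd.DataFrame({"x": np.random.normal(0, 1, 500), "prior": np.ones(500)})
    for _ in range(10)
]

hyper_likelihood = HyperparameterLikelihood(
    posteriors=posteriors,
    hyper_prior=gaussian_population,
    ln_evidences=[0.0] * 10,
    max_samples=500,
    cupy=False,
)
hyper_likelihood.parameters.update(dict(mu=0.0, sigma=1.0))
print(hyper_likelihood.log_likelihood_ratio())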