def get_randomized_source_counts(self, source_model_counts):
        """Draw a randomized realization of the source counts.

        Gaussian variates are drawn per channel around ``source_model_counts``,
        using the observed count errors as sigma. Channels with a zero error
        stay at zero; negative draws are clipped to zero with a warning.
        """
        # Only channels with a positive measurement error can be randomized;
        # zero-error channels keep zero counts.
        valid = self._spectrum_plugin.observed_count_errors > 0

        randomized = np.zeros_like(source_model_counts)

        randomized[valid] = np.random.normal(
            loc=source_model_counts[valid],
            scale=self._spectrum_plugin.observed_count_errors[valid],
        )

        # Gaussian draws can go below zero, which is unphysical for counts:
        # clip them to zero and warn.

        negative = randomized < 0  # type: np.ndarray

        n_negative = nb_sum(negative)

        if n_negative > 0:
            log.warning(
                "Generated source has negative counts "
                "in %i channels. Fixing them to zero" % (n_negative)
            )

            randomized[negative] = 0

        return randomized
# Ejemplo n.º 2
    def get_randomized_background_counts(self):
        """Draw a randomized realization of the background model counts.

        Gaussian variates are drawn per channel around the current background
        model expectation, with the measured background count errors as sigma.
        Channels with a zero error stay at zero; negative draws are clipped to
        zero with a warning.
        """
        # Current background model expectation (the likelihood value itself
        # is not needed here).
        _, background_model_counts = self.get_current_value()

        # We cannot generate variates with zero sigma: those channels would
        # always come out zero anyway. This is a limitation of this whole
        # idea; however, by construction an error of zero is only allowed
        # when the background counts are zero as well.
        valid = self._spectrum_plugin.background_count_errors > 0

        randomized = np.zeros_like(background_model_counts)

        randomized[valid] = np.random.normal(
            loc=background_model_counts[valid],
            scale=self._spectrum_plugin.background_count_errors[valid],
        )

        # Gaussian draws can go below zero, which is unphysical for counts:
        # clip them to zero and warn.

        negative = randomized < 0  # type: np.ndarray

        n_negative = nb_sum(negative)

        if n_negative > 0:
            log.warning(
                "Generated background has negative counts "
                "in %i channels. Fixing them to zero" % (n_negative)
            )

            randomized[negative] = 0

        return randomized
# Ejemplo n.º 3
    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        """Poisson-observed / Gaussian-background log-likelihood.

        :param precalc_fluxes: optional precalculated integral fluxes,
            forwarded to the plugin's model evaluation
        :return: (summed log-likelihood, profiled background model counts)
        """
        plugin = self._spectrum_plugin

        model = plugin.get_model(precalc_fluxes=precalc_fluxes)

        per_channel, profiled_bkg = poisson_observed_gaussian_background(
            plugin.current_observed_counts,
            plugin.current_background_counts,
            plugin.current_background_count_errors,
            model,
        )

        return nb_sum(per_channel), profiled_bkg
# Ejemplo n.º 4
    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        """Poisson-observed / Poisson-background log-likelihood.

        :param precalc_fluxes: optional precalculated integral fluxes,
            forwarded to the plugin's model evaluation
        :return: (summed log-likelihood, profiled background model counts)
        """
        plugin = self._spectrum_plugin

        model = plugin.get_model(precalc_fluxes=precalc_fluxes)

        # scale_factor is the scale between the source and background spectrum
        per_channel, profiled_bkg = poisson_observed_poisson_background(
            plugin.current_observed_counts,
            plugin.current_background_counts,
            plugin.scale_factor,
            model,
        )

        return nb_sum(per_channel), profiled_bkg
# Ejemplo n.º 5
    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        """Gaussian (chi-square) log-likelihood for the observed counts.

        :param precalc_fluxes: optional precalculated integral fluxes,
            forwarded to the plugin's model evaluation
        :return: (negative summed half-chi2 as the log-likelihood, None —
            no profiled background for this statistic)
        :raises AssertionError: if any chi2 value is non-finite
        """
        model_counts = self._spectrum_plugin.get_model(precalc_fluxes=precalc_fluxes)

        chi2_ = half_chi2(
            self._spectrum_plugin.current_observed_counts,
            self._spectrum_plugin.current_observed_count_errors,
            model_counts,
        )

        # A bare `assert` is stripped under `python -O`, silently disabling
        # this validity check.  Raise explicitly instead; AssertionError is
        # kept so any callers catching it still work.
        if not np.all(np.isfinite(chi2_)):
            raise AssertionError("chi2 values are not all finite")

        # half_chi2 is positive-valued; the log-likelihood is its negative.
        return nb_sum(chi2_) * (-1), None
# Ejemplo n.º 6
    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        """Poisson log-likelihood with an ideal (noiseless) background.

        The background becomes part of the model, which means its
        uncertainty is completely neglected.

        :param precalc_fluxes: optional precalculated integral fluxes,
            forwarded to the plugin's model evaluation
        :return: (summed log-likelihood, None — no profiled background)
        """
        plugin = self._spectrum_plugin

        model = plugin.get_model(precalc_fluxes=precalc_fluxes)

        per_channel, _ = poisson_log_likelihood_ideal_bkg(
            plugin.current_observed_counts,
            plugin.current_scaled_background_counts,
            model,
        )

        return nb_sum(per_channel), None
# Ejemplo n.º 7
    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        """Poisson log-likelihood with a modeled background plugin.

        The background becomes part of the model (its uncertainty is
        neglected in the Poisson term), and the background plugin's own
        log-likelihood is added to the total.

        :param precalc_fluxes: optional precalculated integral fluxes,
            forwarded to the plugin's model evaluation
        :return: (total log-likelihood, None — no profiled background)
        """
        plugin = self._spectrum_plugin

        model = plugin.get_model(precalc_fluxes=precalc_fluxes)

        # Rescale the background plugin's model to this observation
        scaled_bkg_model = plugin.get_background_model() * plugin.scale_factor

        per_channel, _ = poisson_log_likelihood_ideal_bkg(
            plugin.current_observed_counts,
            scaled_bkg_model,
            model,
        )

        # Add the background plugin's own likelihood contribution
        total = nb_sum(per_channel) + plugin.background_plugin.get_log_like()

        return total, None
# Ejemplo n.º 8
    def _log_like(self, trial_values):
        """Compute the total log-likelihood summed over all datasets.

        ``trial_values`` is accepted for interface compatibility with the
        fitting engine/sampler; the free-parameter values are assumed to have
        been set on the likelihood model before this is called.

        Returns ``-np.inf`` when a ModelAssertionViolation is raised by a
        plugin (parameters outside the allowed zone) or when the summed
        log-likelihood is not finite; otherwise returns the finite sum.
        """

        # Get the value of the log-likelihood for this parameters

        try:

            # One slot per plugin; filled below
            log_like_values = np.zeros(self._n_plugins)

            # Loop over each dataset and get the likelihood values for each set
            if not self._share_spectrum:
                # Old way; every dataset independendly - This is fine if the
                # spectrum calc is fast.

                for i, dataset in enumerate(self._data_list.values()):

                    log_like_values[i] = dataset.get_log_like()

            else:
                # If the calculation for the input spectrum of one of the sources is expensive
                # we want to avoid calculating the same thing several times.

                # Precalc the spectrum for all different Ebin_in that are used in the plugins
                precalc_fluxes = []

                # One entry per distinct set of input-energy edges; None marks
                # edge sets for which no precalculation is needed.
                for base_key, e_edges in zip(
                        self._share_spectrum_object.base_plugin_key,
                        self._share_spectrum_object.data_ein_edges):
                    if e_edges is None:
                        precalc_fluxes.append(None)
                    else:
                        precalc_fluxes.append(
                            self._data_list[base_key]._integral_flux())

                # Use these precalculated spectra to get the log_like for all plugins
                for i, dataset in enumerate(list(self._data_list.values())):
                    # call get log_like with precalculated spectrum
                    # data_ebin_connect[i] maps plugin i to its entry in the
                    # shared edge/flux lists above
                    if self._share_spectrum_object.data_ein_edges[
                            self._share_spectrum_object.
                            data_ebin_connect[i]] is not None:
                        log_like_values[i] = dataset.get_log_like(
                            precalc_fluxes=precalc_fluxes[
                                self._share_spectrum_object.
                                data_ebin_connect[i]])
                    else:
                        # No shared edges for this plugin: plain evaluation
                        log_like_values[i] = dataset.get_log_like()

        except ModelAssertionViolation:

            # Fit engine or sampler outside of allowed zone

            return -np.inf

        except:

            # We don't want to catch more serious issues

            raise

        # Sum the values of the log-like

        log_like = nb_sum(log_like_values)

        if not np.isfinite(log_like):
            # Issue warning with the offending parameter values to aid debugging
            keys = self._likelihood_model.free_parameters.keys()
            params = [
                f"{key}: {self._likelihood_model.free_parameters[key].value}"
                for key in keys
            ]

            log.warning(
                f"Likelihood value is infinite for parameters: {params}")

            return -np.inf

        return log_like