Code example #1
import numpy as np
from scipy.special import gammaln, xlogy


def logfactorial(n):
    # log(n!) = gammaln(n + 1); 3ML ships an equivalent helper, reproduced
    # here so that these snippets can run standalone
    return gammaln(n + 1)


def poisson_observed_poisson_background(observed_counts, background_counts,
                                        exposure_ratio, expected_model_counts):

    # TODO: check this with simulations

    # Just a name change to make writing formulas a little easier

    alpha = exposure_ratio
    b = background_counts
    o = observed_counts
    M = expected_model_counts

    # Nuisance parameter for Poisson likelihood
    # NOTE: B_mle is zero when b is zero!

    sqr = np.sqrt(4 * (alpha + alpha**2) * b * M + ((alpha + 1) * M - alpha *
                                                    (o + b))**2)

    B_mle = 1 / (2.0 * alpha * (1 + alpha)) * (alpha * (o + b) -
                                               (alpha + 1) * M + sqr)

    # Profile likelihood

    loglike = xlogy(o, alpha*B_mle + M) + xlogy(b, B_mle) - (alpha+1) * B_mle - M - \
              logfactorial(b) - logfactorial(o)

    return loglike, B_mle * alpha
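As a sanity check (not part of the 3ML source), the closed form above follows from profiling the joint Poisson likelihood over the nuisance background rate B, where o ~ Poisson(alpha B + M) and b ~ Poisson(B):

    \log L(B) = o \log(\alpha B + M) + b \log B - (\alpha + 1) B - M - \log o! - \log b!

    \frac{\partial \log L}{\partial B} = \frac{\alpha o}{\alpha B + M} + \frac{b}{B} - (\alpha + 1) = 0

Clearing denominators gives the quadratic \alpha (1 + \alpha) B^2 - \left[ \alpha (o + b) - (\alpha + 1) M \right] B - b M = 0, whose non-negative root is exactly the B_mle computed in the snippet:

    B_{\mathrm{mle}} = \frac{\alpha (o + b) - (\alpha + 1) M + \sqrt{\left[ (\alpha + 1) M - \alpha (o + b) \right]^2 + 4 \alpha (1 + \alpha) b M}}{2 \alpha (1 + \alpha)}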
Code example #2
    def inner_fit(self):
        '''
        This is used for the profile likelihood. Keeping all parameters in the
        modelManager fixed, this method minimizes the logLike over the remaining
        nuisance parameters, i.e., the parameters that belong only to the model
        for this particular detector
        '''
        self._updateGtlikeModel()

        if not self.innerMinimization:

            log_like = self.like.logLike.value()

        else:

            try:
                # Use .optimize instead of .fit because we don't need the errors
                # (.optimize is faster than .fit)
                self.like.optimize(0)
            except Exception:
                # This is necessary because sometimes fitting algorithms go and explore extreme region of the
                # parameter space, which might turn out to give strange model shapes and therefore
                # problems in the likelihood fit
                print("Warning: failed likelihood fit (probably parameters are too extreme).")
                return 1e5
            else:
                # Update the value for the nuisance parameters
                for par in self.nuisance_parameters.values():
                    newValue = self.getNuisanceParameterValue(par.name)
                    par.value = newValue

                log_like = self.like.logLike.value()

        return log_like - logfactorial(self.like.total_nobs())
Code example #3
File: GenericOGIPLike.py Project: sybenzvi/3ML
    def _computeLogLike(self, modelCounts):

        # Keep only bins where the model is positive so the log is defined;
        # bins with modelCounts <= 0 are silently excluded from the sum
        idx = modelCounts > 0

        return numpy.sum(
            -modelCounts[idx] + self.counts[idx] * numpy.log(modelCounts[idx]) - logfactorial(self.counts[idx])
        )
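For comparison, here is a hypothetical standalone variant (not from any of the projects above, and using the logfactorial helper defined in code example #1) that replaces the mask with scipy.special.xlogy: xlogy(0, 0) returns 0, so bins where both the counts and the model vanish contribute nothing, while a bin with counts > 0 but a non-positive model produces -inf or NaN, flagging an invalid model instead of silently dropping the bin as the masked version does.

import numpy
from scipy.special import xlogy

def compute_log_like_xlogy(counts, modelCounts):
    # Hypothetical variant of _computeLogLike above; counts and modelCounts
    # are numpy arrays of the same shape
    return numpy.sum(
        -modelCounts + xlogy(counts, modelCounts) - logfactorial(counts)
    )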
Code example #4
File: FermiLATLike.py Project: sybenzvi/3ML
    def inner_fit(self):
        '''
        This is used for the profile likelihood. Keeping all parameters in the
        modelManager fixed, this method minimizes the logLike over the remaining
        nuisance parameters, i.e., the parameters that belong only to the model
        for this particular detector
        '''
        self._updateGtlikeModel()

        if not self.innerMinimization:

            log_like = self.like.logLike.value()

        else:

            try:
                # Use .optimize instead of .fit because we don't need the errors
                # (.optimize is faster than .fit)
                self.like.optimize(0)
            except Exception:
                # This is necessary because sometimes fitting algorithms go and explore extreme region of the
                # parameter space, which might turn out to give strange model shapes and therefore
                # problems in the likelihood fit
                print("Warning: failed likelihood fit (probably parameters are too extreme).")
                return 1e5
            else:
                # Update the value for the nuisance parameters
                for par in self.nuisanceParameters.values():
                    newValue = self.getNuisanceParameterValue(par.name)
                    par.value = newValue

                log_like = self.like.logLike.value()

        return log_like - logfactorial(self.like.total_nobs())
Code example #5
def poisson_observed_gaussian_background(observed_counts, background_counts,
                                         background_error,
                                         expected_model_counts):

    # This loglike assumes Gaussian errors on the background and Poisson uncertainties on the
    # observed counts. It is a profile likelihood.

    MB = background_counts + expected_model_counts
    s2 = background_error**2  # type: np.ndarray

    b = 0.5 * (np.sqrt(MB**2 - 2 * s2 *
                       (MB - 2 * observed_counts) + background_error**4) +
               background_counts - expected_model_counts - s2
               )  # type: np.ndarray

    # Now there are two branches: when the background is 0 we are in the normal situation of a pure
    # Poisson likelihood, while when the background is not zero we use the profile likelihood

    # NOTE: bkgErr can be 0 only when also bkgCounts = 0
    # Also it is evident from the expression above that when bkgCounts = 0 and bkgErr=0 also b=0

    # Let's do the branch with background > 0 first

    idx = background_counts > 0

    log_likes = np.empty_like(expected_model_counts)

    log_likes[idx] = (
        -(b[idx] - background_counts[idx])**2 / (2 * s2[idx]) +
        observed_counts[idx] * np.log(b[idx] + expected_model_counts[idx]) -
        b[idx] - expected_model_counts[idx] -
        logfactorial(observed_counts[idx]) - 0.5 * np.log(2 * np.pi) -
        np.log(background_error[idx]))

    # Let's do the other branch

    nidx = ~idx

    # This is the Poisson likelihood with no background; xlogy returns 0
    # when both arguments are 0, so zero-count bins need no special handling
    log_likes[nidx] = xlogy(observed_counts[nidx], expected_model_counts[nidx]) - \
                      expected_model_counts[nidx] - logfactorial(observed_counts[nidx])

    return log_likes, b
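The same sanity check (again, not part of the 3ML source) works here: maximize over the true background rate B the sum of a Gaussian term for the background measurement and a Poisson term for the observed counts,

    \log L(B) = -\frac{(B - b_{\mathrm{obs}})^2}{2 \sigma^2} + o \log(B + M) - (B + M) - \log o! - \frac{1}{2} \log(2\pi) - \log \sigma

    \frac{\partial \log L}{\partial B} = -\frac{B - b_{\mathrm{obs}}}{\sigma^2} + \frac{o}{B + M} - 1 = 0

Clearing denominators gives a quadratic in B; writing MB = b_obs + M and s2 = sigma^2 as in the code, its positive root is the b of the snippet:

    B = \frac{1}{2} \left[ \sqrt{MB^2 - 2 \sigma^2 (MB - 2 o) + \sigma^4} + b_{\mathrm{obs}} - M - \sigma^2 \right]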
Code example #6
File: likelihood_functions.py Project: giacomov/3ML
def poisson_observed_gaussian_background(observed_counts, background_counts, background_error, expected_model_counts):

    # This loglike assumes Gaussian errors on the background and Poisson uncertainties on the
    # observed counts. It is a profile likelihood.

    MB = background_counts + expected_model_counts
    s2 = background_error ** 2 # type: np.ndarray

    b = 0.5 * (np.sqrt(MB ** 2 - 2 * s2 * (MB - 2 * observed_counts) + background_error ** 4)
               + background_counts - expected_model_counts - s2) # type: np.ndarray

    # Now there are two branches: when the background is 0 we are in the normal situation of a pure
    # Poisson likelihood, while when the background is not zero we use the profile likelihood

    # NOTE: bkgErr can be 0 only when also bkgCounts = 0
    # Also it is evident from the expression above that when bkgCounts = 0 and bkgErr=0 also b=0

    # Let's do the branch with background > 0 first

    idx = background_counts > 0

    log_likes = np.empty_like(expected_model_counts)

    log_likes[idx] = (-(b[idx] - background_counts[idx]) ** 2 / (2 * s2[idx])
                      + observed_counts[idx] * np.log(b[idx] + expected_model_counts[idx])
                      - b[idx] - expected_model_counts[idx] - logfactorial(observed_counts[idx])
                      - 0.5 * np.log(2 * np.pi) - np.log(background_error[idx]))

    # Let's do the other branch

    nidx = ~idx

    # This is the Poisson likelihood with no background; xlogy returns 0
    # when both arguments are 0, so zero-count bins need no special handling
    log_likes[nidx] = xlogy(observed_counts[nidx], expected_model_counts[nidx]) - \
                      expected_model_counts[nidx] - logfactorial(observed_counts[nidx])

    return log_likes, b
Code example #7
File: FermiLATLike.py Project: sybenzvi/3ML
    def get_log_like(self):
        '''
        Return the value of the log-likelihood with the current values for the
        parameters stored in the ModelManager instance
        '''
        self._updateGtlikeModel()

        value = self.like.logLike.value()

        return value - logfactorial(self.like.total_nobs())
Code example #8
File: likelihood_functions.py Project: giacomov/3ML
def poisson_observed_poisson_background(observed_counts, background_counts, exposure_ratio, expected_model_counts):

    # TODO: check this with simulations

    # Just a name change to make writing formulas a little easier

    alpha = exposure_ratio
    b = background_counts
    o = observed_counts
    M = expected_model_counts

    # Nuisance parameter for Poisson likelihood
    # NOTE: B_mle is zero when b is zero!

    sqr = np.sqrt(4 * (alpha + alpha ** 2) * b * M + ((alpha + 1) * M - alpha * (o + b)) ** 2)

    B_mle = 1 / (2.0 * alpha * (1+alpha)) * (alpha * (o + b) - (alpha+1) * M + sqr)

    # Profile likelihood

    loglike = xlogy(o, alpha*B_mle + M) + xlogy(b, B_mle) - (alpha+1) * B_mle - M - \
              logfactorial(b) - logfactorial(o)

    return loglike, B_mle * alpha
Code example #9
    def get_log_like(self):
        '''
        Return the value of the log-likelihood with the current values for the
        parameters stored in the ModelManager instance
        '''
        self._updateGtlikeModel()

        value = self.like.logLike.value()

        return value - logfactorial(self.like.total_nobs())
Code example #10
File: HAL.py Project: elijah-umd/hawc_hal
    def _compute_likelihood_biases(self):

        for bin_label in self._maptree:

            data_analysis_bin = self._maptree[bin_label]

            this_log_factorial = np.sum(logfactorial(data_analysis_bin.observation_map.as_partial()))
            self._log_factorials[bin_label] = this_log_factorial

            # As the bias we use the likelihood value of the saturated model,
            # i.e. the model whose expectation equals the observed counts
            obs = data_analysis_bin.observation_map.as_partial()
            bkg = data_analysis_bin.background_map.as_partial()

            sat_model = np.clip(obs - bkg, 1e-50, None).astype(np.float64)

            self._saturated_model_like_per_maptree[bin_label] = log_likelihood(obs, bkg, sat_model) - this_log_factorial
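For context, here is a minimal sketch of what this bias computes. The helper name log_likelihood is the one used above, but its body here is an editorial assumption (the Poisson log-likelihood without the log-factorial term, which the loop subtracts separately), not HAL's actual implementation:

import numpy as np
from scipy.special import xlogy

def log_likelihood(obs, bkg, model):
    # Assumed form of HAL's helper: per-pixel Poisson term, summed
    expectation = bkg + model
    return np.sum(xlogy(obs, expectation) - expectation)

# The saturated model makes the expectation equal to the observed counts
# wherever possible, which maximizes the Poisson likelihood; the clip keeps
# it positive in pixels where obs < bkg
obs = np.array([12.0, 3.0, 0.0])
bkg = np.array([10.0, 4.0, 0.5])
sat_model = np.clip(obs - bkg, 1e-50, None)
saturated_like = log_likelihood(obs, bkg, sat_model)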
Code example #11
File: HAL.py Project: jlzhang001/hawc_hal
    def _compute_likelihood_biases(self):

        for i, data_analysis_bin in enumerate(self._maptree):

            this_log_factorial = np.sum(
                logfactorial(data_analysis_bin.observation_map.as_partial()))
            self._log_factorials[i] = this_log_factorial

            # As the bias we use the likelihood value of the saturated model
            obs = data_analysis_bin.observation_map.as_partial()
            bkg = data_analysis_bin.background_map.as_partial()

            sat_model = np.maximum(obs - bkg, 1e-30).astype(np.float64)

            self._saturated_model_like_per_maptree[i] = log_likelihood(
                obs, bkg, sat_model) - this_log_factorial
Code example #12
File: FermiLATLike.py Project: giacomov/3ML
    def get_log_like(self):
        '''
        Return the value of the log-likelihood with the current values for the
        parameters stored in the ModelManager instance
        '''

        self._updateGtlikeModel()

        if self.fit_nuisance_params:

            for parameter in self.nuisance_parameters:
                self.setNuisanceParameterValue(parameter, self.nuisance_parameters[parameter].value)

            self.like.syncSrcParams()
            
        log_like = self.like.logLike.value()

        return log_like - logfactorial(self.like.total_nobs())
Code example #13
File: FermiLATLike.py Project: Husky22/threeML
    def get_log_like(self):
        '''
        Return the value of the log-likelihood with the current values for the
        parameters stored in the ModelManager instance
        '''

        self._updateGtlikeModel()

        if self.fit_nuisance_params:

            for parameter in self.nuisance_parameters:
                self.setNuisanceParameterValue(parameter, self.nuisance_parameters[parameter].value)

            self.like.syncSrcParams()
            
        log_like = self.like.logLike.value()

        return log_like - logfactorial(self.like.total_nobs())
Code example #14
File: FermipyLike.py Project: janbolmer/threeML
    def get_log_like(self):
        '''
        Return the value of the log-likelihood with the current values for the
        parameters stored in the ModelManager instance
        '''

        # Update all sources on the fermipy side
        self._update_model_in_fermipy()

        # Get value of the log likelihood

        value = self._gta.like.logLike.value()

        return value - logfactorial(self._gta.like.total_nobs())
Code example #15
def poisson_log_likelihood_ideal_bkg(observed_counts, expected_bkg_counts, expected_model_counts):
    """
    Poisson log-likelihood for the case where the background has no uncertainties:

    \log L = \sum_{i=0}^{N} \left[ o_i \log(m_i + b_i) - (m_i + b_i) - \log(o_i!) \right]

    :param observed_counts:
    :param expected_bkg_counts:
    :param expected_model_counts:
    :return: (log_like vector, background vector)
    """

    # Model predicted counts
    # In this likelihood the background becomes part of the model, which means that
    # the uncertainty in the background is completely neglected

    predicted_counts = expected_bkg_counts + expected_model_counts

    log_likes = xlogy(observed_counts, predicted_counts) - predicted_counts - logfactorial(observed_counts)

    return log_likes, expected_bkg_counts
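A hypothetical usage sketch (the numbers are made up; imports and helpers as in the earlier examples):

import numpy as np

observed = np.array([3.0, 0.0, 7.0])
bkg = np.array([1.2, 0.5, 2.0])
model = np.array([2.0, 0.1, 4.5])

log_likes, b = poisson_log_likelihood_ideal_bkg(observed, bkg, model)
total_log_like = log_likes.sum()  # the function returns per-bin values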
Code example #16
File: likelihood_functions.py Project: giacomov/3ML
def poisson_log_likelihood_ideal_bkg(observed_counts, expected_bkg_counts, expected_model_counts):
    """
    Poisson log-likelihood for the case where the background has no uncertainties:

    \log L = \sum_{i=0}^{N} \left[ o_i \log(m_i + b_i) - (m_i + b_i) - \log(o_i!) \right]

    :param observed_counts:
    :param expected_bkg_counts:
    :param expected_model_counts:
    :return: (log_like vector, background vector)
    """

    # Model predicted counts
    # In this likelihood the background becomes part of the model, which means that
    # the uncertainty in the background is completely neglected

    predicted_counts = expected_bkg_counts + expected_model_counts

    log_likes = xlogy(observed_counts, predicted_counts) - predicted_counts - logfactorial(observed_counts)

    return log_likes, expected_bkg_counts
Code example #17
File: FermiGBMLike.py Project: hayalaso/3ML
    def _computeLogLike(self, modelCounts):

        # Note: unlike the masked version in code example #3, this assumes
        # modelCounts > 0 in every bin; otherwise the log is undefined
        return numpy.sum(-modelCounts
                         + self.counts * numpy.log(modelCounts)
                         - logfactorial(self.counts))