Example #1
import numpy as np

# Import path assumed from the ctapipe layout, where the pixel
# likelihood helpers live in ctapipe.image.pixel_likelihood.
from ctapipe.image.pixel_likelihood import mean_poisson_likelihood_gaussian


def test_mean_poisson_likelihood_gaussian():
    prediction = np.array([1, 1, 1], dtype="float")
    spe = 0.5

    small_mean_likelihood = mean_poisson_likelihood_gaussian(
        prediction, spe, 0)
    large_mean_likelihood = mean_poisson_likelihood_gaussian(
        prediction, spe, 1)

    assert small_mean_likelihood < large_mean_likelihood
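
The assertion holds because, in the Gaussian limit of the single-pixel Poisson likelihood (de Naurois & Rolland 2009), the measured charge is modelled as normal with variance ped² + μ(1 + spe²); a wider pedestal inflates the variance and therefore the expected −2 ln L. Below is a minimal sketch of that limit; the exact ctapipe implementation may differ in its constant terms.

import numpy as np


def gaussian_limit_nll(image, prediction, spe_width, ped):
    # Gaussian limit of the single-pixel Poisson likelihood
    # (de Naurois & Rolland 2009): the charge is modelled as
    # Normal(mu, ped**2 + mu * (1 + spe_width**2)).  Returns -2 ln L
    # per pixel, up to the choice of constant term.
    var = ped ** 2 + prediction * (1 + spe_width ** 2)
    return (image - prediction) ** 2 / var + np.log(2 * np.pi * var)


mu = np.array([1.0, 1.0, 1.0])
# A wider pedestal inflates the variance, so -2 ln L grows with ped,
# which is what the test above checks in expectation.
assert np.all(gaussian_limit_nll(mu, mu, 0.5, 0.0) <
              gaussian_limit_nll(mu, mu, 0.5, 1.0))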
Example #2
    def get_likelihood(self, source_x, source_y, core_x, core_y,
                       energy, x_max_scale, goodness_of_fit=False):
        """Get the likelihood that the image predicted at the given test
        position matches the camera image.

        Parameters
        ----------
        source_x: float
            Source position of shower in the nominal system (in deg)
        source_y: float
            Source position of shower in the nominal system (in deg)
        core_x: float
            Core position of shower in tilted telescope system (in m)
        core_y: float
            Core position of shower in tilted telescope system (in m)
        energy: float
            Shower energy (in TeV)
        x_max_scale: float
            Scaling factor applied to geometrically calculated Xmax
        goodness_of_fit: boolean
            Determines whether expected likelihood should be subtracted from result
        Returns
        -------
        float: Likelihood the model represents the camera image at this position

        """
        # First we add units back onto everything.  This is not handled
        # very well at the moment; in future we could convert everything
        # to the correct units when loading the class and ignore units
        # from then on.

        zenith = (np.pi / 2) - self.array_direction.alt.to(u.rad).value
        azimuth = self.array_direction.az

        # Geometrically calculate the depth of maximum given this test position
        x_max = self.get_shower_max(source_x, source_y,
                                    core_x, core_y,
                                    zenith)
        x_max *= x_max_scale

        # Calculate expected Xmax given this energy
        x_max_exp = guess_shower_depth(energy)  # / np.cos(20*u.deg)

        # Convert to binning of Xmax
        x_max_bin = x_max - x_max_exp

        # Clamp x_max_bin to the range covered by the templates
        if x_max_bin > 200:
            x_max_bin = 200
        if x_max_bin < -100:
            x_max_bin = -100

        # Calculate impact distance for all telescopes
        impact = np.sqrt(np.power(self.tel_pos_x - core_x, 2)
                         + np.power(self.tel_pos_y - core_y, 2))
        # And the expected rotation angle
        phi = np.arctan2((self.tel_pos_x - core_x),
                         (self.tel_pos_y - core_y)) * u.rad

        # Rotate and translate all pixels such that they match the
        # template orientation
        pix_y_rot, pix_x_rot = self.rotate_translate(
            self.pixel_x,
            self.pixel_y,
            source_x, source_y, phi
        )

        # In the interpolator class we can gain speed advantages by using masked arrays
        # so we need to make sure here everything is masked
        prediction = ma.zeros(self.image.shape)
        prediction.mask = ma.getmask(self.image)

        time_gradients = np.zeros((self.image.shape[0], 2))

        # Loop over all telescope types and get prediction
        for tel_type in np.unique(self.tel_types).tolist():
            type_mask = self.tel_types == tel_type
            prediction[type_mask] = self.image_prediction(
                tel_type,
                energy * np.ones_like(impact[type_mask]),
                impact[type_mask],
                x_max_bin * np.ones_like(impact[type_mask]),
                -1 * pix_x_rot[type_mask] * (180 / math.pi),
                pix_y_rot[type_mask] * (180 / math.pi),
            )

            if self.use_time_gradient:
                time_gradients[type_mask] = \
                    self.predict_time(tel_type,
                                      energy * np.ones_like(impact[type_mask]),
                                      impact[type_mask],
                                      x_max_bin * np.ones_like(impact[type_mask]))

        if self.use_time_gradient:
            time_mask = np.logical_and(np.invert(ma.getmask(self.image)),
                                       self.time > 0)
            # Weight pixels by the square root of their amplitude,
            # zeroing masked pixels and pixels without a valid time
            weight = np.sqrt(self.image) * time_mask
            rv = norm()

            # Weighted least-squares fit of pixel time versus rotated x
            # position, using the closed-form slope:
            #   slope = (S_w * S_xy - S_x * S_y) / (S_w * S_xx - S_x^2)
            sx = pix_x_rot * weight
            sxx = pix_x_rot * pix_x_rot * weight
            sy = self.time * weight
            sxy = self.time * pix_x_rot * weight

            d = weight.sum(axis=1) * sxx.sum(axis=1) - sx.sum(axis=1) * sx.sum(axis=1)
            time_fit = (weight.sum(axis=1) * sxy.sum(axis=1)
                        - sx.sum(axis=1) * sy.sum(axis=1)) / d
            # Convert the slope from per-radian to per-degree and flip
            # its sign to match the template convention
            time_fit /= -1 * (180 / math.pi)
            # Gaussian penalty comparing the fitted time gradient to the
            # predicted gradient and its width
            chi2 = -2 * np.log(rv.pdf((time_fit - time_gradients.T[0])
                                      / time_gradients.T[1]))

        # The likelihood function breaks on NaNs and zeros, so floor the prediction
        prediction[np.isnan(prediction)] = 1e-8
        prediction[prediction < 1e-8] = 1e-8
        prediction *= self.template_scale

        # Get the likelihood that the prediction matches the camera image
        like = poisson_likelihood_gaussian(self.image, prediction, self.spe, self.ped)
        like[np.isnan(like)] = 1e9
        like *= np.invert(ma.getmask(self.image))
        like = ma.MaskedArray(like, mask=ma.getmask(self.image))

        array_like = like
        if goodness_of_fit:
            return np.sum(like - mean_poisson_likelihood_gaussian(prediction, self.spe,
                                                                  self.ped))

        prior_pen = 0
        # Add prior penalties if we have them
        array_like += 1e-8
        if "energy" in self.priors:
            prior_pen += energy_prior(energy, index=-1)
        if "xmax" in self.priors:
            prior_pen += xmax_prior(energy, x_max)

        array_like += prior_pen / float(len(array_like))

        if self.array_return:
            array_like = array_like.ravel()
            return array_like[np.invert(ma.getmask(array_like))]

        final_sum = array_like.sum()
        if self.use_time_gradient:
            final_sum += chi2.sum() #* np.sum(ma.getmask(self.image))

        return final_sum
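
In practice `get_likelihood` serves as the cost function of a shower fit: a minimiser varies the six parameters and this method returns the summed negative log-likelihood. Below is a hypothetical usage sketch; the `reco` object stands in for a fully configured reconstructor (images, pixel positions, and templates already loaded), and `fit_shower` and `seed` are illustrative names, not part of the real API.

import numpy as np
from scipy.optimize import minimize


def fit_shower(reco, seed):
    # Minimise the summed -2 ln L over the six shower parameters.
    def cost(params):
        source_x, source_y, core_x, core_y, energy, x_max_scale = params
        return reco.get_likelihood(source_x, source_y,
                                   core_x, core_y,
                                   energy, x_max_scale)

    result = minimize(cost, np.asarray(seed), method="Nelder-Mead")
    return result.x, result.fun


# seed order: (source_x, source_y, core_x, core_y, energy [TeV], x_max_scale)
# best_params, best_like = fit_shower(reco, seed)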