Example #1
    def __init__(self, name):

        # Create the dictionary of nuisance parameters;
        # these hold the sky and background fractions
        # for the SPI mask fit.
        # TODO: I have just set them to arbitrary values

        self._nuisance_parameters = collections.OrderedDict()

        param_name = "%s_sky" % name

        self._nuisance_parameters[param_name] = Parameter(param_name,
                                                          1.0,
                                                          min_value=0.5,
                                                          max_value=1.5,
                                                          delta=0.01)
        self._nuisance_parameters[param_name].fix = True

        param_name = "%s_bkg" % name

        self._nuisance_parameters[param_name] = Parameter(param_name,
                                                          1.0,
                                                          min_value=0.5,
                                                          max_value=1.5,
                                                          delta=0.01)
        self._nuisance_parameters[param_name].fix = True

        super(SPILike, self).__init__(name, self._nuisance_parameters)
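
For context, the nuisance parameters above are plain astromodels Parameter objects. A minimal, standalone sketch of the same construction (the name "spi" and the loop over suffixes are illustrative, not part of the original plugin):

    import collections

    from astromodels import Parameter

    name = "spi"  # placeholder plugin/detector name
    nuisance_parameters = collections.OrderedDict()

    for suffix in ("sky", "bkg"):
        param_name = "%s_%s" % (name, suffix)
        nuisance_parameters[param_name] = Parameter(param_name,
                                                    1.0,
                                                    min_value=0.5,
                                                    max_value=1.5,
                                                    delta=0.01)
        # keep the parameters fixed unless the user explicitly frees them
        nuisance_parameters[param_name].fix = True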
Example #2
    def _setNuisanceParameters(self):

        # Get the list of the sources
        sources = list(self.like.model.srcNames)

        freeParamNames = []
        for srcName in sources:
            thisNamesV = pyLike.StringVector()
            thisSrc = self.like.logLike.getSource(srcName)
            thisSrc.spectrum().getFreeParamNames(thisNamesV)
            thisNames = ["%s_%s" % (srcName, x) for x in thisNamesV]
            freeParamNames.extend(thisNames)

        nuisanceParameters = collections.OrderedDict()

        for name in freeParamNames:

            value = self.getNuisanceParameterValue(name)
            bounds = self.getNuisanceParameterBounds(name)
            delta = self.getNuisanceParameterDelta(name)

            nuisanceParameters["%s_%s" % (self.name, name)] = Parameter(
                "%s_%s" % (self.name, name),
                value,
                min_value=bounds[0],
                max_value=bounds[1],
                delta=delta,
            )

            nuisanceParameters["%s_%s" %
                               (self.name,
                                name)].free = self.fit_nuisance_params

        return nuisanceParameters
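
Note the naming convention used for the dictionary keys: the plugin name is prepended to the "<source>_<parameter>" string built in the first loop, giving "<plugin>_<source>_<parameter>". A tiny illustration with placeholder names:

    plugin_name = "LAT"                  # hypothetical plugin name
    src_name, par_name = "GRB", "Index"  # hypothetical source and parameter
    key = "%s_%s_%s" % (plugin_name, src_name, par_name)
    print(key)  # LAT_GRB_Index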
Example #3
    def _setNuisanceParameters(self):

        # Get the list of the sources
        sources = list(self.like.model.srcNames)

        freeParamNames = []
        for srcName in sources:
            thisNamesV = pyLike.StringVector()
            thisSrc = self.like.logLike.getSource(srcName)
            thisSrc.spectrum().getFreeParamNames(thisNamesV)
            thisNames = ["%s_%s" % (srcName, x) for x in thisNamesV]
            freeParamNames.extend(thisNames)

        nuisanceParameters = collections.OrderedDict()

        for name in freeParamNames:

            value = self.getNuisanceParameterValue(name)
            bounds = self.getNuisanceParameterBounds(name)
            delta = self.getNuisanceParameterDelta(name)

            nuisanceParameters["%s_%s" % (self.name, name)] = Parameter("%s_%s" % (self.name, name),
                                                                        value,
                                                                        min_value=bounds[0],
                                                                        max_value=bounds[1],
                                                                        delta=delta)

            nuisanceParameters["%s_%s" % (self.name, name)].free = self.innerMinimization

            # Prepare a callback which will set the parameter value in the pyLikelihood object if it gets
            # changed
            # def this_callback(parameter):
            #
            #     _, src, pname = parameter.name.split("_")
            #
            #     try:
            #
            #         self.like.model[src].funcs['Spectrum'].getParam(pname).setValue(parameter.value)
            #
            #     except:
            #
            #         import pdb;pdb.set_trace()
            #
            # nuisanceParameters["%s_%s" % (self.name, name)].add_callback(this_callback)

        return nuisanceParameters
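
The commented-out block above sketches a callback that would push changes of a nuisance parameter back into the underlying pyLikelihood object. A cleaned-up version of the same idea, meant to live inside the loop above, using the add_callback hook and the access path shown in the comment (still a sketch: it assumes names of the form "<plugin>_<source>_<parameter>" with no underscores inside the individual pieces, and it only works inside a configured analysis):

    def this_callback(parameter):
        # names were built as "<plugin>_<source>_<parameter>"
        _, src, pname = parameter.name.split("_")
        # propagate the new value into the pyLikelihood model
        self.like.model[src].funcs['Spectrum'].getParam(pname).setValue(parameter.value)

    nuisanceParameters["%s_%s" % (self.name, name)].add_callback(this_callback)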
Example #4
    def __init__(self, name, maptree, response_file, roi, flat_sky_pixels_size=0.17):

        # Store ROI
        self._roi = roi

        # Set up the flat-sky projection
        self._flat_sky_projection = roi.get_flat_sky_projection(flat_sky_pixels_size)

        # Read map tree (data)
        self._maptree = map_tree_factory(maptree, roi=roi)

        # Read detector response_file
        self._response = hawc_response_factory(response_file)

        # Use a renormalization of the background as nuisance parameter
        # NOTE: it is fixed to 1.0 unless the user explicitly sets it free (experimental)
        self._nuisance_parameters = collections.OrderedDict()
        self._nuisance_parameters['%s_bkg_renorm' % name] = Parameter('%s_bkg_renorm' % name, 1.0,
                                                                      min_value=0.5, max_value=1.5,
                                                                      delta=0.01,
                                                                      desc="Renormalization for background map",
                                                                      free=False,
                                                                      is_normalization=False)

        # Instance parent class

        super(HAL, self).__init__(name, self._nuisance_parameters)

        self._likelihood_model = None

        # These lists will contain the maps for the point sources
        self._convolved_point_sources = ConvolvedSourcesContainer()
        # and this one for extended sources
        self._convolved_ext_sources = ConvolvedSourcesContainer()

        # All energy/nHit bins are loaded in memory
        self._all_planes = list(self._maptree.analysis_bins_labels)

        # The active planes list always contains the list of *indexes* of the active planes
        self._active_planes = None

        # Set up the transformations from the flat-sky projection to Healpix, as well as the list of active pixels
        # (one for each energy/nHit bin). We make a separate transformation because different energy bins might have
        # different nsides
        self._active_pixels = collections.OrderedDict()
        self._flat_sky_to_healpix_transform = collections.OrderedDict()

        for bin_id in self._maptree:

            this_maptree = self._maptree[bin_id]
            this_nside = this_maptree.nside
            this_active_pixels = roi.active_pixels(this_nside)

            this_flat_sky_to_hpx_transform = FlatSkyToHealpixTransform(self._flat_sky_projection.wcs,
                                                                       'icrs',
                                                                       this_nside,
                                                                       this_active_pixels,
                                                                       (self._flat_sky_projection.npix_width,
                                                                        self._flat_sky_projection.npix_height),
                                                                       order='bilinear')

            self._active_pixels[bin_id] = this_active_pixels
            self._flat_sky_to_healpix_transform[bin_id] = this_flat_sky_to_hpx_transform

        # This will contain a list of PSF convolutors for extended sources, if there are any in the model

        self._psf_convolutors = None

        # Pre-compute the log-factorial factor in the likelihood, so we do not keep computing it over and over
        # again.
        self._log_factorials = collections.OrderedDict()

        # We also apply a bias so that the numerical value of the log-likelihood stays small. This helps when
        # fitting with algorithms like MINUIT, because the convergence criterion involves the difference between
        # two likelihood values, which would be affected by numerical precision errors if the two values were
        # too large
        self._saturated_model_like_per_maptree = collections.OrderedDict()

        # The actual computation is in a method so we can recall it on clone (see the get_simulated_dataset method)
        self._compute_likelihood_biases()

        # This will save a clone of self for simulations
        self._clone = None

        # Integration method for the PSF (see psf_integration_method)
        self._psf_integration_method = "exact"
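
The "log-factorial factor" mentioned in the comments is the constant log(n!) term of the Poisson log-likelihood. A minimal sketch of how such a term is typically pre-computed, using the standard identity log(n!) = gammaln(n + 1) (illustrative only, not necessarily the exact HAL implementation):

    import numpy as np
    from scipy.special import gammaln

    observed_counts = np.array([3.0, 0.0, 12.0, 7.0])  # placeholder counts for one bin
    # log(n!) == gammaln(n + 1); the sum is a constant that can be cached
    log_factorial = np.sum(gammaln(observed_counts + 1))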
Example #5
    def __init__(self,
                 name,
                 observation,
                 background,
                 response,
                 interval_number=None,
                 verbose=False):
        """

        The Polarization likelihood for POLAR. This plugin is heavily modeled off
        the 3ML dispersion based plugins. It interpolates the spectral photon model
        over the scattering angle bins to allow for spectral + polarization analysis.



        :param interval_number: The time interval starting from 1.
        :param name: The name of the plugin
        :param observation: The POLAR observation file
        :param background: The POLAR background file
        :param response: The POLAR polarization response

        :param verbose:

        """

        # if we pass a string, there may be multiple time intervals
        # saved so we must specify a time interval

        if isinstance(observation, str):
            assert interval_number is not None, 'must specify an interval number'

            # this is a file
            read_file = ModulationCurveFile.read(observation)

            # create the bmc
            observation = read_file.to_binned_modulation_curve(
                interval=interval_number)

        # the same applies for the background
        if isinstance(background, str):
            assert interval_number is not None, 'must specify an interval number'

            # this is a file
            read_file = ModulationCurveFile.read(background)

            background = read_file.to_binned_modulation_curve(
                interval=interval_number)

        assert isinstance(observation, BinnedModulationCurve), \
            'The observation must be a BinnedModulationCurve'
        assert isinstance(background, BinnedModulationCurve), \
            'The background must be a BinnedModulationCurve'

        # attach the required variables

        self._observation = observation
        self._background = background

        self._observed_counts = observation.counts.astype(np.int64)
        self._background_counts = background.counts
        self._background_count_errors = background.count_errors
        self._scale = observation.exposure / background.exposure
        self._exposure = observation.exposure
        self._background_exposure = background.exposure

        self._likelihood_model = None
        self._rebinner = None

        # now do some double checks

        assert len(self._observed_counts) == len(self._background_counts)

        self._n_synthetic_datasets = 0

        # set up the effective area correction

        self._nuisance_parameter = Parameter(
            "cons_%s" % name,
            1.0,
            min_value=0.8,
            max_value=1.2,
            delta=0.05,
            free=False,
            desc="Effective area correction for %s" % name)

        nuisance_parameters = collections.OrderedDict()
        nuisance_parameters[
            self._nuisance_parameter.name] = self._nuisance_parameter

        # pass to the plugin proto

        super(PolarLike, self).__init__(name, nuisance_parameters)

        # The following vectors are the ones that will be really used for the computation. At the beginning they just
        # point to the original ones, but if a rebinner is used and/or a mask is created through set_active_measurements,
        # they will contain the rebinned and/or masked versions

        self._current_observed_counts = self._observed_counts
        self._current_background_counts = self._background_counts
        self._current_background_count_errors = self._background_count_errors

        self._verbose = verbose

        # we can either attach or build a response

        assert isinstance(response, (str, PolarResponse)), \
            'The response must be a file name or a PolarResponse'

        if isinstance(response, PolarResponse):

            self._response = response

        else:

            self._response = PolarResponse(response)

        # attach the interpolators from the response

        self._all_interp = self._response.interpolators

        # we also make sure the lengths match up here
        assert self._response.n_scattering_bins == len(self._observation.counts), \
            'observation counts shape does not agree with response shape'
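
As a rough illustration of how the quantities stored above typically combine (this is not the plugin's actual likelihood code): the background counts are rescaled by the exposure ratio kept in _scale, and the effective area correction multiplies the model prediction:

    import numpy as np

    # placeholder stand-ins for the attributes set in __init__
    model_counts = np.array([10.0, 20.0, 15.0])         # predicted source counts
    background_counts = np.array([100.0, 110.0, 95.0])  # counts from the background file
    scale = 0.5            # observation.exposure / background.exposure
    area_correction = 1.0  # value of the "cons_<name>" nuisance parameter

    expected_counts = area_correction * model_counts + scale * background_counts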
Example #6
    def __init__(self, name, maptree, response, n_transits=None, fullsky=False):

        # This controls whether the likeHAWC class should load the entire
        # map or just a small disc around a source. The default is the
        # latter, which is much faster. LIFF will decide autonomously
        # which ROI to use depending on the source model

        self._fullsky = bool(fullsky)

        # Sanitize files in input (expand variables and so on)

        self._maptree = os.path.abspath(sanitize_filename(maptree))

        self._response = os.path.abspath(sanitize_filename(response))

        # Check that they exist and can be read

        if not file_existing_and_readable(self._maptree):
            raise IOError("MapTree %s does not exist or is not readable" % maptree)

        if not file_existing_and_readable(self._response):
            raise IOError("Response %s does not exist or is not readable" % response)

        # Postpone the creation of the LIFF instance until
        # we have the likelihood model

        self._instanced = False

        # Number of transits
        if n_transits is not None:

            self._n_transits = float(n_transits)

        else:

            self._n_transits = None

        # Default list of bins

        self._bin_list = self._min_and_max_to_list(defaultMinChannel, defaultMaxChannel)

        # By default the fit of the CommonNorm is deactivated
        # NOTE: this flag sets the internal common norm minimization of LiFF, not
        # the common norm as a nuisance parameter (which is controlled by activate_CommonNorm() and
        # deactivate_CommonNorm())
        self._fit_commonNorm = False

        # This is to keep track of whether the user defined a ROI or not

        self._roi_ra = None
        self._roi_fits = None
        self._roi_galactic = False

        # Create the dictionary of nuisance parameters

        self._nuisance_parameters = collections.OrderedDict()

        param_name = "%s_ComNorm" % name

        self._nuisance_parameters[param_name] = Parameter(
            param_name, 1.0, min_value=0.5, max_value=1.5, delta=0.01
        )
        self._nuisance_parameters[param_name].fix = True

        super(HAWCLike, self).__init__(name, self._nuisance_parameters)
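
_min_and_max_to_list is not shown in this snippet. A plausible reconstruction, based only on its name and the defaultMinChannel/defaultMaxChannel arguments (hypothetical, not the actual HAWCLike code):

    @staticmethod
    def _min_and_max_to_list(min_channel, max_channel):
        # hypothetical sketch: enumerate the analysis-bin names
        # between the two bounds, inclusive
        return [str(n) for n in range(min_channel, max_channel + 1)]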