Example #1
def loadUnbinnedObs(self, f, verbosity=0):
    if verbosity:
        print('Loading unbinned observation:', f['ft1'])
    obs = UA.UnbinnedObs(eventFile=f['ft1'], scFile=f['ft2'],
                         expMap=f['emap'], expCube=f['ecube'],
                         irfs=f['irfs'])
    like = UA.UnbinnedAnalysis(obs, srcModel=self.model,
                               optimizer=self.optimizer)
    return [obs, like]
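A hypothetical call site for the method above; the dict keys come from the code itself, while the file names, the IRF string, and the `analysis` instance are placeholders:

# All paths and the `analysis` object are placeholders for illustration
f = {'ft1': 'ft1_roi.fits', 'ft2': 'ft2.fits',
     'emap': 'expMap.fits', 'ecube': 'expCube.fits',
     'irfs': 'P8R3_SOURCE_V3'}
obs, like = analysis.loadUnbinnedObs(f, verbosity=1)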
Example #2
    def makeObs(self):
        """Creates either a binned or unbinned observation object for
        use in the likelihood analysis.  This function checks for all
        of the needed files first.  If you do not have a needed file,
        see the quickAnalysis module for creation.  This function
        should be run before any of the init or fit functions."""

        if (self.commonConf['binned']):
            try:
                qU.checkForFiles(self.logger, [
                    self.commonConf['base'] + '_srcMaps.fits',
                    self.commonConf['base'] + '_ltcube.fits',
                    self.commonConf['base'] + '_BinnedExpMap.fits'
                ])
                self.obs = BAn.BinnedObs(
                    srcMaps=self.commonConf['base'] + '_srcMaps.fits',
                    expCube=self.commonConf['base'] + '_ltcube.fits',
                    binnedExpMap=self.commonConf['base'] +
                    '_BinnedExpMap.fits',
                    irfs=self.commonConf['irfs'])
            except (qU.FileNotFound):
                self.logger.critical("One or more needed files do not exist")
                sys.exit()
        else:
            try:
                qU.checkForFiles(self.logger, [
                    self.commonConf['base'] + '_filtered_gti.fits',
                    self.commonConf['base'] + '_SC.fits',
                    self.commonConf['base'] + '_expMap.fits',
                    self.commonConf['base'] + '_ltcube.fits'
                ])
                self.obs = UbAn.UnbinnedObs(
                    self.commonConf['base'] + '_filtered_gti.fits',
                    self.commonConf['base'] + '_SC.fits',
                    expMap=self.commonConf['base'] + '_expMap.fits',
                    expCube=self.commonConf['base'] + '_ltcube.fits',
                    irfs=self.commonConf['irfs'])
            except (qU.FileNotFound):
                self.logger.critical("One or more needed files do not exist")
                sys.exit()
        self.logger.info(self.ret.subn(', ', str(self.obs))[0])
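A hypothetical call order implied by the docstring; the wrapper class name, its constructor argument, and the later steps are placeholders rather than the module's real API:

analysis = MyLikelihoodWrapper('example_base')  # hypothetical wrapper class
analysis.makeObs()                              # must run first: builds self.obs
# ... followed by the init and fit functions the docstring refers to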
Example #3
                   poi_values=poi_values,
                   poi_probs=poi_probs,
                   poi_dlogL=poi_dlogL,
                   flux_emin=emin,
                   flux_emax=emax)

    return ul_flux, results


if __name__ == "__main__":
    import sys

    srcName = "EMS0001"
    obs = UnbinnedAnalysis.UnbinnedObs('ft1_roi.fits',
                                       scFile='ft2.fits',
                                       expMap='expMap.fits',
                                       expCube='expCube.fits',
                                       irfs='P6_V9_DIFFUSE')

    #min_opt = 'InteractiveMinuit,MIN 0 $TOL,HESSE,.q'
    #pro_opt = 'InteractiveMinuit,SET STR 0,MIN 0 $TOL,.q'
    min_opt = 'MINUIT'
    pro_opt = None

    like = UnbinnedAnalysis.UnbinnedAnalysis(obs, 'model.xml', min_opt)

    src_spectrum = like[srcName].funcs['Spectrum']
    par = src_spectrum.getParam("Index")
    if par:
        par.setFree(False)
        par.setValue(-2.0)
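The snippet stops after freezing the spectral index. Given that the truncated function above returns ul_flux and results, a plausible continuation is sketched below; the name calc_int, its keyword argument, and the confidence level are assumptions, not taken from the snippet:

# Hypothetical continuation: fit, then compute the integral upper limit
# using the function whose tail appears above (name and signature assumed)
like.fit(verbosity=0)
ul_flux, results = calc_int(like, srcName, cl=0.95)
print('95%% CL photon flux UL for %s: %g ph/cm^2/s' % (srcName, ul_flux))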
Example #4
    def __init__(
        self,
        name,
        eventFile,
        ft2File,
        livetimeCube,
        kind,
        exposureMap=None,
        sourceMaps=None,
        binnedExpoMap=None,
        source_name=None,
    ):

        # Initially the nuisance parameters dict is empty, as we don't know yet
        # the likelihood model. They will be updated in set_model

        super(FermiLATLike, self).__init__(name, {})

        # Read the ROI cut
        cc = pyLike.RoiCuts()
        cc.readCuts(eventFile, "EVENTS")
        self.ra, self.dec, self.rad = cc.roiCone()

        # Read the IRF selection
        c = pyLike.Cuts(eventFile, "EVENTS")
        self.irf = c.CALDB_implied_irfs()

        self.ft2File = ft2File
        self.livetimeCube = livetimeCube

        # These are the boundaries and the number of energies for the computation
        # of the model (in keV)
        self.emin = 1e4
        self.emax = 5e8
        self.Nenergies = 200

        # This is the limit on the effective area correction factor,
        # which is a multiplicative factor in front of the whole model
        # to account for inter-calibration issues. By default it can vary
        # by 10%. This can be changed by issuing:
        # FermiLATUnbinnedLikeInstance.effCorrLimit = [new limit]
        # where, for example, a [new limit] of 0.2 allows for an effective
        # area correction of up to +/- 20%

        self.effCorrLimit = 0.1

        if kind.upper() != "UNBINNED" and kind.upper() != "BINNED":

            raise RuntimeError("Accepted values for the kind parameter are: " +
                               "binned, unbinned. You specified: %s" % (kind))

        else:

            self.kind = kind.upper()

        if kind.upper() == "UNBINNED":

            assert exposureMap is not None, "You have to provide an exposure map"

            self.eventFile = eventFile
            self.exposureMap = exposureMap
            # Read the files and generate the pyLikelihood object
            self.obs = UnbinnedAnalysis.UnbinnedObs(
                self.eventFile,
                self.ft2File,
                expMap=self.exposureMap,
                expCube=self.livetimeCube,
                irfs=self.irf,
            )

        elif kind.upper() == "BINNED":

            assert sourceMaps is not None, "You have to provide a source map"
            assert (
                binnedExpoMap
                is not None), "You have to provide a (binned) exposure map"

            self.sourceMaps = sourceMaps
            self.binnedExpoMap = binnedExpoMap

            self.obs = BinnedAnalysis.BinnedObs(
                srcMaps=self.sourceMaps,
                expCube=self.livetimeCube,
                binnedExpMap=self.binnedExpoMap,
                irfs=self.irf,
            )

        # Activate inner minimization by default
        self.setInnerMinimization(True)

        self._source_name = source_name
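A hypothetical instantiation of the class above, exercising the unbinned branch of the constructor; every file name is a placeholder:

# Placeholder file names; kind='unbinned' requires exposureMap,
# as enforced by the assert in the constructor above
lat = FermiLATLike('LAT',
                   eventFile='ft1_filtered.fits',
                   ft2File='ft2.fits',
                   livetimeCube='ltcube.fits',
                   kind='unbinned',
                   exposureMap='expMap.fits')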
Example #5
def runLikelihood(subdir, tpl_file):
    '''This function runs the likelihood code on a set of pixels in a
    subdirectory.  It takes as input the subdirectory to work on and a
    template counts map.  It reads its configuration from a pickle
    file (par.pck) that should be located in the subdirectory and the
    pixel locations from another pickle file (pixel.pck).  It then
    creates an overall likelihood object, does a quick global fit and
    then loops over the pixels.  At each pixel, it creates a test
    source, fits that source, calculates the TS of the source and
    writes the results to an output file in the subdirectory called
    ts_results.dat.'''

    # Open in binary mode, as required by pickle under Python 3
    with open("par.pck", "rb") as parfile:
        pars = pickle.load(parfile)

    with open("pixel.pck", "rb") as pixelfile:
        pixels = pickle.load(pixelfile)

    pixel_coords = PixelCoords(tpl_file)

    if pars['statistic'] == 'UNBINNED':
        import UnbinnedAnalysis as UBAn
        obs = UBAn.UnbinnedObs(resolve_fits_files(pars['evfile']),
                               resolve_fits_files(pars['scfile']),
                               expMap='../' + pars['expmap'],
                               expCube='../' + pars['expcube'],
                               irfs=pars['irfs'])
        like = UBAn.UnbinnedAnalysis(obs, '../' + pars['srcmdl'],
                                     pars['optimizer'])
    elif pars['statistic'] == 'BINNED':
        import BinnedAnalysis as BAn
        obs = BAn.BinnedObs(srcMaps='../' + pars['srcmaps'],
                            expCube='../' + pars['expcube'],
                            binnedExpMap='../' + pars['bexpmap'],
                            irfs=pars['irfs'])
        like = BAn.BinnedAnalysis(obs, '../' + pars['srcmdl'],
                                  pars['optimizer'])

    like.setFitTolType(pars['toltype'])
    like.optimize(0)
    loglike0 = like()
    test_src = getPointSource(like)
    target_name = 'testSource'
    test_src.setName(target_name)
    outfile = 'ts_results.dat'
    finished_pixels = []
    if os.path.isfile(outfile):
        # Avoid shadowing the input() builtin and ensure the file is closed
        with open(outfile, 'r') as previous:
            for line in previous:
                tokens = line.strip().split()
                ij = int(tokens[0]), int(tokens[1])
                finished_pixels.append(ij)
    output = open(outfile, 'a')
    for indx, i, j in pixels:
        if (i, j) in finished_pixels:
            continue
        ra, dec = pixel_coords(i, j)
        test_src.setDir(ra, dec, True, False)
        like.addSource(test_src)
        like.optimize(0)
        ts = -2 * (like() - loglike0)
        output.write("%3i  %3i %.3f  %.3f  %.5f\n" % (i, j, ra, dec, ts))
        output.flush()
        like.deleteSource(target_name)
    output.close()
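A hypothetical driver for runLikelihood (both arguments are placeholders). The function opens par.pck and pixel.pck relative to the current directory and reaches its data files via '../', so the caller must chdir into the pixel subdirectory first:

import os

# Placeholder paths: run from inside the pixel subdirectory so that
# par.pck, pixel.pck and the '../'-relative files resolve correctly
os.chdir('pixel_00')
runLikelihood('pixel_00', 'counts_template.fits')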
Example #6
def bayesian_ul(**kwargs):

    # Instantiate the unbinned analysis

    print("Instantiating pyLikelihood...")

    unbinned_observation = UnbinnedAnalysis.UnbinnedObs(
        kwargs['ft1'], kwargs['ft2'], kwargs['expomap'], kwargs['ltcube'],
        'CALDB')

    pylike_instance = UnbinnedAnalysis.UnbinnedAnalysis(
        unbinned_observation, kwargs['xml'], kwargs['engine'])

    print("done")

    # Let's start by computing the semi-Bayesian UL from the Science Tools

    print("Semi-bayesian upper limit computation with ST...")

    # Sync and fit
    pylike_instance.syncSrcParams()
    pylike_instance.fit()

    # Compute ST upper limit

    ul = UpperLimits.UpperLimit(pylike_instance, kwargs['src'])

    try:

        st_bayes_ul, parameter_value = ul.bayesianUL(0.95,
                                                     emin=kwargs['emin'],
                                                     emax=kwargs['emax'])

    except Exception:

        # This fails sometimes with RuntimeError: Attempt to set parameter value outside bounds.

        print("\n\nWARNING: upper limit computation with ST has failed! \n\n")

        st_bayes_ul = -1
        st_bayes_ul_ene = -1

        # Get back to a good state
        pylike_instance = UnbinnedAnalysis.UnbinnedAnalysis(
            unbinned_observation, kwargs['xml'], kwargs['engine'])
        pylike_instance.fit()

    else:
        # Convert to energy flux
        best_fit_photon_index = pylike_instance[
            kwargs['src']].src.spectrum().parameter('Index').getValue()

        st_bayes_ul_ene = st_bayes_ul * get_conversion_factor(
            best_fit_photon_index, kwargs)

    print("done")

    # Now find out our free parameters, and define a prior for them

    # Prepare the dictionary of parameters. Note that by default they get a uniform prior
    # between the current min and max values

    free_parameters = collections.OrderedDict()

    for p in pylike_instance.model.params:

        if p.isFree():
            source_name = p.srcName
            parameter_name = p.parameter.getName()
            p.parameter.setScale(1.0)

            free_parameters[(source_name, parameter_name)] = MyParameter(p)

    # Now set the priors and the boundaries

    # Update boundaries (they will be propagated to the prior as well)

    # Isotropic template

    if (kwargs['iso'], 'Normalization') in free_parameters:

        try:

            free_parameters[(kwargs['iso'], 'Normalization')].bounds = (0, 100)

        except Exception:

            # This happens if the best fit value is outside those boundaries
            free_parameters[(kwargs['iso'], 'Normalization')].value = 1.0
            free_parameters[(kwargs['iso'], 'Normalization')].bounds = (0, 100)

    else:

        print("WARNING: Isotropic template is not free to vary (or absent)")

    # Galactic template (Truncated Gaussian with systematic error)

    if (kwargs['gal'], 'Value') in free_parameters:

        try:

            free_parameters[(kwargs['gal'], 'Value')].bounds = (0.1, 10.0)

            free_parameters[(kwargs['gal'],
                             'Value')].prior = TruncatedGaussianPrior(
                                 1.0, kwargs['gal_sys_err'])

        except Exception:
            # This happens if the best fit value is outside those boundaries
            free_parameters[(kwargs['gal'], 'Value')].value = 1.0
            free_parameters[(kwargs['gal'], 'Value')].bounds = (0.1, 10.0)

    else:

        print("WARNING: Galactic template is not free to vary (or absent)")

    # Photon flux (uniform prior)

    if (kwargs['src'], 'Integral') in free_parameters:

        try:

            free_parameters[(kwargs['src'], 'Integral')].bounds = (0, 10)

        except Exception:

            free_parameters[(kwargs['src'], 'Integral')].value = 1e-7
            free_parameters[(kwargs['src'], 'Integral')].bounds = (0, 10)

    else:

        raise RuntimeError(
            "The Integral parameter must be a free parameter of source %s" %
            kwargs['src'])

    # Photon index

    if (kwargs['src'], 'Index') in free_parameters:

        try:

            free_parameters[(kwargs['src'],
                             'Index')].bounds = (kwargs['min_index'],
                                                 kwargs['max_index'])

        except Exception:

            raise RuntimeError(
                "It looks like the best fit photon index is outside the boundaries "
                "provided in the command line")

    else:

        raise RuntimeError(
            "The Index parameter must be a free parameter of source %s" %
            kwargs['src'])

    # Execute a fit to get to a good state with the new boundaries
    pylike_instance.fit()

    # Print the configuration
    print("\nFree parameters:")
    print("----------------\n")

    for k, v in free_parameters.items():
        print("* %s of %s (%s)" % (k[1], k[0], v.prior.name))

    print("")

    # Generate the randomized starting points for the Emcee sampler

    ndim, nwalkers = len(free_parameters), kwargs['n_walkers']

    p0 = [
        [p.get_random_init(0.1) for p in free_parameters.values()]
        for i in range(nwalkers)
    ]

    # Instance the sampler
    posterior = Posterior(free_parameters.values(), pylike_instance)

    # Now check that the starting points we have are good (otherwise the sampler will go awry)

    for pp in p0:

        this_ln = posterior.lnprob(pp)

        if not np.isfinite(this_ln):

            raise RuntimeError(
                "Infinite for values %s while setting up walkers" % pp)

    sampler = emcee.EnsembleSampler(nwalkers, ndim, posterior.lnprob)

    print("Burn in...")

    pos, prob, state = sampler.run_mcmc(p0, kwargs['burn_in'])

    print("done")

    sampler.reset()

    print("Sampling...")

    samples = sampler.run_mcmc(pos, kwargs['n_samples'])

    print("done")

    print("Mean acceptance fraction: {0:.3f}".format(
        np.mean(sampler.acceptance_fraction)))

    # Make the corner plot

    samples = sampler.flatchain

    labels = [k[1] for k in free_parameters.keys()]

    print("Producing corner plot...")

    fig = corner.corner(samples,
                        show_titles=True,
                        quantiles=[0.05, 0.50, 0.95],
                        title_fmt=u'.2g',
                        labels=labels,
                        plot_contours=True,
                        plot_density=False)

    fig.tight_layout()

    fig.savefig(kwargs['corner_plot'])

    print("done")

    # Now compute the upper limits

    # Find index of normalization

    norm_index = list(free_parameters.keys()).index((kwargs['src'], 'Integral'))

    # Find index of photon index
    ph_index_index = list(free_parameters.keys()).index((kwargs['src'], 'Index'))

    photon_fluxes = np.zeros(samples.shape[0])
    energy_fluxes = np.zeros(samples.shape[0])

    conversion_factors = np.zeros(samples.shape[0])

    for i, current_sample in enumerate(samples):

        # Set the Integral parameter to the current value

        free_parameters[(kwargs['src'],
                         'Integral')].scaled_value = current_sample[norm_index]

        # Set the photon index to the current value

        current_photon_index = current_sample[ph_index_index]

        free_parameters[(kwargs['src'],
                         'Index')].scaled_value = current_photon_index

        pylike_instance.syncSrcParams()

        # Get photon flux for this sample

        photon_flux = pylike_instance[kwargs['src']].flux(
            kwargs['emin'], kwargs['emax'])

        # Get energy flux for this value

        conv = get_conversion_factor(current_photon_index, kwargs)

        energy_flux = photon_flux * conv

        # Save the results

        photon_fluxes[i] = photon_flux
        energy_fluxes[i] = energy_flux
        conversion_factors[i] = conv

    # Now compute the 95 percentile

    photon_flux_p95 = np.percentile(photon_fluxes, 95)
    energy_flux_p95 = np.percentile(energy_fluxes, 95)

    # Save the samples

    np.savez(kwargs['output_file'] + "_samples", samples=samples)

    np.savez(kwargs['output_file'],
             photon_fluxes=photon_fluxes,
             energy_fluxes=energy_fluxes,
             photon_flux_p95=photon_flux_p95,
             energy_flux_p95=energy_flux_p95,
             st_bayes_ul=st_bayes_ul,
             st_bayes_ul_ene=st_bayes_ul_ene)

    # Now summarize the results

    print("\nUpper limit computation results:")
    print("----------------------------------\n")
    print("Photon flux:\n")
    print("  * Semi-bayes from ST   : %g" % (st_bayes_ul))
    print("  * Bayesian             : %g" % photon_flux_p95)

    print("\nEnergy flux:\n")
    print("  * Semi-bayes from ST   : %g" % st_bayes_ul_ene)
    print("  * Bayesian             : %g" % energy_flux_p95)
Example #7
    # Find expomap

    expmaps = glob.glob("%s_expomap.fit*" % root_name)

    assert len(expmaps) == 1, "Couldn't find expomap"

    expmap = expmaps[0]

    # Find XML model output of gtdolike
    xmls = glob.glob("%s_likeRes.xml" % root_name)

    assert len(xmls) == 1, "Couldn't find XML"

    xml_res = xmls[0]

    obs = UnbinnedAnalysis.UnbinnedObs(filteredeventfile, dataset['ft2file'],
                                       expMap=expmap, expCube=ltcube)
    like = UnbinnedAnalysis.UnbinnedAnalysis(obs, xml_res, 'MINUIT')

    ftm = FastTSMap(like)
    (bestra, bestdec), maxTS = ftm.search_for_maximum(args.ra, args.dec,
                                                      float(half_size),
                                                      int(n_side),
                                                      verbose=False)


# Now append the results for this interval
grb = [x for x in sources if x.name.find("GRB") >= 0][0]

if args.tsmap_spec is not None:

    if maxTS > grb.TS:

        print("\n\n=========================================")
        print(" Fast TS Map has found a better position")