Example #1
    def initAltFit(self, opt="MINUIT"):
        """Initiallizes a minuit optimizer to use as a backup to the
        DRM optimizer.  This function is used internally in the fitDRM
        function so you probably will never use it.  You need to run
        makeObs before you run this function.  If it hasn't been run,
        this function will exit."""

        try:
            self.obs
        except AttributeError:
            self.logger.critical(
                "Obs object does not exist.  Create it first with the makeObs function"
            )
            return

        try:
            qU.checkForFiles(self.logger, [self.likelihoodConf['model']])
            if (self.commonConf['binned']):
                self.ALTFIT = BAn.BinnedAnalysis(self.obs,
                                                 self.likelihoodConf['model'],
                                                 optimizer=opt)
            else:
                self.ALTFIT = UbAn.UnbinnedAnalysis(
                    self.obs, self.likelihoodConf['model'], optimizer=opt)
            self.ALTFIT.tol = float(self.likelihoodConf['drmtol'])
            self.ALTFITobj = pyLike.Minuit(self.ALTFIT.logLike)
            self.logger.info(self.ret.subn(', ', str(self.ALTFIT))[0])
        except (qU.FileNotFound):
            self.logger.critical("One or more needed files do not exist")
            return
Example #2
    def initDRM(self):
        """Initializes the DRM optimizer (either binned or unbinned).
        This is usually the second function that you run when using
        this module.  You need to run makeObs before you run this
        function.  If it hasn't been run, this function will exit."""

        try:
            self.obs
        except AttributeError:
            self.logger.critical(
                "Obs object does not exist.  Create it first with the makeObs function"
            )
            return

        try:
            qU.checkForFiles(self.logger, [self.likelihoodConf['model']])
            if (self.commonConf['binned']):
                self.DRM = BAn.BinnedAnalysis(self.obs,
                                              self.likelihoodConf['model'],
                                              optimizer="DRMNGB")
            else:
                self.DRM = UbAn.UnbinnedAnalysis(self.obs,
                                                 self.likelihoodConf['model'],
                                                 optimizer="DRMNGB")
            self.DRM.tol = float(self.likelihoodConf['drmtol'])
            self.logger.info(self.ret.subn(', ', str(self.DRM))[0])
        except (qU.FileNotFound):
            self.logger.critical("One or more needed files do not exist")
            return
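Both initializers assume makeObs has already been run. A minimal usage sketch, assuming a driver object `qa` of the (unshown) analysis class these methods belong to; the constructor is hypothetical:

# qa = QuickAnalysis('analysis.cfg')   # hypothetical constructor, not shown above
# qa.makeObs()                         # must come first: creates qa.obs
# qa.initDRM()                         # DRMNGB-based fit object -> qa.DRM
# qa.initAltFit(opt='MINUIT')          # optional MINUIT backup -> qa.ALTFIT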
Example #3
    def set_model(self, likelihoodModel, source_name=None):
        """
        Set the model to be used in the joint minimization.
        Must be a LikelihoodModel instance.

        This method can also set or override a previously set source name.
        """

        # with suppress_stdout():

        if self._source_name is not None:
            if (source_name
                    is not None) and (source_name != self._source_name):
                log.warning('Changing target source from %s to %s' %
                            (self._source_name, source_name))
                self._source_name = source_name

            assert self._source_name in likelihoodModel.point_sources, (
                'Source %s is not a source in the likelihood model! ' %
                self._source_name)

        self.lmc = LikelihoodModelConverter(likelihoodModel,
                                            self.irf,
                                            source_name=self._source_name)
        self.lmc.setFileSpectrumEnergies(self.emin, self.emax, self.Nenergies)

        xmlFile = str("%s.xml" % get_random_unique_name())

        temp_files = self.lmc.writeXml(xmlFile, self.ra, self.dec, self.rad)

        if self.kind == "BINNED":
            self.like = BinnedAnalysis.BinnedAnalysis(self.obs,
                                                      xmlFile,
                                                      optimizer="DRMNFB")

        else:
            self.like = UnbinnedAnalysis.UnbinnedAnalysis(self.obs,
                                                          xmlFile,
                                                          optimizer="DRMNFB")

        self.likelihoodModel = likelihoodModel

        # Here we also need to compute the logLike value, so that the model
        # in the XML file will be changed if needed
        dumb = self.get_log_like()

        # Now that the Galactic template is in RAM, we can remove the temporary file
        os.remove(self.lmc._unique_filename)
        os.remove(xmlFile)

        # Delete temporary spectral files
        for temp_file in temp_files:

            os.remove(temp_file)

        # Build the list of the nuisance parameters
        new_nuisance_parameters = self._setNuisanceParameters()

        self.update_nuisance_parameters(new_nuisance_parameters)
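A hedged usage sketch for this plugin method (construction of the LikelihoodModel instance is not shown in this example):

# plugin.set_model(likelihood_model)                      # keep any previously set target source
# plugin.set_model(likelihood_model, source_name='GRB')   # set or override the target source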
Example #4
    def _new_log_like(self, event_file):

        new_obs = FastUnbinnedObs(event_file, self._orig_log_like.observation)

        # Create an empty XML file to trick UnbinnedAnalysis into not reading any sources.
        # We will then add the sources that have already been loaded into the original likelihood object.
        # This saves a lot of time because sources like the Galactic template do not need to be reloaded from disk.
        with open("__empty_xml.xml", "w+") as f:
            f.write('<source_library title="source library"></source_library>')

        # Load pyLike (we use DRMNFB because it is fast, much faster than Minuit, and we do not care about the errors)
        new_like = UnbinnedAnalysis.UnbinnedAnalysis(new_obs,
                                                     "__empty_xml.xml",
                                                     optimizer=self._optimizer)

        # Now load the sources from the other object
        for source_name in self._orig_log_like.sourceNames():

            if source_name[-1] == 'e' and source_name.find("Template") < 0:

                # Extended source: skip it (we didn't compute gtdiffrsp for these because it crashes)
                continue

            new_like.addSource(self._orig_log_like.logLike.source(source_name))

        return new_like
Example #5
    def __init__(self,
                 ft1,
                 ft2,
                 expmap,
                 ltcube,
                 xml_file,
                 path_of_tar_file_with_simulated_ft1_files,
                 tsmap_spec=None,
                 srcname='GRB'):

        # Process the simulations applying the same cuts as in the data file
        sp = SimulationProcessor(ft1, ft2,
                                 path_of_tar_file_with_simulated_ft1_files)

        ra_center, dec_center, radius = sp.roi

        # Now create the likelihood object
        obs = MyUnbinnedObs(ft1, ft2, expMap=expmap, expCube=ltcube)
        like = UnbinnedAnalysis.UnbinnedAnalysis(obs, xml_file, "MINUIT")

        fast_ts = FastTS(like, ts_map_spec=tsmap_spec, target_source=srcname)

        # Get the TSs
        self._tss = fast_ts.process_ft1s(sp.processed_ft1s,
                                         ra_center=ra_center,
                                         dec_center=dec_center)
Example #6
    def loadUnbinnedObs(self, f, verbosity=0):
        if verbosity:
            print 'Loading unbinned observation:', f['ft1']
        obs = UA.UnbinnedObs(eventFile=f['ft1'], scFile=f['ft2'],
                             expMap=f['emap'], expCube=f['ecube'],
                             irfs=f['irfs'])
        like = UA.UnbinnedAnalysis(obs, srcModel=self.model,
                                   optimizer=self.optimizer)
        return [obs, like]
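A usage sketch: the method expects a dict of file paths keyed as below (paths and IRF name are placeholders):

# f = {'ft1': 'events.fits', 'ft2': 'spacecraft.fits',
#      'emap': 'expMap.fits', 'ecube': 'expCube.fits',
#      'irfs': 'P8R2_SOURCE_V6'}
# obs, like = self.loadUnbinnedObs(f, verbosity=1)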
Example #7
def runLikelihood(subdir, tpl_file):
    '''This function runs the likelihood code on a set of pixels in a
    subdirectory.  It takes as input the subdirectory to work on and a
    template counts map.  It reads its configuration from a pickle
    file (par.pck) that should be located in the subdirectory and the
    pixel locations from another pickle file (pixel.pck).  It then
    creates an overall likelihood object, does a quick global fit, and
    then loops over the pixels.  At each pixel, it creates a test
    source, fits that source, calculates the TS of the source, and
    writes the results to an output file in the subdirectory called
    ts_results.dat.'''

    parfile = open("par.pck", "r")
    pars = pickle.load(parfile)

    pixelfile = open("pixel.pck", "r")
    pixels = pickle.load(pixelfile)

    pixel_coords = PixelCoords(tpl_file)

    obs = UnbinnedObs(resolve_fits_files(pars['evfile']),
                      resolve_fits_files(pars['scfile']),
                      expMap='../' + pars['expmap'],
                      expCube='../' + pars['expcube'],
                      irfs=pars['irfs'])

    like = UnbinnedAnalysis(obs, '../' + pars['srcmdl'], pars['optimizer'])
    like.setFitTolType(pars['toltype'])
    like.optimize(0)
    loglike0 = like()
    test_src = getPointSource(like)
    target_name = 'testSource'
    test_src.setName(target_name)
    outfile = 'ts_results.dat'
    finished_pixels = []
    if os.path.isfile(outfile):
        input = open(outfile, 'r')
        for line in input:
            tokens = line.strip().split()
            ij = int(tokens[0]), int(tokens[1])
            finished_pixels.append(ij)
        input.close()
    output = open(outfile, 'a')
    for indx, i, j in pixels:
        if (i, j) in finished_pixels:
            continue
        ra, dec = pixel_coords(i, j)
        test_src.setDir(ra, dec, True, False)
        like.addSource(test_src)
        like.optimize(0)
        ts = -2 * (like() - loglike0)
        output.write("%3i  %3i %.3f  %.3f  %.5f\n" % (i, j, ra, dec, ts))
        output.flush()
        like.deleteSource(target_name)
    output.close()
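A sketch for collecting the per-pixel results written above, assuming ts_results.dat contains the five whitespace-separated columns (i, j, ra, dec, ts) produced by runLikelihood:

import numpy as np

def read_ts_results(filename='ts_results.dat'):
    """Read the pixel TS results back into arrays."""
    data = np.atleast_2d(np.loadtxt(filename))
    i, j = data[:, 0].astype(int), data[:, 1].astype(int)
    ra, dec, ts = data[:, 2], data[:, 3], data[:, 4]
    return i, j, ra, dec, ts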
Example #8
    def unBinnedlikeFit2(self, optmz, write):
        """Computing a second likelihood fit, needed to compute the SED."""

        global like2
        like2 = UnbinnedAnalysis(obs, str(self.src) + "_model_likehoodFit1.xml", optimizer=optmz)
        like2.tol = 1e-8
        like2obj = pyLike.NewMinuit(like2.logLike)
        like2.fit(verbosity=1,covar=True,optObject=like2obj)

        if write:
            like2.logLike.writeXml(str(self.src)+"_model_likehoodFit2.xml")
Example #9
    def unBinnedlikeFit1(self, optmz, emin, emax, plot, write, analysisType):
        """Computing a first likelihood fit."""
        
      #  global obs 
      #  obs = UnbinnedObs(self.gtiFile, self.scFile, expMap=self.expmapFile, expCube=self.ltcubeFile, irfs=self.irfs)        
        like1 = UnbinnedAnalysis(obs,self.model,optimizer=optmz)
        like1.tol = 0.1
#        like1.setEnergyRange(emin, emax)
        like1.fit(verbosity=1)
        
        if plot:
            like1.plot()
        if write:
            like1.logLike.writeXml(str(self.src)+"_model_likehoodFit1.xml")
Example #10
def run(Name, Ra, Dec, minEnergy, maxEnergy, SCFile, ulTS, NMtol,evclass,model):

    if evclass == 512:
        irf = "P8R2_ULTRACLEAN_V6"
    elif evclass == 128:
        irf = "P8R2_SOURCE_V6"
    elif evclass == 256:
        irf = "P8R2_CLEAN_V6"
    elif evclass == 1024:
        irf = "P8R2_ULTRACLEANVETO_V6"

    print "This is multiUBLike.\nPlease make sure that you have added a model for your source with the name: " + str(Name)

    print "Calcualting the diffuse response for photons in this bin."    
    my_apps.diffResps['evfile'] = Name + '_gtmktime.fits'
    my_apps.diffResps['scfile'] = SCFile
    my_apps.diffResps['srcmdl'] = model
    my_apps.diffResps['irfs'] = irf
    my_apps.diffResps.run()

    print "Finished calculating diffuse response. Now moving to conduct a UNBINNED likelihood analysis."

    obs = UnbinnedObs(Name + '_gtmktime.fits', SCFile, expMap=Name + '_expMap.fits', expCube=Name + '_ltcube.fits', irfs=irf)
    analysis = UnbinnedAnalysis(obs, model, optimizer='NewMinuit')
    likeObj = pyLike.NewMinuit(analysis.logLike)
    analysis.tol = NMtol
    lkl = analysis.fit(verbosity=0,covar=True,optObject=likeObj)
    analysis.writeXml( Name + '_output_model.xml')
    fit = likeObj.getRetCode()
    print "Likelihood has converged whith Code " + str(likeObj.getRetCode())

    multiLike.printResults(analysis,Name, minEnergy,maxEnergy)
    print "Fit has likelihood: " + str(lkl)

    print "\nThe TS is below the threshold, calculating 95% confidence-level Bayesian upper limit."
    limit=0#,results = IUL.calc_int(analysis,Name,cl=0.95,emin=minEnergy, emax=maxEnergy)
    print "Bayesian upper limit: " + str(limit) + " photons/cm^2/s"

    #Calls for the prefactor/err, index/err, and scale

    N0 = analysis.model[Name].funcs['Spectrum'].getParam('Prefactor').value()
    N0_err = analysis.model[Name].funcs['Spectrum'].getParam('Prefactor').error()
    gamma = analysis.model[Name].funcs['Spectrum'].getParam('Index').value()
    gamma_err = analysis.model[Name].funcs['Spectrum'].getParam('Index').error()
    E0 = analysis.model[Name].funcs['Spectrum'].getParam('Scale').value()

    #Array that returns the results of the unbinned analysis:
    #log-likelihood, flux, flux_err, upper limit, test statistic, N0, N0_err, gamma, gamma_err, E0
    Return = [lkl,
              analysis.flux(Name, emin=minEnergy, emax=maxEnergy),
              analysis.fluxError(Name, emin=minEnergy, emax=maxEnergy),
              limit,
              analysis.Ts(Name, reoptimize=False),
              N0, N0_err, gamma, gamma_err, E0]
    return Return
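The returned list packs the results in the order documented above; a hedged unpacking sketch (argument values are placeholders):

# results = run('MySrc', 83.6, 22.0, 100, 300000, 'SC.fits', 25, 1e-8, 128, 'model.xml')
# (lkl, flux, flux_err, limit, ts,
#  N0, N0_err, gamma, gamma_err, E0) = results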
Example #11
def main(NAME, RA, DEC, EMIN, EMAX, SC, ROIfile, xmlmodelname, expmap,
         expcube):

    obs = UnbinnedObs(ROIfile,
                      SC,
                      expMap=expmap,
                      expCube=expcube,
                      irfs='P7REP_SOURCE_V15')
    like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='MINUIT')
    like1.fit(verbosity=0)
    ul = UpperLimits(like1)
    UL = ul[NAME].compute(emin=float(EMIN), emax=float(EMAX))
    results_ul = UL[1] * 1E-9
    err = 0
    print "'UL': ", results_ul
Example #12
    def set_model(self, likelihoodModel):
        """
        Set the model to be used in the joint minimization.
        Must be a LikelihoodModel instance.
        """

        with suppress_stdout():

            self.lmc = LikelihoodModelConverter(likelihoodModel, self.irf)

            self.lmc.setFileSpectrumEnergies(self.emin, self.emax,
                                             self.Nenergies)

            xmlFile = str("%s.xml" % get_random_unique_name())

            temp_files = self.lmc.writeXml(xmlFile, self.ra, self.dec,
                                           self.rad)

        if self.kind == "BINNED":
            self.like = BinnedAnalysis.BinnedAnalysis(self.obs,
                                                      xmlFile,
                                                      optimizer="DRMNFB")

        else:

            self.like = UnbinnedAnalysis.UnbinnedAnalysis(self.obs,
                                                          xmlFile,
                                                          optimizer="DRMNFB")

        self.likelihoodModel = likelihoodModel

        # Here we also need to compute the logLike value, so that the model
        # in the XML file will be changed if needed
        dumb = self.get_log_like()

        # Now that the Galactic template is in RAM, we can remove the temporary file
        os.remove(self.lmc._unique_filename)
        os.remove(xmlFile)

        # Delete temporary spectral files
        for temp_file in temp_files:

            os.remove(temp_file)

        # Build the list of the nuisance parameters
        new_nuisance_parameters = self._setNuisanceParameters()

        self.update_nuisance_parameters(new_nuisance_parameters)
Example #13
    def initMIN(self, useBadFit=False, modelFile="", useEdisp=False):
        """Initiallizes a New Minuit optimizer to use as a backup to
        the DRM optimizer.  This is usually run after you have
        initially run fitDRM and created a <basename>_likeDRM.xml
        model file which is used a seed for the New Minuit optimizer.
        You can skip the DRM process if you like but you need to have
        the proper model file (<basename>_likeDRM.xml) present in the
        working directory. You need to run makeObs before you run this
        function.  If it hasn't been run, this function will exit.  If
        you want to use the non convergant fit from fitDRM, set
        useBadFit to True.  You can also pass a custom model file name
        via the modelFile parameter."""

        try:
            self.obs
        except AttributeError:
            self.logger.critical(
                "Obs object does not exist.  Create it first with the makeObs function."
            )
            return

        if (useBadFit):
            model = self.commonConf['base'] + '_badDRMFit.xml'
        else:
            model = self.commonConf['base'] + '_likeDRM.xml'

        if (modelFile):
            model = modelFile

        try:
            qU.checkForFiles(self.logger, [model])
            if (self.commonConf['binned']):
                self.MIN = BAn.BinnedAnalysis(self.obs,
                                              model,
                                              optimizer='NewMinuit')
            else:
                self.MIN = UbAn.UnbinnedAnalysis(self.obs,
                                                 model,
                                                 optimizer='NewMinuit')
            self.MIN.tol = float(self.likelihoodConf['mintol'])
            self.MINobj = pyLike.NewMinuit(self.MIN.logLike)
            self.pristine = LikelihoodState(self.MIN)
            self.logger.info(self.ret.subn(', ', str(self.MIN))[0])
            if (useEdisp):
                self.MIN.logLike.set_edisp_flag(useEdisp)
        except (qU.FileNotFound):
            self.logger.critical("One or more needed files do not exist")
            return
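A hedged sketch of the typical sequence described in the docstring; `qa` is a hypothetical driver object of the class this method belongs to:

# qa.makeObs()
# qa.fitDRM()                                # writes <basename>_likeDRM.xml
# qa.initMIN()                               # NewMinuit fit seeded from the DRM result
# qa.initMIN(modelFile='my_seed_model.xml')  # or seed from a custom model file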
Example #14
    def _new_log_like(self, event_file):

        new_obs = FastUnbinnedObs(event_file, self._orig_log_like.observation)

        # Create an empty XML file to trick UnbinnedAnalysis into not reading any sources.
        # We will then add the sources that have already been loaded into the original likelihood object.
        # This saves a lot of time because sources like the Galactic template do not need to be reloaded from disk.
        with open("__empty_xml.xml", "w+") as f:
            f.write('<source_library title="source library"></source_library>')

        # Load pyLike
        new_like = UnbinnedAnalysis.UnbinnedAnalysis(new_obs, "__empty_xml.xml",
                                                     optimizer="MINUIT")

        # Now load the sources from the other object
        for source_name in self._sources_to_keep:

            #print("Adding %s" % source_name)
            new_like.addSource(self._orig_log_like.logLike.source(source_name))

        return new_like
Example #15
if __name__ == "__main__":
    import sys

    srcName = "EMS0001"
    obs = UnbinnedAnalysis.UnbinnedObs('ft1_roi.fits',
                                       scFile='ft2.fits',
                                       expMap='expMap.fits',
                                       expCube='expCube.fits',
                                       irfs='P6_V9_DIFFUSE')

    #min_opt = 'InteractiveMinuit,MIN 0 $TOL,HESSE,.q'
    #pro_opt = 'InteractiveMinuit,SET STR 0,MIN 0 $TOL,.q'
    min_opt = 'MINUIT'
    pro_opt = None

    like = UnbinnedAnalysis.UnbinnedAnalysis(obs, 'model.xml', min_opt)

    src_spectrum = like[srcName].funcs['Spectrum']
    par = src_spectrum.getParam("Index")
    if par:
        par.setFree(False)
        par.setValue(-2.0)
        like.syncSrcParams(srcName)

    ul, results = calc_int(like, srcName, verbosity=1)

    print results

    for i in range(len(results["profile_x"])):
        print results["profile_x"][i], results["profile_y"][i]
Example #16
	# Each event must have a separate response precomputed for each diffuse component in the source model. The precomputed responses for Pass 7 (V6) data are for the gll_iem_v05, iso_source_v05.txt, and iso_clean_05.txt diffuse models.

	os.system('cp '+filteredLATFile+' '+filteredLATFile_withDiffResps)	#Make a copy of the filteredLATFile
	my_apps.diffResps['evfile']=filteredLATFile_withDiffResps
	my_apps.diffResps['scfile']=spacecraftFile
	my_apps.diffResps['srcmdl']=modelFile
	my_apps.diffResps['irfs']=irfsType
	my_apps.diffResps.run()
	
# Run the Likelihood Analysis
print colors.OKBLUE+"Performing Likelihood Analysis"+colors.ENDC
import pyLikelihood
from UnbinnedAnalysis import *
obs = UnbinnedObs(filteredLATFile_withDiffResps,spacecraftFile,expMap=expMapFile,expCube=ltCubeFile,irfs=irfsType)
like = UnbinnedAnalysis(obs,modelFile,optimizer=optimizerType)

# Cuts Complete
print colors.OKGREEN+"################ Analysis Complete ################"
print obs
print like
print "###################################################"+colors.ENDC

################################
###### Adjust Source Model #####
################################

like.tol
like.tolType
like.tol = 0.0001
if optimizerType=='Minuit':
Example #17
    def PerformLikelihoodAnalysis(self):

        print "\nPerforming likelihood analysis on position: ra=%s, dec=%s" % (
            self.xref, self.yref)

        # Wait a random amount of time between 1 and 5 minutes before starting in order to not crash the asf/nsf disks at SLAC
        waitTime = random.random() * 300
        time.sleep(waitTime)

        # Define the scratch directory
        JobID = os.environ.get('LSB_JOBID')
        Username = getpass.getuser()
        ScratchDirectory = "/scratch/%s/%s/" % (Username, JobID)

        # Define the pfile directory
        if JobID is None:
            PFILESDirectory = "%s/pfiles_%s/" % (self.outdir, self.binNumber)
        else:
            PFILESDirectory = "%s/pfiles/" % ScratchDirectory

        # Create the output directory if it doesn't already exist
        if not os.path.isdir(self.outdir):
            print "\n >> Creating Directory: " + self.outdir
            cmd = "mkdir " + self.outdir
            os.system(cmd)

        # Define where to save the results
        likelihoodResults = '%s/likelihoodResults_bin%s.txt' % (self.outdir,
                                                                self.binNumber)

        # Remove any pre-existing pfiles
        if os.path.isdir(PFILESDirectory):
            cmd = "rm -r %s" % PFILESDirectory
            os.system(cmd)

        # Set the new pfiles directory
        SetPfilesDirectory(PFILESDirectory)

        # Make a copy of the source model
        xmlModelWithPutativeSource = '%s/ModelSource_bin%s.xml' % (
            self.outdir, self.binNumber)
        cmd = "cp " + self.srcmdl + " " + xmlModelWithPutativeSource
        print cmd
        os.system(cmd)

        # Add a putative point source at the requested location
        #		AddCandidateSource(self.xref, self.yref, xmlModelWithPutativeSource)
        ModifySourceModel(xmlModelWithPutativeSource, self.xref, self.yref)

        # # Import the necessary gtapps
        # gtlike = GtApp('gtlike')

        # # Run the likelihood analysis
        # print '\nPerforming the likelihood fit:'
        # gtlike.run(statistic=self.statistic,
        # 				scfile=self.scfile,
        # 				evfile=self.evfile,
        # 				expmap=self.expmap,
        # 				expcube=self.expcube,
        # 				srcmdl=xmlModelWithPutativeSource,
        # 				irfs=self.irfs,
        # 				optimizer=self.optimizer,
        # 				results=likelihoodResults,
        # 				plot='no',
        # 				save='yes')

        # Setup the unbinned likelihood object
        print '\nPerforming the likelihood fit:'
        try:

            obs = UnbinnedObs(self.evfile,
                              self.scfile,
                              expMap=self.expmap,
                              expCube=self.expcube,
                              irfs=self.irfs)

            # Define the likelihood object
            #like = UnbinnedAnalysis(obs,xmlModelWithPutativeSource,optimizer=self.optimizer)
            like = UnbinnedAnalysis(obs,
                                    xmlModelWithPutativeSource,
                                    optimizer='MINUIT')

            # Setup the likelihood parameters
            Source = 'CandidateSource'
            Integral = like.par_index(Source, 'Integral')
            Index = like.par_index(Source, 'Index')
            LowerLimit = like.par_index(Source, 'LowerLimit')
            UpperLimit = like.par_index(Source, 'UpperLimit')

            # Setup the likelihood bounds
            like[Integral].setScale(1e-3)
            like[Index].setBounds(-5, -0.5)
            # like[LowerLimit] = emin
            # like[UpperLimit] = emax

            # Perform the likelihood fit
            #optObject = pyLike.NewMinuit(like.logLike)
            #like.fit(verbosity=0,covar=True,tol=0.02,optObject=optObject)
            like.fit(verbosity=1,
                     covar=True,
                     tol=1e-10,
                     optimizer='MINUIT',
                     optObject=None)

            # Extract the best fit index
            IndexValue = like[Index].value()
            IndexError = like[Index].error()

            # Extract the best fit flux
            FluxValue = like.flux(Source, emin=100, emax=3e5)
            FluxError = like.fluxError(Source, emin=100, emax=3e5)

            # Extract likelihood fit results
            print '\nLikelihood Results:'
            print like.model[Source]
            print "TS = %s" % like.Ts(Source)
            print "Flux = %s +/- %s" % (FluxValue, FluxError)
            print "Index = %s +/- %s" % (IndexValue, IndexError)

            # Save the xml file
            like.writeXml(xmlFile=xmlModelWithPutativeSource)

        except Exception, message:
            print traceback.format_exc()
Example #18
def main(NAME, RA, DEC, TSTART, TSTOP, EMIN, EMAX, SC, ROIu, xml):
    ROIue = float(ROIu) + 10
    os.system('ls -1 *PH*.fits > %s_events.list' % (NAME))
    my_apps.filter['evclass'] = 128
    my_apps.filter['evtype'] = 3
    my_apps.filter['ra'] = RA
    my_apps.filter['dec'] = DEC
    my_apps.filter['rad'] = ROIu
    my_apps.filter['emin'] = EMIN
    my_apps.filter['emax'] = EMAX
    my_apps.filter['zmax'] = 90
    my_apps.filter['tmin'] = TSTART
    my_apps.filter['tmax'] = TSTOP
    my_apps.filter['infile'] = '@%s_events.list' % (NAME)
    my_apps.filter['outfile'] = '%s_filtered.fits' % (NAME)
    my_apps.filter.run()
    #    maketime
    my_apps.maketime['scfile'] = SC
    my_apps.maketime['filter'] = '(DATA_QUAL>0)&&(LAT_CONFIG==1)'
    my_apps.maketime['roicut'] = 'no'
    my_apps.maketime['evfile'] = '%s_filtered.fits' % (NAME)
    my_apps.maketime['outfile'] = '%s_filtered_gti.fits' % (NAME)
    my_apps.maketime.run()
    #
    my_apps.counts_map['evfile'] = '%s_filtered_gti.fits' % (NAME)
    my_apps.counts_map['scfile'] = SC
    my_apps.counts_map['outfile'] = '%s_CountMap.fits' % (NAME)
    #    my_apps.counts_map.run()
    #
    my_apps.expCube['evfile'] = '%s_filtered_gti.fits' % (NAME)
    my_apps.expCube['scfile'] = SC
    my_apps.expCube['zmax'] = 90
    my_apps.expCube['outfile'] = 'expCube.fits'
    my_apps.expCube['dcostheta'] = 0.025
    my_apps.expCube['binsz'] = 1
    my_apps.expCube.run()

    my_apps.expMap['evfile'] = '%s_filtered_gti.fits' % (NAME)
    my_apps.expMap['scfile'] = SC
    my_apps.expMap['expcube'] = 'expCube.fits'
    my_apps.expMap['outfile'] = 'expMap.fits'
    my_apps.expMap['irfs'] = 'CALDB'
    my_apps.expMap['srcrad'] = ROIue
    my_apps.expMap['nlong'] = 120
    my_apps.expMap['nlat'] = 120
    my_apps.expMap['nenergies'] = 20
    my_apps.expMap.run()

    # sara xml model
    roiname = '%s_filtered_gti.fits' % NAME
    if float(xml) == 0:
        xml_creator_P8_v1.main(NAME, float(RA), float(DEC), float(EMIN), float(EMAX), 15)
        xmlmodelname = '%s_model.xml' % NAME

        my_apps.diffResps['evfile'] = '%s_filtered_gti.fits' % (NAME)
        my_apps.diffResps['scfile'] = SC
        my_apps.diffResps['srcmdl'] = xmlmodelname
        my_apps.diffResps['irfs'] = 'CALDB'
        my_apps.diffResps.run()

        xmlfitname = '%s_fit1.xml' % NAME
        obs = UnbinnedObs(roiname, SC, expMap='expMap.fits', expCube='expCube.fits', irfs='CALDB')
        # like1 = UnbinnedAnalysis(obs,xmlmodelname,optimizer='MINUIT')
        like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='NewMinuit')
        likeobj = pyLike.NewMinuit(like1.logLike)
        like1.fit(verbosity=0, optObject=likeobj)
        print likeobj.getRetCode()
        sourceDetails = {}
        for source in like1.sourceNames():
            sourceDetails[source] = like1.Ts(source)
        for source, TS in sourceDetails.iteritems():
            if (TS < 2):
                print "Deleting...", source, " TS = ", TS
                like1.deleteSource(source)
        like1.fit(verbosity=0, optObject=likeobj)
        print "0 is converged", likeobj.getRetCode()
        like1.logLike.writeXml(xmlfitname)

        numl = search(NAME, xmlfitname)
        numlg = str(numl + 3)
        os.system("sed '" + numlg + "," + numlg + " s/free=\"1\"/free=\"0\"/' " + xmlfitname + " > xml_sed.xml ")
        inputs = likeInput(like1, NAME, model="xml_sed.xml", nbins=6, phCorr=1.0)
        inputs.plotBins()
        inputs.fullFit(CoVar=True)
        sed = likeSED(inputs)
        sed.getECent()
        sed.fitBands()
        sed.Plot()
        result = like1.model[NAME]
        TS = like1.Ts(NAME)
        flux = like1.flux(NAME, emin=100)
        gamma = like1.model[NAME].funcs['Spectrum'].getParam('Index').value()
        cov_gg = like1.model[NAME].funcs['Spectrum'].getParam('Index').error()
        #    cov_II = like1.model[NAME].funcs['Spectrum'].getParam('Integral').error()
        flux_err = like1.fluxError(NAME, emin=100)
        like1.plot()
        fitsedname = '%s_6bins_likeSEDout.fits' % NAME
        sedtool(fitsedname)

        print NAME, " TS=", TS
        print result

    if float(xml) == 1:
        xmlmodelname = '%s_model.xml' % NAME
        xmlfitname = '%s_fit1.xml' % NAME
        obs = UnbinnedObs(roiname, SC, expMap='expMap.fits', expCube='expCube.fits', irfs='CALDB')
        # like1 = UnbinnedAnalysis(obs,xmlmodelname,optimizer='MINUIT')
        like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='NewMinuit')
        likeobj = pyLike.NewMinuit(like1.logLike)
        like1.fit(verbosity=0, optObject=likeobj)
        print likeobj.getRetCode()
        sourceDetails = {}
        for source in like1.sourceNames():
            sourceDetails[source] = like1.Ts(source)
        for source, TS in sourceDetails.iteritems():
            if (TS < 2):
                print "Deleting...", source, " TS = ", TS
                like1.deleteSource(source)
        like1.fit(verbosity=0, optObject=likeobj)
        print "0 is converged", likeobj.getRetCode()
        like1.logLike.writeXml(xmlfitname)
        numl = search(NAME, xmlfitname)
        numlg = str(numl + 3)
        os.system("sed '" + numlg + "," + numlg + " s/free=\"1\"/free=\"0\"/' " + xmlfitname + " > xml_sed.xml ")
        inputs = likeInput(like1, NAME, model="xml_sed.xml", nbins=6, phCorr=1.0)
        inputs.plotBins()
        inputs.fullFit(CoVar=True)
        sed = likeSED(inputs)
        sed.getECent()
        sed.fitBands()
        sed.Plot()
        result = like1.model[NAME]
        TS = like1.Ts(NAME)
        flux = like1.flux(NAME, emin=100)
        gamma = like1.model[NAME].funcs['Spectrum'].getParam('Index').value()
        cov_gg = like1.model[NAME].funcs['Spectrum'].getParam('Index').error()
        #    cov_II = like1.model[NAME].funcs['Spectrum'].getParam('Integral').error()
        flux_err = like1.fluxError(NAME, emin=100)
        like1.plot()
        fitsedname = '%s_6bins_likeSEDout.fits' % NAME
        sedtool(fitsedname)

        print NAME, " TS=", TS
        print result
Example #19
     expmaps = glob.glob("%s_expomap.fit*" % root_name)
     
     assert len(expmaps) == 1, "Couldn't find expomap"
     
     expmap = expmaps[0]
     
     # Find XML model output of gtdolike
     xmls = glob.glob("%s_likeRes.xml" % root_name)
     
     assert len(xmls) == 1, "Couldn't find XML"
     
     xml_res = xmls[0]
     
     obs = UnbinnedAnalysis.UnbinnedObs(filteredeventfile, dataset['ft2file'], expMap=expmap, expCube=ltcube)
     like = UnbinnedAnalysis.UnbinnedAnalysis(obs, xml_res, 'MINUIT')
     
     ftm = FastTSMap(like)
     (bestra, bestdec), maxTS = ftm.search_for_maximum(args.ra, args.dec, float(half_size), int(n_side), verbose=False)
     
 
 #Now append the results for this interval
 grb                          = filter(lambda x:x.name.find("GRB")>=0,sources)[0]
 
 if args.tsmap_spec is not None:
     
     if maxTS > grb.TS:
         
         print("\n\n=========================================")
         print(" Fast TS Map has found a better position")
         print("=========================================\n\n")
Example #20
def bayesian_ul(**kwargs):

    # Instance the unbinned analysis

    print("Instancing pyLikelihood...")

    unbinned_observation = UnbinnedAnalysis.UnbinnedObs(
        kwargs['ft1'], kwargs['ft2'], kwargs['expomap'], kwargs['ltcube'],
        'CALDB')

    pylike_instance = UnbinnedAnalysis.UnbinnedAnalysis(
        unbinned_observation, kwargs['xml'], kwargs['engine'])

    print("done")

    # Let's start by computing the semi-Bayesian UL from the Science Tools

    print("Semi-bayesian upper limit computation with ST...")

    # Sync and fit
    pylike_instance.syncSrcParams()
    pylike_instance.fit()

    # Compute ST upper limit

    ul = UpperLimits.UpperLimit(pylike_instance, kwargs['src'])

    try:

        st_bayes_ul, parameter_value = ul.bayesianUL(0.95,
                                                     emin=kwargs['emin'],
                                                     emax=kwargs['emax'])

    except:

        # This fails sometimes with RuntimeError: Attempt to set parameter value outside bounds.

        print("\n\nWARNING: upper limit computation with ST has failed! \n\n")

        st_bayes_ul = -1
        st_bayes_ul_ene = -1

        # Get back to a good state
        pylike_instance = UnbinnedAnalysis.UnbinnedAnalysis(
            unbinned_observation, kwargs['xml'], kwargs['engine'])
        pylike_instance.fit()

    else:
        # Convert to energy flux
        best_fit_photon_index = pylike_instance[
            kwargs['src']].src.spectrum().parameter('Index').getValue()

        st_bayes_ul_ene = st_bayes_ul * get_conversion_factor(
            best_fit_photon_index, kwargs)

    print("done")

    # Now find out our free parameters, and define a prior for them

    # Prepare the dictionary of parameters. Note that by default they get a uniform prior
    # between the current min and max values

    free_parameters = collections.OrderedDict()

    for p in pylike_instance.model.params:

        if p.isFree():
            source_name = p.srcName
            parameter_name = p.parameter.getName()
            p.parameter.setScale(1.0)

            free_parameters[(source_name, parameter_name)] = MyParameter(p)

    # Now set the priors and the boundaries

    # Update boundaries (they will be propagated to the prior as well)

    # Isotropic template

    if (kwargs['iso'], 'Normalization') in free_parameters:

        try:

            free_parameters[(kwargs['iso'], 'Normalization')].bounds = (0, 100)

        except:

            # This happens if the best fit value is outside those boundaries
            free_parameters[(kwargs['iso'], 'Normalization')].value = 1.0
            free_parameters[(kwargs['iso'], 'Normalization')].bounds = (0, 100)

    else:

        print("WARNING: Isotropic template is not free to vary (or absent)")

    # Galactic template (Truncated Gaussian with systematic error)

    if (kwargs['gal'], 'Value') in free_parameters:

        try:

            free_parameters[(kwargs['gal'], 'Value')].bounds = (0.1, 10.0)

            free_parameters[(kwargs['gal'],
                             'Value')].prior = TruncatedGaussianPrior(
                                 1.0, kwargs['gal_sys_err'])

        except:
            # This happens if the best fit value is outside those boundaries
            free_parameters[(kwargs['gal'], 'Value')].value = 1.0
            free_parameters[(kwargs['gal'], 'Value')].bounds = (0.1, 10.0)

    else:

        print("WARNING: Galactic template is not free to vary (or absent)")

    # Photon flux (uniform prior)

    if (kwargs['src'], 'Integral') in free_parameters:

        try:

            free_parameters[(kwargs['src'], 'Integral')].bounds = (0, 10)

        except:

            free_parameters[(kwargs['src'], 'Integral')].value = 1e-7
            free_parameters[(kwargs['src'], 'Integral')].bounds = (0, 10)

    else:

        raise RuntimeError(
            "The Integral parameter must be a free parameter of source %s" %
            kwargs['src'])

    # Photon index

    if (kwargs['src'], 'Index') in free_parameters:

        try:

            free_parameters[(kwargs['src'],
                             'Index')].bounds = (kwargs['min_index'],
                                                 kwargs['max_index'])

        except:

            raise RuntimeError(
                "It looks like the best fit photon index is outside the boundaries "
                "provided in the command line")

    else:

        raise RuntimeError(
            "The Index parameter must be a free parameter of source %s" %
            kwargs['src'])

    # Execute a fit to get to a good state with the new boundaries
    pylike_instance.fit()

    # Print the configuration
    print("\nFree parameters:")
    print("----------------\n")

    for k, v in free_parameters.iteritems():
        print("* %s of %s (%s)" % (k[1], k[0], v.prior.name))

    print("")

    # Generate the randomized starting points for the Emcee sampler

    ndim, nwalkers = len(free_parameters), kwargs['n_walkers']

    p0 = [
        map(lambda p: p.get_random_init(0.1), free_parameters.values())
        for i in range(nwalkers)
    ]

    # Instance the sampler
    posterior = Posterior(free_parameters.values(), pylike_instance)

    # Now check that the starting points we have are good (otherwise the sampler will go awry)

    for pp in p0:

        this_ln = posterior.lnprob(pp)

        if not np.isfinite(this_ln):

            raise RuntimeError(
                "Infinite for values %s while setting up walkers" % pp)

    sampler = emcee.EnsembleSampler(nwalkers, ndim, posterior.lnprob)

    print("Burn in...")

    pos, prob, state = sampler.run_mcmc(p0, kwargs['burn_in'])

    print("done")

    sampler.reset()

    print("Sampling...")

    samples = sampler.run_mcmc(pos, kwargs['n_samples'])

    print("done")

    print("Mean acceptance fraction: {0:.3f}".format(
        np.mean(sampler.acceptance_fraction)))

    # Make the corner plot

    samples = sampler.flatchain

    labels = map(lambda x: "%s" % (x[1]), free_parameters.keys())

    print("Producing corner plot...")

    fig = corner.corner(samples,
                        show_titles=True,
                        quantiles=[0.05, 0.50, 0.95],
                        title_fmt=u'.2g',
                        labels=labels,
                        plot_contours=True,
                        plot_density=False)

    fig.tight_layout()

    fig.savefig(kwargs['corner_plot'])

    print("done")

    # Now compute the upper limits

    # Find index of normalization

    norm_index = free_parameters.keys().index((kwargs['src'], 'Integral'))

    # Find index of photon index
    ph_index_index = free_parameters.keys().index((kwargs['src'], 'Index'))

    photon_fluxes = np.zeros(samples.shape[0])
    energy_fluxes = np.zeros(samples.shape[0])

    conversion_factors = np.zeros(samples.shape[0])

    for i, current_sample in enumerate(samples):

        # Set the Integral parameter to the current value

        free_parameters[(kwargs['src'],
                         'Integral')].scaled_value = current_sample[norm_index]

        # Set the photon index to the current value

        current_photon_index = current_sample[ph_index_index]

        free_parameters[(kwargs['src'],
                         'Index')].scaled_value = current_photon_index

        pylike_instance.syncSrcParams()

        # Get photon flux for this sample

        photon_flux = pylike_instance[kwargs['src']].flux(
            kwargs['emin'], kwargs['emax'])

        # Get energy flux for this value

        conv = get_conversion_factor(current_photon_index, kwargs)

        energy_flux = photon_flux * conv

        # Save the results

        photon_fluxes[i] = photon_flux
        energy_fluxes[i] = energy_flux
        conversion_factors[i] = conv

    # Now compute the 95th percentile

    photon_flux_p95 = np.percentile(photon_fluxes, 95)
    energy_flux_p95 = np.percentile(energy_fluxes, 95)

    # Save the samples

    np.savez(kwargs['output_file'] + "_samples", samples=samples)

    np.savez(kwargs['output_file'],
             photon_fluxes=photon_fluxes,
             energy_fluxes=energy_fluxes,
             photon_flux_p95=photon_flux_p95,
             energy_flux_p95=energy_flux_p95,
             st_bayes_ul=st_bayes_ul,
             st_bayes_ul_ene=st_bayes_ul_ene)

    # Now summarize the results

    print("\nUpper limit computation results:")
    print("----------------------------------\n")
    print("Photon flux:\n")
    print("  * Semi-bayes from ST   : %g" % (st_bayes_ul))
    print("  * Bayesian             : %g" % photon_flux_p95)

    print("\nEnergy flux:\n")
    print("  * Semi-bayes from ST   : %g" % st_bayes_ul_ene)
    print("  * Bayesian             : %g" % energy_flux_p95)
Example #21
def runFermiTools(Name, RA, DEC, minEnergy, maxEnergy, SCFile, radius, binsz,
                  TSTART, TSTOP, Evfile, bins, zmax, evclass, evtype, TSul,
                  NMtol, lc_bin_num, runMRM):

    print "Working on bin " + str(lc_bin_num) + " for the light curve."

    f = FermiObject()
    """

        Following steps execute Fermi Tool gtselect

    """

    print('\nWorking on file.')
    print('Cutting file to fit desired parameters . . .\n')
    f._setEvclass(evclass)
    f._setEvtype(evtype)
    f._setRa(RA)
    f._setDec(DEC)
    f._setRad(radius)
    f._setEmin(minEnergy)
    f._setEmax(maxEnergy)
    f._setZmax(zmax)
    f._setTmin(TSTART)
    f._setTmax(TSTOP)
    f._setInfile(Evfile)
    f._setOutfile(Name + '_gtselect' + str(lc_bin_num) + '_lc.fits')
    f.amonSelect()
    print(
        'File cuts have been made. Now making cuts for GTI using spacecraft file.'
    )
    """

        Following steps execute Fermi Tool gtmktime

    """

    f._setScfile(SCFile)
    f._setRoicut('no')
    f._setEvfile(Name + '_gtselect' + str(lc_bin_num) + '_lc.fits')
    f._setOutfile(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits')
    ###############################################
    #         Filter expression                   #
    Filter = '(DATA_QUAL>0)&&(LAT_CONFIG==1)'
    ###############################################
    f._setFilter(Filter)
    print('Working on file ' + str(f.getOutfile()) + '. . .')
    f.amonTime()
    print('File cuts have been made.')
    print('Using XML model from whole dataset.\n Moving on to gtltcube.')

    print "Now working on ltcube file using gtltcube\n"
    my_apps.expCube['evfile'] = Name + '_gtmktime' + str(
        lc_bin_num) + '_lc.fits'
    my_apps.expCube['scfile'] = SCFile
    my_apps.expCube['outfile'] = Name + '_ltcube' + str(
        lc_bin_num) + '_lc.fits'
    my_apps.expCube['dcostheta'] = 0.025
    my_apps.expCube['binsz'] = 1
    my_apps.expCube['phibins'] = 0
    my_apps.expCube['zmax'] = zmax
    my_apps.expCube['chatter'] = 0
    my_apps.expCube.run()

    print "\nltcube complete.\nMoving to compute exposure map with gtexpmap.\n"
    my_apps.expMap['evfile'] = Name + '_gtmktime' + str(
        lc_bin_num) + '_lc.fits'
    my_apps.expMap['scfile'] = SCFile
    my_apps.expMap['expcube'] = Name + '_ltcube' + str(lc_bin_num) + '_lc.fits'
    my_apps.expMap['outfile'] = Name + '_expMap' + str(lc_bin_num) + '_lc.fits'
    my_apps.expMap['irfs'] = 'CALDB'
    my_apps.expMap['srcrad'] = radius + 10
    my_apps.expMap['nlong'] = 4 * (radius + 10)
    my_apps.expMap['nlat'] = 4 * (radius + 10)
    ebin = int(10 * log10(maxEnergy / minEnergy))
    print "There are " + str(ebin) + " energy bans."
    my_apps.expMap['nenergies'] = ebin
    my_apps.expMap.run()
    print "Finnished making exposure map.\n"

    print "Calcualting the diffuse response for photons in this bin."
    my_apps.diffResps['evfile'] = Name + '_gtmktime' + str(
        lc_bin_num) + '_lc.fits'
    my_apps.diffResps['scfile'] = SCFile
    my_apps.diffResps['srcmdl'] = Name + '_output_model.xml'
    my_apps.diffResps['irfs'] = 'CALDB'
    my_apps.diffResps.run()

    print "Finished calculating diffuse response. Now moving to conduct a UNBINNED likelihood analysis."

    obs = UnbinnedObs(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits',
                      SCFile,
                      expMap=Name + '_expMap' + str(lc_bin_num) + '_lc.fits',
                      expCube=Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
                      irfs='P8R2_SOURCE_V6')
    analysis = UnbinnedAnalysis(obs,
                                Name + '_output_model.xml',
                                optimizer='NewMinuit')
    likeObj = pyLike.NewMinuit(analysis.logLike)
    analysis.tol = NMtol
    LIKE = analysis.fit(verbosity=0, covar=True, optObject=likeObj)
    fit = likeObj.getRetCode()
    print "Likelihood has converged whith Code " + str(likeObj.getRetCode())
    Flux = analysis.flux(Name, emin=minEnergy, emax=maxEnergy)
    Ferr = analysis.fluxError(Name, emin=minEnergy, emax=maxEnergy)
    MeVtoErg = 1.602e-6
    ef = analysis.energyFlux(Name, minEnergy, maxEnergy) * MeVtoErg
    ef_err = analysis.energyFluxError(Name, minEnergy, maxEnergy) * MeVtoErg
    UL = False
    TSUM = TSTART + TSTOP
    TMID = TSUM / 2
    limit = Flux
    if analysis.Ts(Name) < TSul:
        UL = True
        limit, results = IUL.calc_int(analysis,
                                      Name,
                                      cl=0.90,
                                      emin=minEnergy,
                                      emax=maxEnergy)

    #Do second likelihood with constant flux to calculate the TS variability
    obsC = UnbinnedObs(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits',
                       SCFile,
                       expMap=Name + '_expMap' + str(lc_bin_num) + '_lc.fits',
                       expCube=Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
                       irfs='P8R2_SOURCE_V6')
    analysisC = UnbinnedAnalysis(obsC,
                                 Name + '_var_model.xml',
                                 optimizer='NewMinuit')
    likeObjC = pyLike.NewMinuit(analysisC.logLike)
    analysisC.tol = NMtol
    LIKEC = analysisC.fit(verbosity=0, covar=True, optObject=likeObjC)

    #Run gtselect to make smaller data fits file to compute the exposure, set to 3 degrees around source of interest
    f._setRad(3)
    f._setInfile(Evfile)
    f._setOutfile(Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits')
    print "Creating file " + Name + "_gtselect_exposure.fits"
    f.amonSelect()

    #Run gtmaketime on this small region
    f._setEvfile(Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits')
    f._setOutfile(Name + '_gtmktime' + str(lc_bin_num) + '_exposure.fits')
    print('Working on file ' + str(f.getOutfile()))
    f.amonTime()

    my_apps.evtbin['algorithm'] = 'LC'
    my_apps.evtbin['evfile'] = f.getOutfile()
    my_apps.evtbin['outfile'] = Name + '_LC' + str(
        lc_bin_num) + '_exposure.fits'
    my_apps.evtbin['scfile'] = f.getScfile()
    my_apps.evtbin['tbinalg'] = 'LIN'
    my_apps.evtbin['tstart'] = f.getTmin()
    my_apps.evtbin['tstop'] = f.getTmax()
    my_apps.evtbin['dtime'] = TSTOP - TSTART
    my_apps.evtbin.run()

    yes = subprocess.call([
        'gtexposure', Name + '_LC' + str(lc_bin_num) + '_exposure.fits',
        f.getScfile(), 'P8R2_SOURCE_V6', Name + '_output_model.xml', Name
    ])
    if yes == 0:
        print "Exposure map has been created"
    else:
        print "Subprocessing failed. Unable to create exposure map with gtexposure."

    print "Time bin complete."

    hdulist = pyfits.open(Name + '_LC' + str(lc_bin_num) + '_exposure.fits')
    tbdata = hdulist[1].data
    z = tbdata['EXPOSURE']
    exp = z[0]

    ################################################################
    #           This portion prints to the text file               #
    ################################################################

    f = open("lc_output.txt", "a")
    f.write(
        str(Flux) + ',' + str(Ferr) + ',' + str(ef) + ',' + str(ef_err) + ',' +
        str(limit) + ',' + str(analysis.Ts(Name)) + ',' + str(UL) + ',' +
        str(TMID) + ',' + str(exp) + ',' + str(LIKE) + ',' + str(LIKEC) + '\n')
    f.close()
    print "Likelihood analysis on this band is complete."

    yes = subprocess.call([
        'rm', Name + '_gtselect' + str(lc_bin_num) + '_lc.fits',
        Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits',
        Name + '_cmap' + str(lc_bin_num) + '_lc.fits',
        Name + '_ccube' + str(lc_bin_num) + '_lc.fits',
        Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
        Name + '_expMap' + str(lc_bin_num) + '_lc.fits',
        Name + '_LC' + str(lc_bin_num) + '_exposure.fits',
        Name + '_srcmaps' + str(lc_bin_num) + '_lc.fits',
        Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits',
        Name + '_gtmktime' + str(lc_bin_num) + '_exposure.fits'
    ])
    if yes == 0:
        print 'Files for bin have been deleted'
    else:
        print "Subprocessing failed. Unable to delete files for bin."
Example #22
def main(NAME, RA, DEC, TSTART, TSTOP, EMIN, EMAX, Np, path, ROIu):
    #outdir = os.environ["FERMI_TMPLATAREA"]
    gtliketxt=open("%s/%s_gtlike.txt"%(path,Np),'w')
    gtsedtxt=open("%s/%s_sed.txt"%(path,Np),'w')
    SCC='%s_SC00.fits'%(Np)
    SC=path+SCC
    Npp=path+Np
    print SC
    ROIue=float(ROIu)+10
    os.system("ls -1 '"+Npp+"'_PH*.fits > %s/%s_events.list" %(path,Np))
 
   # os.system('ls -1 'Np'+'PH*.fits > %s/%s_events.list' %(path,Np)
    my_apps.filter['evclass'] = 128
    my_apps.filter['evtype'] = 3
#    my_apps.filter['evclsmin'] = 3
#    my_apps.filter['evclsmax'] = 4
    my_apps.filter['ra'] = RA
    my_apps.filter['dec'] = DEC
    my_apps.filter['rad'] = ROIu
    my_apps.filter['emin'] = EMIN
    my_apps.filter['emax'] = EMAX
    my_apps.filter['zmax'] = 90
    my_apps.filter['tmin'] = TSTART
    my_apps.filter['tmax'] = TSTOP
    my_apps.filter['infile'] = '@%s/%s_events.list' %(path,Np)
    my_apps.filter['outfile'] = '%s/%s_filtered.fits'%(path,Np)
    my_apps.filter.run()
#    maketime
    my_apps.maketime['scfile'] = SC
    my_apps.maketime['filter'] = '(DATA_QUAL>0)&&(LAT_CONFIG==1)'
    my_apps.maketime['roicut'] = 'no'
    my_apps.maketime['evfile'] = '%s/%s_filtered.fits' %(path,Np)
    my_apps.maketime['outfile'] = '%s/%s_filtered_gti.fits' %(path,Np)
    my_apps.maketime.run()
#
#    my_apps.counts_map['evfile'] = '%s/%s_filtered_gti.fits'%(path,Np)
#    my_apps.counts_map['scfile'] = SC
#    my_apps.counts_map['outfile'] = '%s/%s_CountMap.fits'%(path,Np)
#    my_apps.counts_map.run()
#
    my_apps.expCube['evfile'] =  '%s/%s_filtered_gti.fits'%(path,Np)
    my_apps.expCube['scfile'] = SC
    my_apps.expCube['zmax'] = 90
    my_apps.expCube['outfile'] = '%s/%s_expCube.fits' %(path,Np)
    my_apps.expCube['dcostheta'] = 0.025
    my_apps.expCube['binsz'] = 1
    my_apps.expCube.run()

    my_apps.expMap['evfile'] = '%s/%s_filtered_gti.fits'%(path,Np)
    my_apps.expMap['scfile'] = SC
    my_apps.expMap['expcube'] ='%s/%s_expCube.fits'  %(path,Np)
    my_apps.expMap['outfile'] ='%s/%s_expMap.fits'  %(path,Np)
#    my_apps.expMap['irfs'] ='P7REP_SOURCE_V15'
    my_apps.expMap['irfs'] ='CALDB'
    my_apps.expMap['srcrad'] = ROIue
    my_apps.expMap['nlong'] =120
    my_apps.expMap['nlat'] =120
    my_apps.expMap['nenergies'] =20
    my_apps.expMap.run()

    #sara xml model
    roiname='%s/%s_filtered_gti.fits' %(path,Np)
    xml_creator_P7_v1.main(path,NAME,float(RA),float(DEC),float(EMIN), float(EMAX), 20,Np)
    xmlmodelname='%s/%s_model.xml' %(path,Np)
    
    my_apps.diffResps['evfile'] = '%s/%s_filtered_gti.fits'%(path,Np)
    my_apps.diffResps['scfile'] = SC
    my_apps.diffResps['srcmdl'] = xmlmodelname
    my_apps.diffResps['irfs'] = 'CALDB'
    my_apps.diffResps.run()
    
    
    xmlfitname='%s/%s_fit1.xml' %(path,Np)
    expMapFile='%s/%s_expMap.fits'  %(path,Np)
    expCubeFile='%s/%s_expCube.fits'  %(path,Np)
    obs = UnbinnedObs(roiname,SC ,expMap=expMapFile,expCube=expCubeFile,irfs='CALDB')
    like1 = UnbinnedAnalysis(obs,xmlmodelname,optimizer='NewMinuit')
    like1.fit(verbosity=0)
    like1.logLike.writeXml(xmlfitname)

    


 #   numl=search(NAME,xmlfitname)
 #   numlg=str(numl+3)
 #   os.system("sed '"+numlg+","+numlg+" s/free=\"1\"/free=\"0\"/' "+xmlfitname+ " > xml_sed.xml ")
 #   inputs=likeInput(like1,NAME,model="xml_sed.xml",nbins=9,phCorr=1.0)
    #low_edges = [200.,914.61,1955.87,8944.27,19127.05,40902.61]
    #high_edges = [427.69,1955.87,8944.27,19127.05,40902.61,187049.69]
    #centers = [0.2767, 1.265,  5.787, 12.37, 26.46, 86.60]
    #inputs.customBins(low_edges,high_edges)
 #   inputs.plotBins()
 #   inputs.fullFit(CoVar=True)
 #   sed = likeSED(inputs)
 #   sed.getECent()
  #  sed.fitBands()
   # sed.Plot()
    result=like1.model[NAME] 
    TS=like1.Ts(NAME)
#    I = like1.model[NAME].funcs['Spectrum'].getParam('Integral').value()
    flux = like1.flux(NAME,emin=100)  
#    flux=I*1e-9
    
    gamma = like1.model[NAME].funcs['Spectrum'].getParam('Index').value()
    cov_gg =like1.model[NAME].funcs['Spectrum'].getParam('Index').error()
 #   cov_II = like1.model[NAME].funcs['Spectrum'].getParam('Integral').error()
    flux_err = like1.fluxError(NAME,emin=100)
#    flux_err=cov_II*1e-9

    e=1000.0
    a=1
    b=1.e-18
    lenergy_bin=log10(double(EMIN))+(log10(double(EMAX))-log10(double(EMIN)))/2
    energy_bin=pow(10,lenergy_bin)
    freq=2.42e22*energy_bin/100.0
    ums = 1.-gamma
    conv = ums*pow(energy_bin, (-gamma))/(pow(double(EMAX), ums)-pow(double(EMIN), ums))*6.62e-2*(energy_bin/100.0)
    # conv is in Jy; now convert to nuFnu in erg/cm2/s
    convjy = conv*freq*1.e-23
    nufnu = flux*convjy
    b = flux_err*convjy
    err_log = log10((nufnu+b)/nufnu)

    date_start=computeDate(float(TSTART))
    date_stop=computeDate(float(TSTOP))


  #  like1.plot()
  #  fitsedname='%s_9bins_likeSEDout.fits' %NAME
  #  sedtool(fitsedname)


    print NAME, " TS=", TS
#    print result
#    print like1.model
    print "spectral index= ", gamma, " +/-", cov_gg 
    print " Flux=", flux, "+/-", flux_err
    print "freq", freq, " nuFnu=", nufnu, b,
 #   print "'UL': ", results_ul, err
    gtliketxt.write(NAME)
    gtliketxt.write(" RA=")
    gtliketxt.write(RA)
    gtliketxt.write(" DEC= ")
    gtliketxt.write(DEC)
    gtliketxt.write(" TS= ")
    gtliketxt.write(str(TS))
    gtliketxt.write("\n")
    gtliketxt.write(" Time Interval (MJD) ")
    gtliketxt.write(str(date_start))
    gtliketxt.write(" ")
    gtliketxt.write(str(date_stop))
    gtliketxt.write("\n ")
    gtliketxt.write("Flux ")

    if TS < 25:
        obs = UnbinnedObs(roiname, SC, expMap=expMapFile, expCube=expCubeFile, irfs='CALDB')
        like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='NewMinuit')
        like1.fit(verbosity=0)
        ul = UpperLimits(like1)
        UL = ul[NAME].compute(emin=double(EMIN), emax=double(EMAX))
        results_ul = UL[1]*1E-9
        err = 0
        print "'UL': ", results_ul, err
        gamma_ul = 2.0
        ums_ul = 1. - gamma_ul
        conv_ul = ums_ul*pow(energy_bin, (-gamma_ul))/(pow(double(EMAX), ums_ul)-pow(double(EMIN), ums_ul))*6.62e-2*(energy_bin/100.0)
        convjy_ul = conv_ul*freq*1.e-23
        nufnu_ul = results_ul*convjy_ul
        b = err*convjy_ul
        print "freq", freq, "0  nuFnu=", nufnu_ul, b,
        gtliketxt.write(str(results_ul))
        gtliketxt.write(" 0  ")
        #gtliketxt.write(err)
        gtsedtxt.write(str(freq))
        gtsedtxt.write(" | 0 ")
        gtsedtxt.write(" | ")
        gtsedtxt.write(str(nufnu_ul))
        gtsedtxt.write(" | ")
        gtsedtxt.write(str(b))
        gtsedtxt.write(" | ")
        gtsedtxt.write(str(date_start))
        gtsedtxt.write(" | ")
        gtsedtxt.write(str(date_stop))
        gtsedtxt.write(" | ")
        gtsedtxt.write(" UL ")
        gtsedtxt.write(" | ")

    else:
        gtliketxt.write(str(flux))
        gtliketxt.write(" ")
        gtliketxt.write(str(flux_err))
        gtliketxt.write("\n")
        gtliketxt.write("Spectral Index = ")
        gtliketxt.write(str(gamma))
        gtliketxt.write(" ")
        gtliketxt.write(str(cov_gg))
        gtsedtxt.write(" ")
        gtsedtxt.write(str(freq))
        gtsedtxt.write(" | 0 ")
        gtsedtxt.write(" | ")
        gtsedtxt.write(str(nufnu))
        gtsedtxt.write(" | ")
        gtsedtxt.write(str(b))
        gtsedtxt.write(" | ")
        gtsedtxt.write(str(date_start))
        gtsedtxt.write(" | ")
        gtsedtxt.write(str(date_stop))
        gtsedtxt.write(" | ")
Example #23
if not irfsType == 'CALDB':
    my_apps.diffResps['evfile'] = filteredLATFile
    my_apps.diffResps['scfile'] = spacecraftFile
    my_apps.diffResps['srcmdl'] = modelFile
    my_apps.diffResps['irfs'] = irfsType
    my_apps.diffResps.run()

# Run the Likelihood Analysis
import pyLikelihood
from UnbinnedAnalysis import *
obs = UnbinnedObs(filteredLATFile,
                  spacecraftFile,
                  expMap=expMapFile,
                  expCube=ltCubeFile,
                  irfs=irfsType)
like = UnbinnedAnalysis(obs, modelFile, optimizer='Minuit')

# Analysis Complete
print "################ Analysis Complete ################"
print obs
print like
print "###################################################"

# Some plots!

like.tol
like.tolType
like.tol = 0.0001
likeobj = pyLike.Minuit(like.logLike)
like.fit(verbosity=0, covar=True,
         optObject=likeobj)  # Warning: This takes VERY long ~ 30 minutes