def initAltFit(self, opt="MINUIT"):
    """Initializes a MINUIT optimizer to use as a backup to the DRM
    optimizer.  This function is used internally in the fitDRM function
    so you probably will never use it.  You need to run makeObs before
    you run this function.  If it hasn't been run, this function will
    exit."""
    try:
        self.obs
    except AttributeError:
        self.logger.critical("Obs object does not exist. Create it first with the makeObs function")
        return
    try:
        checkForFiles(self.logger, [self.likelihoodConf['model']])
        if self.commonConf['binned']:
            self.ALTFIT = BinnedAnalysis(self.obs, self.likelihoodConf['model'], optimizer=opt)
        else:
            self.ALTFIT = UnbinnedAnalysis(self.obs, self.likelihoodConf['model'], optimizer=opt)
        self.ALTFIT.tol = float(self.likelihoodConf['drmtol'])
        self.ALTFITobj = pyLike.Minuit(self.ALTFIT.logLike)
        self.logger.info(self.ret.subn(', ', str(self.ALTFIT))[0])
    except FileNotFound:
        self.logger.critical("One or more needed files do not exist")
        return
def loadUnbinnedObs(self, f, verbosity=0):
    if verbosity:
        print 'Loading unbinned observation:', f['ft1']
    obs = UA.UnbinnedObs(eventFile=f['ft1'], scFile=f['ft2'],
                         expMap=f['emap'], expCube=f['ecube'], irfs=f['irfs'])
    like = UA.UnbinnedAnalysis(obs, srcModel=self.model, optimizer=self.optimizer)
    return [obs, like]
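# A minimal usage sketch for loadUnbinnedObs() above (not part of the
# original source): the dictionary keys mirror those read in the method,
# but every file name and the `pipeline` instance are hypothetical.
f = {'ft1':   'events_filtered_gti.fits',   # filtered FT1 event file
     'ft2':   'spacecraft.fits',            # FT2 spacecraft file
     'emap':  'expMap.fits',                # unbinned exposure map
     'ecube': 'ltcube.fits',                # livetime cube
     'irfs':  'P8R2_SOURCE_V6'}             # IRF selection
obs, like = pipeline.loadUnbinnedObs(f, verbosity=1)
like.fit(verbosity=0)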
def unBinnedlikeFit2(self, optmz, write):
    """Computes a second likelihood fit needed to compute the SED."""
    global like2
    like2 = UnbinnedAnalysis(obs, str(self.src) + "_model_likehoodFit1.xml",
                             optimizer=optmz)
    like2.tol = 1e-8
    like2obj = pyLike.NewMinuit(like2.logLike)
    like2.fit(verbosity=1, covar=True, optObject=like2obj)
    if write:
        like2.logLike.writeXml(str(self.src) + "_model_likehoodFit2.xml")
def main(NAME, RA, DEC, EMIN, EMAX, SC, ROIfile, xmlmodelname, expmap, expcube):
    obs = UnbinnedObs(ROIfile, SC, expMap=expmap, expCube=expcube,
                      irfs='P7REP_SOURCE_V15')
    like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='MINUIT')
    like1.fit(verbosity=0)
    ul = UpperLimits(like1)
    UL = ul[NAME].compute(emin=float(EMIN), emax=float(EMAX))
    results_ul = UL[1] * 1E-9
    err = 0
    print "'UL': ", results_ul
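# A hedged example call for main() above; the source name, coordinates,
# and file names are all placeholders. The argument order follows the
# signature (NAME, RA, DEC, EMIN, EMAX, SC, ROIfile, xmlmodelname,
# expmap, expcube).
main('3C279', 194.046, -5.789, 100, 300000, 'spacecraft.fits',
     'roi_events.fits', '3C279_model.xml', 'expMap.fits', 'ltcube.fits')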
def _new_log_like(self, event_file):
    new_obs = FastUnbinnedObs(event_file, self._orig_log_like.observation)
    # Create an empty XML to trick UnbinnedAnalysis into not reading any source.
    # We will then add the sources that have already been loaded in the original
    # likelihood object. This saves a lot of time because sources like the
    # Galactic template do not need to be reloaded from disk.
    with open("__empty_xml.xml", "w+") as f:
        f.write('<source_library title="source library"></source_library>')
    # Load pyLike (we use DRMNFB because it is fast, much faster than Minuit,
    # and we do not care about the errors)
    new_like = UnbinnedAnalysis.UnbinnedAnalysis(new_obs, "__empty_xml.xml",
                                                 optimizer=self._optimizer)
    # Now load the sources from the other object
    for source_name in self._orig_log_like.sourceNames():
        if source_name[-1] == 'e' and source_name.find("Template") < 0:
            # Extended source, skip it (we didn't compute gtdiffrsp because it crashes)
            continue
        new_like.addSource(self._orig_log_like.logLike.source(source_name))
    return new_like
def set_model(self, likelihoodModel, source_name=None):
    """
    Set the model to be used in the joint minimization.
    Must be a LikelihoodModel instance. This method can also set or
    override a previously set source name.
    """
    # with suppress_stdout():
    if self._source_name is not None:
        if (source_name is not None) and (source_name != self._source_name):
            log.warning('Changing target source from %s to %s' %
                        (self._source_name, source_name))
            self._source_name = source_name
        assert self._source_name in likelihoodModel.point_sources, (
            'Source %s is not a source in the likelihood model!' % self._source_name)
    self.lmc = LikelihoodModelConverter(likelihoodModel, self.irf,
                                        source_name=self._source_name)
    self.lmc.setFileSpectrumEnergies(self.emin, self.emax, self.Nenergies)
    xmlFile = str("%s.xml" % get_random_unique_name())
    temp_files = self.lmc.writeXml(xmlFile, self.ra, self.dec, self.rad)
    if self.kind == "BINNED":
        self.like = BinnedAnalysis.BinnedAnalysis(self.obs, xmlFile,
                                                  optimizer="DRMNFB")
    else:
        self.like = UnbinnedAnalysis.UnbinnedAnalysis(self.obs, xmlFile,
                                                      optimizer="DRMNFB")
    self.likelihoodModel = likelihoodModel
    # Here we also need to compute the logLike value, so that the model
    # in the XML file will be changed if needed
    dumb = self.get_log_like()
    # Since the Galactic template is now in RAM, we can remove the temporary file
    os.remove(self.lmc._unique_filename)
    os.remove(xmlFile)
    # Delete temporary spectral files
    for temp_file in temp_files:
        os.remove(temp_file)
    # Build the list of the nuisance parameters
    new_nuisance_parameters = self._setNuisanceParameters()
    self.update_nuisance_parameters(new_nuisance_parameters)
def __init__(self, ft1, ft2, expmap, ltcube, xml_file,
             path_of_tar_file_with_simulated_ft1_files,
             tsmap_spec=None, srcname='GRB'):
    # Process the simulations applying the same cuts as in the data file
    sp = SimulationProcessor(ft1, ft2, path_of_tar_file_with_simulated_ft1_files)
    ra_center, dec_center, radius = sp.roi
    # Now create the likelihood object
    obs = MyUnbinnedObs(ft1, ft2, expMap=expmap, expCube=ltcube)
    like = UnbinnedAnalysis.UnbinnedAnalysis(obs, xml_file, "MINUIT")
    fast_ts = FastTS(like, ts_map_spec=tsmap_spec, target_source=srcname)
    # Get the TSs
    self._tss = fast_ts.process_ft1s(sp.processed_ft1s,
                                     ra_center=ra_center,
                                     dec_center=dec_center)
def initDRM(self):
    """Initializes the DRM optimizer (either binned or unbinned).  This
    is usually the second function that you run when using this module.
    You need to run makeObs before you run this function.  If it hasn't
    been run, this function will exit."""
    try:
        self.obs
    except AttributeError:
        self.logger.critical("Obs object does not exist. Create it first with the makeObs function")
        return
    try:
        qU.checkForFiles(self.logger, [self.likelihoodConf['model']])
        if self.commonConf['binned']:
            self.DRM = BAn.BinnedAnalysis(self.obs, self.likelihoodConf['model'],
                                          optimizer="DRMNGB")
        else:
            self.DRM = UbAn.UnbinnedAnalysis(self.obs, self.likelihoodConf['model'],
                                             optimizer="DRMNGB")
        self.DRM.tol = float(self.likelihoodConf['drmtol'])
        self.logger.info(self.ret.subn(', ', str(self.DRM))[0])
    except qU.FileNotFound:
        self.logger.critical("One or more needed files do not exist")
        return
def initAltFit(self, opt="MINUIT"):
    """Initializes a MINUIT optimizer to use as a backup to the DRM
    optimizer.  This function is used internally in the fitDRM function
    so you probably will never use it.  You need to run makeObs before
    you run this function.  If it hasn't been run, this function will
    exit."""
    try:
        self.obs
    except AttributeError:
        self.logger.critical("Obs object does not exist. Create it first with the makeObs function")
        return
    try:
        qU.checkForFiles(self.logger, [self.likelihoodConf['model']])
        if self.commonConf['binned']:
            self.ALTFIT = BAn.BinnedAnalysis(self.obs, self.likelihoodConf['model'],
                                             optimizer=opt)
        else:
            self.ALTFIT = UbAn.UnbinnedAnalysis(self.obs, self.likelihoodConf['model'],
                                                optimizer=opt)
        self.ALTFIT.tol = float(self.likelihoodConf['drmtol'])
        self.ALTFITobj = pyLike.Minuit(self.ALTFIT.logLike)
        self.logger.info(self.ret.subn(', ', str(self.ALTFIT))[0])
    except qU.FileNotFound:
        self.logger.critical("One or more needed files do not exist")
        return
def run(Name, Ra, Dec, minEnergy, maxEnergy, SCFile, ulTS, NMtol, evclass, model):
    if evclass == 512:
        irf = "P8R2_ULTRACLEAN_V6"
    elif evclass == 128:
        irf = "P8R2_SOURCE_V6"
    elif evclass == 256:
        irf = "P8R2_CLEAN_V6"
    elif evclass == 1024:
        irf = "P8R2_ULTRACLEANVETO_V6"
    print "This is multiUBLike.\nPlease make sure that you have added a model for your source with the name: " + str(Name)
    print "Calculating the diffuse response for photons in this bin."
    my_apps.diffResps['evfile'] = Name + '_gtmktime.fits'
    my_apps.diffResps['scfile'] = SCFile
    my_apps.diffResps['srcmdl'] = model
    my_apps.diffResps['irfs'] = irf
    my_apps.diffResps.run()
    print "Finished calculating the diffuse response. Now moving on to an UNBINNED likelihood analysis."
    obs = UnbinnedObs(Name + '_gtmktime.fits', SCFile,
                      expMap=Name + '_expMap.fits',
                      expCube=Name + '_ltcube.fits', irfs=irf)
    analysis = UnbinnedAnalysis(obs, model, optimizer='NewMinuit')
    likeObj = pyLike.NewMinuit(analysis.logLike)
    analysis.tol = NMtol
    lkl = analysis.fit(verbosity=0, covar=True, optObject=likeObj)
    analysis.writeXml(Name + '_output_model.xml')
    fit = likeObj.getRetCode()
    print "Likelihood has converged with code " + str(likeObj.getRetCode())
    multiLike.printResults(analysis, Name, minEnergy, maxEnergy)
    print "Fit has likelihood: " + str(lkl)
    print "\nThe TS is below the threshold, calculating 95% confidence-level Bayesian upper limit."
    limit = 0  # ,results = IUL.calc_int(analysis, Name, cl=0.95, emin=minEnergy, emax=maxEnergy)
    print "Bayesian upper limit: " + str(limit) + " photons/cm^2/s"
    # Pull out the prefactor/err, index/err, and scale
    N0 = analysis.model[Name].funcs['Spectrum'].getParam('Prefactor').value()
    N0_err = analysis.model[Name].funcs['Spectrum'].getParam('Prefactor').error()
    gamma = analysis.model[Name].funcs['Spectrum'].getParam('Index').value()
    gamma_err = analysis.model[Name].funcs['Spectrum'].getParam('Index').error()
    E0 = analysis.model[Name].funcs['Spectrum'].getParam('Scale').value()
    # Array that returns the results of the unbinned analysis:
    # log-likelihood, flux, flux_err, upper limit, test statistic,
    # and the spectral parameters
    Return = [lkl,
              analysis.flux(Name, emin=minEnergy, emax=maxEnergy),
              analysis.fluxError(Name, emin=minEnergy, emax=maxEnergy),
              limit,
              analysis.Ts(Name, reoptimize=False),
              N0, N0_err, gamma, gamma_err, E0]
    return Return
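# Hypothetical invocation of run() above for a single bin. evclass=128
# maps to the P8R2_SOURCE_V6 IRFs in the branch at the top of the
# function; all file and source names are placeholders.
results = run('MySource', 194.046, -5.789, 100, 300000, 'spacecraft.fits',
              ulTS=25, NMtol=1e-4, evclass=128, model='MySource_model.xml')
lkl, flux, flux_err = results[0], results[1], results[2]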
def runFit(basename, directory, model, irfs, srcName, Emin, Emax):
    """Performs the fit using maximum likelihood estimation."""
    evtfile = directory + "/" + basename + "_filtered_gti.fits"
    SC = basename + "_SC.fits"
    expmap = directory + "/" + basename + "_expMap.fits"
    ltcube = directory + "/" + basename + "_ltcube.fits"
    obs = UnbinnedObs(eventFile=evtfile, scFile=SC, expMap=expmap,
                      expCube=ltcube, irfs=irfs)
    analysis = UnbinnedAnalysis(obs, srcModel=model, optimizer='NEWMINUIT')
    likeObj = pyLike.NewMinuit(analysis.logLike)
    analysis.fit(verbosity=0, covar=True, optObject=likeObj)
    num = int(directory[directory.find('bin') + 3:])
    return "{:8d}{:14.4e}{:14.4e}{:10.4f}{:12.4f}{:12.2f}".format(
        num,
        analysis.flux(srcName, emin=100, emax=300000),
        analysis.fluxError(srcName, emin=100, emax=300000),
        analysis.model[srcName].funcs['Spectrum'].getParam('Index').value(),
        analysis.model[srcName].funcs['Spectrum'].getParam('Index').error(),
        analysis.Ts(srcName))
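# Hypothetical call to runFit() above. The directory name must contain
# 'bin' followed by the bin number, since the function parses it with
# directory[directory.find('bin') + 3:]; everything else is a placeholder.
row = runFit('MySource', 'bin3', 'MySource_model.xml', 'P8R2_SOURCE_V6',
             'MySource', 100, 300000)
print(row)  # fixed-width row: bin number, flux, flux error, index, index error, TS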
def initMIN(self, useBadFit=False):
    """Initializes a NewMinuit optimizer to use as a backup to the DRM
    optimizer.  This is usually run after you have initially run fitDRM
    and created a <basename>_likeDRM.xml model file which is used as a
    seed for the NewMinuit optimizer.  You can skip the DRM process if
    you like but you need to have the proper model file
    (<basename>_likeDRM.xml) present in the working directory.  You need
    to run makeObs before you run this function.  If it hasn't been run,
    this function will exit.  If you want to use the non-convergent fit
    from fitDRM, set useBadFit to True."""
    try:
        self.obs
    except AttributeError:
        self.logger.critical("Obs object does not exist. Create it first with the makeObs function.")
        return
    if useBadFit:
        model = self.commonConf['base'] + '_badDRMFit.xml'
    else:
        model = self.commonConf['base'] + '_likeDRM.xml'
    try:
        checkForFiles(self.logger, [model])
        if self.commonConf['binned']:
            self.MIN = BinnedAnalysis(self.obs, model, optimizer='NewMinuit')
        else:
            self.MIN = UnbinnedAnalysis(self.obs, model, optimizer='NewMinuit')
        self.MIN.tol = float(self.likelihoodConf['mintol'])
        self.MINobj = pyLike.NewMinuit(self.MIN.logLike)
        self.logger.info(self.ret.subn(', ', str(self.MIN))[0])
    except FileNotFound:
        self.logger.critical("One or more needed files do not exist")
        return
def initDRM(self):
    """Initializes the DRM optimizer (either binned or unbinned).  This
    is usually the second function that you run when using this module.
    You need to run makeObs before you run this function.  If it hasn't
    been run, this function will exit."""
    try:
        self.obs
    except AttributeError:
        self.logger.critical("Obs object does not exist. Create it first with the makeObs function")
        return
    try:
        checkForFiles(self.logger, [self.likelihoodConf['model']])
        if self.commonConf['binned']:
            self.DRM = BinnedAnalysis(self.obs, self.likelihoodConf['model'],
                                      optimizer="DRMNGB")
        else:
            self.DRM = UnbinnedAnalysis(self.obs, self.likelihoodConf['model'],
                                        optimizer="DRMNGB")
        self.DRM.tol = float(self.likelihoodConf['drmtol'])
        self.logger.info(self.ret.subn(', ', str(self.DRM))[0])
    except FileNotFound:
        self.logger.critical("One or more needed files do not exist")
        return
def set_model(self, likelihoodModel):
    """
    Set the model to be used in the joint minimization.
    Must be a LikelihoodModel instance.
    """
    with suppress_stdout():
        self.lmc = LikelihoodModelConverter(likelihoodModel, self.irf)
        self.lmc.setFileSpectrumEnergies(self.emin, self.emax, self.Nenergies)
        xmlFile = str("%s.xml" % get_random_unique_name())
        temp_files = self.lmc.writeXml(xmlFile, self.ra, self.dec, self.rad)
    if self.kind == "BINNED":
        self.like = BinnedAnalysis.BinnedAnalysis(self.obs, xmlFile,
                                                  optimizer="DRMNFB")
    else:
        self.like = UnbinnedAnalysis.UnbinnedAnalysis(self.obs, xmlFile,
                                                      optimizer="DRMNFB")
    self.likelihoodModel = likelihoodModel
    # Here we also need to compute the logLike value, so that the model
    # in the XML file will be changed if needed
    dumb = self.get_log_like()
    # Since the Galactic template is now in RAM, we can remove the temporary file
    os.remove(self.lmc._unique_filename)
    os.remove(xmlFile)
    # Delete temporary spectral files
    for temp_file in temp_files:
        os.remove(temp_file)
    # Build the list of the nuisance parameters
    new_nuisance_parameters = self._setNuisanceParameters()
    self.update_nuisance_parameters(new_nuisance_parameters)
def initMIN(self, useBadFit=False, modelFile="", useEdisp=False):
    """Initializes a NewMinuit optimizer to use as a backup to the DRM
    optimizer.  This is usually run after you have initially run fitDRM
    and created a <basename>_likeDRM.xml model file which is used as a
    seed for the NewMinuit optimizer.  You can skip the DRM process if
    you like but you need to have the proper model file
    (<basename>_likeDRM.xml) present in the working directory.  You need
    to run makeObs before you run this function.  If it hasn't been run,
    this function will exit.  If you want to use the non-convergent fit
    from fitDRM, set useBadFit to True.  You can also pass a custom model
    file name via the modelFile parameter."""
    try:
        self.obs
    except AttributeError:
        self.logger.critical("Obs object does not exist. Create it first with the makeObs function.")
        return
    if useBadFit:
        model = self.commonConf['base'] + '_badDRMFit.xml'
    else:
        model = self.commonConf['base'] + '_likeDRM.xml'
    if modelFile:
        model = modelFile
    try:
        qU.checkForFiles(self.logger, [model])
        if self.commonConf['binned']:
            self.MIN = BAn.BinnedAnalysis(self.obs, model, optimizer='NewMinuit')
        else:
            self.MIN = UbAn.UnbinnedAnalysis(self.obs, model, optimizer='NewMinuit')
        self.MIN.tol = float(self.likelihoodConf['mintol'])
        self.MINobj = pyLike.NewMinuit(self.MIN.logLike)
        self.pristine = LikelihoodState(self.MIN)
        self.logger.info(self.ret.subn(', ', str(self.MIN))[0])
        if useEdisp:
            self.MIN.logLike.set_edisp_flag(useEdisp)
    except qU.FileNotFound:
        self.logger.critical("One or more needed files do not exist")
        return
def unBinnedlikeFit1(self, optmz, emin, emax, plot, write, analysisType):
    """Computes a first likelihood fit."""
    # global obs
    # obs = UnbinnedObs(self.gtiFile, self.scFile, expMap=self.expmapFile,
    #                   expCube=self.ltcubeFile, irfs=self.irfs)
    like1 = UnbinnedAnalysis(obs, self.model, optimizer=optmz)
    like1.tol = 0.1
    # like1.setEnergyRange(emin, emax)
    like1.fit(verbosity=1)
    if plot:
        like1.plot()
    if write:
        like1.logLike.writeXml(str(self.src) + "_model_likehoodFit1.xml")
def makeObs(self):
    """Creates either a binned or unbinned observation object for use in
    the likelihood analysis.  This function checks for all of the needed
    files first.  If you do not have a needed file, see the quickAnalysis
    module for creation.  This function should be run before any of the
    init or fit functions."""
    if self.commonConf['binned']:
        try:
            qU.checkForFiles(self.logger, [
                self.commonConf['base'] + '_srcMaps.fits',
                self.commonConf['base'] + '_ltcube.fits',
                self.commonConf['base'] + '_BinnedExpMap.fits'
            ])
            self.obs = BAn.BinnedObs(
                srcMaps=self.commonConf['base'] + '_srcMaps.fits',
                expCube=self.commonConf['base'] + '_ltcube.fits',
                binnedExpMap=self.commonConf['base'] + '_BinnedExpMap.fits',
                irfs=self.commonConf['irfs'])
        except qU.FileNotFound:
            self.logger.critical("One or more needed files do not exist")
            sys.exit()
    else:
        try:
            qU.checkForFiles(self.logger, [
                self.commonConf['base'] + '_filtered_gti.fits',
                self.commonConf['base'] + '_SC.fits',
                self.commonConf['base'] + '_expMap.fits',
                self.commonConf['base'] + '_ltcube.fits'
            ])
            self.obs = UbAn.UnbinnedObs(
                self.commonConf['base'] + '_filtered_gti.fits',
                self.commonConf['base'] + '_SC.fits',
                expMap=self.commonConf['base'] + '_expMap.fits',
                expCube=self.commonConf['base'] + '_ltcube.fits',
                irfs=self.commonConf['irfs'])
        except qU.FileNotFound:
            self.logger.critical("One or more needed files do not exist")
            sys.exit()
    self.logger.info(self.ret.subn(', ', str(self.obs))[0])
def _new_log_like(self, event_file):
    new_obs = FastUnbinnedObs(event_file, self._orig_log_like.observation)
    # Create an empty XML to trick UnbinnedAnalysis into not reading any source.
    # We will then add the sources that have already been loaded in the original
    # likelihood object. This saves a lot of time because sources like the
    # Galactic template do not need to be reloaded from disk.
    with open("__empty_xml.xml", "w+") as f:
        f.write('<source_library title="source library"></source_library>')
    # Load pyLike
    new_like = UnbinnedAnalysis.UnbinnedAnalysis(new_obs, "__empty_xml.xml",
                                                 optimizer="MINUIT")
    # Now load the sources from the other object
    for source_name in self._sources_to_keep:
        # print("Adding %s" % source_name)
        new_like.addSource(self._orig_log_like.logLike.source(source_name))
    return new_like
# Each event must have a separate response precomputed for each diffuse
# component in the source model. The precomputed responses for Pass 7 (V6)
# data are for the gll_iem_v05, iso_source_v05.txt, and iso_clean_v05.txt
# diffuse models.
os.system('cp ' + filteredLATFile + ' ' + filteredLATFile_withDiffResps)  # Make a copy of the filteredLATFile
my_apps.diffResps['evfile'] = filteredLATFile_withDiffResps
my_apps.diffResps['scfile'] = spacecraftFile
my_apps.diffResps['srcmdl'] = modelFile
my_apps.diffResps['irfs'] = irfsType
my_apps.diffResps.run()

# Run the Likelihood Analysis
print colors.OKBLUE + "Performing Likelihood Analysis" + colors.ENDC
import pyLikelihood
from UnbinnedAnalysis import *
obs = UnbinnedObs(filteredLATFile_withDiffResps, spacecraftFile,
                  expMap=expMapFile, expCube=ltCubeFile, irfs=irfsType)
like = UnbinnedAnalysis(obs, modelFile, optimizer=optimizerType)

# Cuts Complete
print colors.OKGREEN + "################ Analysis Complete ################"
print obs
print like
print "###################################################" + colors.ENDC

################################
###### Adjust Source Model #####
################################
like.tol
like.tolType
like.tol = 0.0001
if optimizerType == 'Minuit':
# Find expomap
expmaps = glob.glob("%s_expomap.fit*" % root_name)
assert len(expmaps) == 1, "Couldn't find expomap"
expmap = expmaps[0]

# Find XML model output of gtdolike
xmls = glob.glob("%s_likeRes.xml" % root_name)
assert len(xmls) == 1, "Couldn't find XML"
xml_res = xmls[0]

obs = UnbinnedAnalysis.UnbinnedObs(filteredeventfile, dataset['ft2file'],
                                   expMap=expmap, expCube=ltcube)
like = UnbinnedAnalysis.UnbinnedAnalysis(obs, xml_res, 'MINUIT')
ftm = FastTSMap(like)
(bestra, bestdec), maxTS = ftm.search_for_maximum(args.ra, args.dec,
                                                  float(half_size),
                                                  int(n_side), verbose=False)

# Now append the results for this interval
grb = filter(lambda x: x.name.find("GRB") >= 0, sources)[0]
if args.tsmap_spec is not None:
    if maxTS > grb.TS:
        print("\n\n=========================================")
        print(" Fast TS Map has found a better position")
def runLikelihood(subdir, tpl_file):
    '''This function runs the likelihood code on a set of pixels in a
    subdirectory.  It takes as input the subdirectory to work on and a
    template counts map.  It reads its configuration from a pickle file
    (par.pck) that should be located in the subdirectory and the pixel
    locations from another pickle file (pixel.pck).  It then creates an
    overall likelihood object, does a quick global fit and then loops
    over the pixels.  At each pixel, it creates a test source, fits that
    source, calculates the TS of the source and writes the results to an
    output file in the subdirectory called ts_results.dat.'''
    parfile = open("par.pck", "r")
    pars = pickle.load(parfile)
    pixelfile = open("pixel.pck", "r")
    pixels = pickle.load(pixelfile)
    pixel_coords = PixelCoords(tpl_file)
    obs = UnbinnedObs(resolve_fits_files(pars['evfile']),
                      resolve_fits_files(pars['scfile']),
                      expMap='../' + pars['expmap'],
                      expCube='../' + pars['expcube'], irfs=pars['irfs'])
    like = UnbinnedAnalysis(obs, '../' + pars['srcmdl'], pars['optimizer'])
    like.setFitTolType(pars['toltype'])
    like.optimize(0)
    loglike0 = like()
    test_src = getPointSource(like)
    target_name = 'testSource'
    test_src.setName(target_name)
    outfile = 'ts_results.dat'
    finished_pixels = []
    if os.path.isfile(outfile):
        input = open(outfile, 'r')
        for line in input:
            tokens = line.strip().split()
            ij = int(tokens[0]), int(tokens[1])
            finished_pixels.append(ij)
        input.close()
    output = open(outfile, 'a')
    for indx, i, j in pixels:
        if (i, j) in finished_pixels:
            continue
        ra, dec = pixel_coords(i, j)
        test_src.setDir(ra, dec, True, False)
        like.addSource(test_src)
        like.optimize(0)
        ts = -2 * (like() - loglike0)
        output.write("%3i %3i %.3f %.3f %.5f\n" % (i, j, ra, dec, ts))
        output.flush()
        like.deleteSource(target_name)
    output.close()
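# Note on the TS computed in the loop above (added for clarity): calling
# the analysis object, like(), returns the *negative* log-likelihood, so
#
#     ts = -2 * (like() - loglike0) = 2 * (logL_src - logL_null)
#
# which is the standard likelihood-ratio test statistic for the test
# source at each pixel.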
def runFermiTools(Name, RA, DEC, minEnergy, maxEnergy, SCFile, radius, binsz,
                  TSTART, TSTOP, Evfile, bins, zmax, evclass, evtype, TSul,
                  NMtol, lc_bin_num, runMRM):
    print "Working on bin " + str(lc_bin_num) + " for the light curve."
    f = FermiObject()

    """
    Following steps execute Fermi Tool gtselect
    """
    print('\nWorking on file.')
    print('Cutting file to fit desired parameters . . .\n')
    f._setEvclass(evclass)
    f._setEvtype(evtype)
    f._setRa(RA)
    f._setDec(DEC)
    f._setRad(radius)
    f._setEmin(minEnergy)
    f._setEmax(maxEnergy)
    f._setZmax(zmax)
    f._setTmin(TSTART)
    f._setTmax(TSTOP)
    f._setInfile(Evfile)
    f._setOutfile(Name + '_gtselect' + str(lc_bin_num) + '_lc.fits')
    f.amonSelect()
    print('File cuts have been made. Now making cuts for GTI using spacecraft file.')

    """
    Following steps execute Fermi Tool gtmktime
    """
    f._setScfile(SCFile)
    f._setRoicut('no')
    f._setEvfile(Name + '_gtselect' + str(lc_bin_num) + '_lc.fits')
    f._setOutfile(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits')
    ###############################################
    # Filter expression                           #
    Filter = '(DATA_QUAL>0)&&(LAT_CONFIG==1)'
    ###############################################
    f._setFilter(Filter)
    print('Working on file ' + str(f.getOutfile()) + ' . . .')
    f.amonTime()
    print('File cuts have been made.')
    print('Using XML model from whole dataset.\n Moving on to gtltcube.')

    print "Now working on ltcube file using gtltcube\n"
    my_apps.expCube['evfile'] = Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits'
    my_apps.expCube['scfile'] = SCFile
    my_apps.expCube['outfile'] = Name + '_ltcube' + str(lc_bin_num) + '_lc.fits'
    my_apps.expCube['dcostheta'] = 0.025
    my_apps.expCube['binsz'] = 1
    my_apps.expCube['phibins'] = 0
    my_apps.expCube['zmax'] = zmax
    my_apps.expCube['chatter'] = 0
    my_apps.expCube.run()

    print "\nltcube complete.\nMoving to compute exposure map with gtexpmap.\n"
    my_apps.expMap['evfile'] = Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits'
    my_apps.expMap['scfile'] = SCFile
    my_apps.expMap['expcube'] = Name + '_ltcube' + str(lc_bin_num) + '_lc.fits'
    my_apps.expMap['outfile'] = Name + '_expMap' + str(lc_bin_num) + '_lc.fits'
    my_apps.expMap['irfs'] = 'CALDB'
    my_apps.expMap['srcrad'] = radius + 10
    my_apps.expMap['nlong'] = 4 * (radius + 10)
    my_apps.expMap['nlat'] = 4 * (radius + 10)
    ebin = int(10 * log10(maxEnergy / minEnergy))
    print "There are " + str(ebin) + " energy bins."
    my_apps.expMap['nenergies'] = ebin
    my_apps.expMap.run()
    print "Finished making exposure map.\n"

    print "Calculating the diffuse response for photons in this bin."
    my_apps.diffResps['evfile'] = Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits'
    my_apps.diffResps['scfile'] = SCFile
    my_apps.diffResps['srcmdl'] = Name + '_output_model.xml'
    my_apps.diffResps['irfs'] = 'CALDB'
    my_apps.diffResps.run()
    print "Finished calculating the diffuse response. Now moving on to an UNBINNED likelihood analysis."
    obs = UnbinnedObs(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits', SCFile,
                      expMap=Name + '_expMap' + str(lc_bin_num) + '_lc.fits',
                      expCube=Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
                      irfs='P8R2_SOURCE_V6')
    analysis = UnbinnedAnalysis(obs, Name + '_output_model.xml',
                                optimizer='NewMinuit')
    likeObj = pyLike.NewMinuit(analysis.logLike)
    analysis.tol = NMtol
    LIKE = analysis.fit(verbosity=0, covar=True, optObject=likeObj)
    fit = likeObj.getRetCode()
    print "Likelihood has converged with code " + str(likeObj.getRetCode())
    Flux = analysis.flux(Name, emin=minEnergy, emax=maxEnergy)
    Ferr = analysis.fluxError(Name, emin=minEnergy, emax=maxEnergy)
    MeVtoErg = 1.602e-6
    ef = analysis.energyFlux(Name, minEnergy, maxEnergy) * MeVtoErg
    ef_err = analysis.energyFluxError(Name, minEnergy, maxEnergy) * MeVtoErg
    UL = False
    TSUM = TSTART + TSTOP
    TMID = TSUM / 2
    limit = Flux
    if analysis.Ts(Name) < TSul:
        UL = True
        limit, results = IUL.calc_int(analysis, Name, cl=0.90,
                                      emin=minEnergy, emax=maxEnergy)

    # Do a second likelihood fit with constant flux to calculate the TS variability
    obsC = UnbinnedObs(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits', SCFile,
                       expMap=Name + '_expMap' + str(lc_bin_num) + '_lc.fits',
                       expCube=Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
                       irfs='P8R2_SOURCE_V6')
    analysisC = UnbinnedAnalysis(obsC, Name + '_var_model.xml',
                                 optimizer='NewMinuit')
    likeObjC = pyLike.NewMinuit(analysisC.logLike)
    analysisC.tol = NMtol
    LIKEC = analysisC.fit(verbosity=0, covar=True, optObject=likeObjC)

    # Run gtselect to make a smaller data FITS file to compute the exposure,
    # set to 3 degrees around the source of interest
    f._setRad(3)
    f._setInfile(Evfile)
    f._setOutfile(Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits')
    print "Creating file " + Name + "_gtselect_exposure.fits"
    f.amonSelect()

    # Run gtmktime on this small region
    f._setEvfile(Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits')
    f._setOutfile(Name + '_gtmktime' + str(lc_bin_num) + '_exposure.fits')
    print('Working on file ' + str(f.getOutfile()))
    f.amonTime()

    my_apps.evtbin['algorithm'] = 'LC'
    my_apps.evtbin['evfile'] = f.getOutfile()
    my_apps.evtbin['outfile'] = Name + '_LC' + str(lc_bin_num) + '_exposure.fits'
    my_apps.evtbin['scfile'] = f.getScfile()
    my_apps.evtbin['tbinalg'] = 'LIN'
    my_apps.evtbin['tstart'] = f.getTmin()
    my_apps.evtbin['tstop'] = f.getTmax()
    my_apps.evtbin['dtime'] = TSTOP - TSTART
    my_apps.evtbin.run()
    yes = subprocess.call(['gtexposure',
                           Name + '_LC' + str(lc_bin_num) + '_exposure.fits',
                           f.getScfile(), 'P8R2_SOURCE_V6',
                           Name + '_output_model.xml', Name])
    if yes == 0:
        print "Exposure map has been created"
    else:
        print "Subprocessing failed. Unable to create exposure map with gtexposure."
    print "Time bin complete."

    hdulist = pyfits.open(Name + '_LC' + str(lc_bin_num) + '_exposure.fits')
    tbdata = hdulist[1].data
    z = tbdata['EXPOSURE']
    exp = z[0]

    ################################################################
    #          This portion prints to the text file                #
    ################################################################
    f = open("lc_output.txt", "a")
    f.write(str(Flux) + ',' + str(Ferr) + ',' + str(ef) + ',' + str(ef_err) +
            ',' + str(limit) + ',' + str(analysis.Ts(Name)) + ',' + str(UL) +
            ',' + str(TMID) + ',' + str(exp) + ',' + str(LIKE) + ',' +
            str(LIKEC) + '\n')
    f.close()
    print "Likelihood analysis on this band is complete."
    yes = subprocess.call(['rm',
                           Name + '_gtselect' + str(lc_bin_num) + '_lc.fits',
                           Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits',
                           Name + '_cmap' + str(lc_bin_num) + '_lc.fits',
                           Name + '_ccube' + str(lc_bin_num) + '_lc.fits',
                           Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
                           Name + '_expMap' + str(lc_bin_num) + '_lc.fits',
                           Name + '_LC' + str(lc_bin_num) + '_exposure.fits',
                           Name + '_srcmaps' + str(lc_bin_num) + '_lc.fits',
                           Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits',
                           Name + '_gtmktime' + str(lc_bin_num) + '_exposure.fits'])
    if yes == 0:
        print 'Files for bin have been deleted'
    else:
        print "Subprocessing failed. Unable to delete files for bin."
def __init__(self, name, eventFile, ft2File, livetimeCube, kind,
             exposureMap=None, sourceMaps=None, binnedExpoMap=None,
             source_name=None):
    # Initially the nuisance parameters dict is empty, as we don't know yet
    # the likelihood model. They will be updated in set_model
    super(FermiLATLike, self).__init__(name, {})

    # Read the ROI cut
    cc = pyLike.RoiCuts()
    cc.readCuts(eventFile, "EVENTS")
    self.ra, self.dec, self.rad = cc.roiCone()

    # Read the IRF selection
    c = pyLike.Cuts(eventFile, "EVENTS")
    self.irf = c.CALDB_implied_irfs()

    self.ft2File = ft2File
    self.livetimeCube = livetimeCube

    # These are the boundaries and the number of energies for the
    # computation of the model (in keV)
    self.emin = 1e4
    self.emax = 5e8
    self.Nenergies = 200

    # This is the limit on the effective area correction factor, which is
    # a multiplicative factor in front of the whole model to account for
    # inter-calibration issues. By default it can vary by 10%. This can be
    # changed by issuing:
    #   FermiLATUnbinnedLikeInstance.effCorrLimit = [new limit]
    # where for example a [new limit] of 0.2 allows for an effective area
    # correction of up to +/- 20%
    self.effCorrLimit = 0.1

    if kind.upper() != "UNBINNED" and kind.upper() != "BINNED":
        raise RuntimeError("Accepted values for the kind parameter are: " +
                           "binned, unbinned. You specified: %s" % (kind))
    else:
        self.kind = kind.upper()

    if kind.upper() == "UNBINNED":
        assert exposureMap is not None, "You have to provide an exposure map"
        self.eventFile = eventFile
        self.exposureMap = exposureMap
        # Read the files and generate the pyLikelihood object
        self.obs = UnbinnedAnalysis.UnbinnedObs(self.eventFile,
                                                self.ft2File,
                                                expMap=self.exposureMap,
                                                expCube=self.livetimeCube,
                                                irfs=self.irf)
    elif kind.upper() == "BINNED":
        assert sourceMaps is not None, "You have to provide a source map"
        assert binnedExpoMap is not None, "You have to provide a (binned) exposure map"
        self.sourceMaps = sourceMaps
        self.binnedExpoMap = binnedExpoMap
        self.obs = BinnedAnalysis.BinnedObs(srcMaps=self.sourceMaps,
                                            expCube=self.livetimeCube,
                                            binnedExpMap=self.binnedExpoMap,
                                            irfs=self.irf)

    # Activate inner minimization by default
    self.setInnerMinimization(True)

    self._source_name = source_name
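# A usage sketch for the effective-area correction limit documented in
# the comments above; the constructor arguments are placeholders and
# follow the __init__ signature (name, eventFile, ft2File, livetimeCube,
# kind, exposureMap=...).
lat_like = FermiLATLike('LAT', 'events.fits', 'spacecraft.fits',
                        'ltcube.fits', 'unbinned', exposureMap='expMap.fits')
lat_like.effCorrLimit = 0.2  # allow up to +/- 20% effective-area correction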
class quickLike:
    """This is the base class.  A usual likelihood analysis consists of
    running the following functions (assuming you have a configuration
    file):

    * qL = quickLike('MySource', True)
    * qL.makeObs()
    * qL.initDRM()
    * qL.fitDRM()
    * qL.initMIN()
    * qL.fitMIN()

    This will set up all of the objects needed for the analysis and do an
    initial fit with one of the DRM optimizers.  It'll save these results
    and use them for the second fit with one of the Minuit optimizers.

    If you do not have a configuration file, you'll need to input all of
    the options for this module when you create the quickLike object (see
    the various options below).  You can create a configuration file by
    executing writeConfig():

    * qL.writeConfig()

    This module will catch any failures from the optimizers and will
    report them to the user.  There are a few functions that are useful
    to use in this case:"""

    def __init__(self,
                 base='MySource',
                 configFile=False,
                 likelihoodConfig={"model": "MySource_model.xml",
                                   "sourcename": "Source Name",
                                   "drmtol": 0.1,
                                   "mintol": 1e-4},
                 commonConfig={"base": 'MySource',
                               "eventclass": 2,
                               "binned": False,
                               "irfs": "P7SOURCE_V6",
                               "verbosity": 0}):
        commonConfig['base'] = base
        self.logger = initLogger(base, 'quickLike')
        if configFile:
            try:
                commonConfigRead, analysisConfigRead, likelihoodConfigRead, plotConfigRead = readConfig(self.logger, base)
            except FileNotFound:
                self.logger.critical("One or more needed files do not exist")
                return
            try:
                commonConfig = checkConfig(self.logger, commonConfig, commonConfigRead)
            except KeyError:
                return
            try:
                likelihoodConfig = checkConfig(self.logger, likelihoodConfig, likelihoodConfigRead)
            except KeyError:
                return
        self.commonConf = commonConfig
        self.likelihoodConf = likelihoodConfig
        self.ret = re.compile('\n')
        self.fitbit = False
        self.Print()

    def writeConfig(self):
        """Writes all of the initialization variables to the config file
        called <basename>.cfg."""
        writeConfig(quickLogger=self.logger,
                    commonDictionary=self.commonConf,
                    likelihoodDictionary=self.likelihoodConf)

    def Print(self):
        """Prints out information about the various objects to the
        terminal and to the log file."""
        logString = "Created quickLike object: "
        for variable, value in self.commonConf.iteritems():
            logString += variable + "=" + str(value) + ","
        for variable, value in self.likelihoodConf.iteritems():
            logString += variable + "=" + str(value) + ","
        self.logger.info(logString)

    def makeObs(self):
        """Creates either a binned or unbinned observation object for use
        in the likelihood analysis.  This function checks for all of the
        needed files first.  If you do not have a needed file, see the
        quickAnalysis module for creation.
        This function should be run before any of the init or fit
        functions."""
        if self.commonConf['binned']:
            try:
                checkForFiles(self.logger, [self.commonConf['base'] + '_srcMaps.fits',
                                            self.commonConf['base'] + '_ltcube.fits',
                                            self.commonConf['base'] + '_BinnedExpMap.fits'])
                self.obs = BinnedObs(srcMaps=self.commonConf['base'] + '_srcMaps.fits',
                                     expCube=self.commonConf['base'] + '_ltcube.fits',
                                     binnedExpMap=self.commonConf['base'] + '_BinnedExpMap.fits',
                                     irfs=self.commonConf['irfs'])
            except FileNotFound:
                self.logger.critical("One or more needed files do not exist")
                return
        else:
            try:
                checkForFiles(self.logger, [self.commonConf['base'] + '_filtered_gti.fits',
                                            self.commonConf['base'] + '_SC.fits',
                                            self.commonConf['base'] + '_expMap.fits',
                                            self.commonConf['base'] + '_ltcube.fits'])
                self.obs = UnbinnedObs(self.commonConf['base'] + '_filtered_gti.fits',
                                       self.commonConf['base'] + '_SC.fits',
                                       expMap=self.commonConf['base'] + '_expMap.fits',
                                       expCube=self.commonConf['base'] + '_ltcube.fits',
                                       irfs=self.commonConf['irfs'])
            except FileNotFound:
                self.logger.critical("One or more needed files do not exist")
                return
        self.logger.info(self.ret.subn(', ', str(self.obs))[0])

    def initDRM(self):
        """Initializes the DRM optimizer (either binned or unbinned).
        This is usually the second function that you run when using this
        module.  You need to run makeObs before you run this function.
        If it hasn't been run, this function will exit."""
        try:
            self.obs
        except AttributeError:
            self.logger.critical("Obs object does not exist. Create it first with the makeObs function")
            return
        try:
            checkForFiles(self.logger, [self.likelihoodConf['model']])
            if self.commonConf['binned']:
                self.DRM = BinnedAnalysis(self.obs, self.likelihoodConf['model'],
                                          optimizer="DRMNGB")
            else:
                self.DRM = UnbinnedAnalysis(self.obs, self.likelihoodConf['model'],
                                            optimizer="DRMNGB")
            self.DRM.tol = float(self.likelihoodConf['drmtol'])
            self.logger.info(self.ret.subn(', ', str(self.DRM))[0])
        except FileNotFound:
            self.logger.critical("One or more needed files do not exist")
            return

    def initAltFit(self, opt="MINUIT"):
        """Initializes a MINUIT optimizer to use as a backup to the DRM
        optimizer.  This function is used internally in the fitDRM
        function so you probably will never use it.  You need to run
        makeObs before you run this function.  If it hasn't been run,
        this function will exit."""
        try:
            self.obs
        except AttributeError:
            self.logger.critical("Obs object does not exist. Create it first with the makeObs function")
            return
        try:
            checkForFiles(self.logger, [self.likelihoodConf['model']])
            if self.commonConf['binned']:
                self.ALTFIT = BinnedAnalysis(self.obs, self.likelihoodConf['model'],
                                             optimizer=opt)
            else:
                self.ALTFIT = UnbinnedAnalysis(self.obs, self.likelihoodConf['model'],
                                               optimizer=opt)
            self.ALTFIT.tol = float(self.likelihoodConf['drmtol'])
            self.ALTFITobj = pyLike.Minuit(self.ALTFIT.logLike)
            self.logger.info(self.ret.subn(', ', str(self.ALTFIT))[0])
        except FileNotFound:
            self.logger.critical("One or more needed files do not exist")
            return

    def initMIN(self, useBadFit=False):
        """Initializes a NewMinuit optimizer to use as a backup to the
        DRM optimizer.  This is usually run after you have initially run
        fitDRM and created a <basename>_likeDRM.xml model file which is
        used as a seed for the NewMinuit optimizer.  You can skip the DRM
        process if you like but you need to have the proper model file
        (<basename>_likeDRM.xml) present in the working directory.  You
        need to run makeObs before you run this function.  If it hasn't
        been run, this function will exit.
        If you want to use the non-convergent fit from fitDRM, set
        useBadFit to True."""
        try:
            self.obs
        except AttributeError:
            self.logger.critical("Obs object does not exist. Create it first with the makeObs function.")
            return
        if useBadFit:
            model = self.commonConf['base'] + '_badDRMFit.xml'
        else:
            model = self.commonConf['base'] + '_likeDRM.xml'
        try:
            checkForFiles(self.logger, [model])
            if self.commonConf['binned']:
                self.MIN = BinnedAnalysis(self.obs, model, optimizer='NewMinuit')
            else:
                self.MIN = UnbinnedAnalysis(self.obs, model, optimizer='NewMinuit')
            self.MIN.tol = float(self.likelihoodConf['mintol'])
            self.MINobj = pyLike.NewMinuit(self.MIN.logLike)
            self.logger.info(self.ret.subn(', ', str(self.MIN))[0])
        except FileNotFound:
            self.logger.critical("One or more needed files do not exist")
            return

    def fitDRM(self):
        """Performs an initial DRM fit on your data using the
        <basename>_model.xml model file.  It tries an initial fit and if
        that fails, tries a tighter tolerance.  If that fails, it tries a
        looser tolerance.  If that fails, it tries to do this initial fit
        with the MINUIT optimizer.  If that fails, this function bails.
        If the fit converges, it saves the results to
        <basename>_likeDRM.xml which will be used in the NewMinuit fit.
        If no fit is found, it will save the results to
        <basename>_badDRMFit.xml.  You can use this in the NewMinuit fit
        if you use the useBadFit option in initMIN.  You need to have run
        initDRM before you run this function."""
        try:
            self.DRM
        except AttributeError:
            self.logger.critical("DRM object does not exist. Create it first with the initDRM function.")
            return
        altfit = False
        try:
            self.DRM.fit(verbosity=int(self.commonConf['verbosity']))
        except:
            self.logger.error("Initial DRM Fit Failed")
            try:
                self.logger.info("Trying tighter tolerance (drmtol*0.1)")
                self.DRM.tol = float(self.likelihoodConf['drmtol']) * 0.1
                self.DRM.fit(verbosity=int(self.commonConf['verbosity']))
            except:
                self.logger.error("Second DRM Fit Failed")
                try:
                    self.logger.info("Trying looser tolerance (drmtol*10.)")
                    self.DRM.tol = float(self.likelihoodConf['drmtol']) * 10.
                    self.DRM.fit(verbosity=int(self.commonConf['verbosity']))
                except:
                    self.logger.error("Third DRM Fit Failed")
                    try:
                        self.logger.info("Trying alternate fit algorithm (MINUIT)")
                        self.initAltFit()
                        self.ALTFIT.fit(verbosity=int(self.commonConf['verbosity']),
                                        covar=True, optObject=self.ALTFITobj)
                        print self.ALTFITobj.getQuality()
                        altfit = True
                    except:
                        self.logger.error("Alternative fit algorithm failed, bailing")
                        self.logger.error(self.decodeRetCode('Minuit', self.ALTFITobj.getRetCode()))
                        self.ALTFIT.logLike.writeXml(self.commonConf['base'] + '_badDRMFit.xml')
                        self.logger.info("Saved ALTFIT as " + self.commonConf['base'] + "_badDRMFit.xml")
                        return
        if altfit:
            self.logger.info("ALTFIT Fit Finished. Total TS: " + str(self.ALTFIT.logLike.value()))
            self.ALTFIT.logLike.writeXml(self.commonConf['base'] + '_likeDRM.xml')
            self.logger.info("Saved ALTFIT as " + self.commonConf['base'] + "_likeDRM.xml")
        else:
            self.DRM.logLike.writeXml(self.commonConf['base'] + '_likeDRM.xml')
            self.logger.info("DRM Fit Finished. Total TS: " + str(self.DRM.logLike.value()))
            self.logger.info("Saved DRM as " + self.commonConf['base'] + "_likeDRM.xml")

    def fitMIN(self):
        """Does a NewMinuit fit on your data based on the model output by
        the fitDRM function.  You need to have run initMIN before running
        this function.  Saves the results to <basename>_likeMIN.xml if
        there is convergence.
        If convergence is not found, saves the results to
        <basename>_badMINFit.xml."""
        try:
            self.MIN
        except AttributeError:
            self.logger.critical("MIN object does not exist. Create it first with the initMIN function.")
            return
        self.MIN.fit(covar=True, optObject=self.MINobj,
                     verbosity=int(self.commonConf['verbosity']))
        self.MIN.logLike.writeXml(self.commonConf['base'] + '_likeMinuit.xml')
        self.logger.info("NEWMINUIT Fit Finished. Total TS: " + str(self.MIN.logLike.value()))
        self.logger.info("NEWMINUIT Fit Status: " + str(self.MINobj.getRetCode()))
        self.logger.info("NEWMINUIT fit Distance: " + str(self.MINobj.getDistance()))
        self.fitbit = True
        if self.MINobj.getRetCode() > 0:
            self.logger.error("NEWMINUIT DID NOT CONVERGE!!!")
            self.logger.error("The fit failed the following tests: " +
                              self.decodeRetCode('NewMinuit', self.MINobj.getRetCode()))
            self.MIN.fit(covar=True, optObject=self.MINobj,
                         verbosity=int(self.commonConf['verbosity']))
            self.MIN.logLike.writeXml(self.commonConf['base'] + '_badMINFit.xml')

    def printSource(self, source, Emin=100, Emax=300000):
        """Prints various details for a source in your model."""
        try:
            self.MIN
        except AttributeError:
            self.logger.critical("MIN object does not exist. " +
                                 "Create it first with the initMIN function and then fit it with the fitMIN function.")
            return
        if not self.fitbit:
            self.logger.warn("Fit isn't current, these values might not be correct. Run fitMIN first.")
        logString = source
        TS = self.MIN.Ts(source)
        print "TS: ", TS
        logString += " TS: " + str(TS)
        NPred = self.MIN.NpredValue(source)
        print "Npred: ", NPred
        logString += " NPred: " + str(NPred)
        flux = self.MIN.flux(source, emin=Emin, emax=Emax)
        print "Flux: ", flux
        logString += " Flux: " + str(flux)
        if self.fitbit:
            fluxErr = self.MIN.fluxError(source, emin=Emin, emax=Emax)
            print "Flux Error: ", fluxErr
            logString += " Flux Error: " + str(fluxErr)
        for paramName in self.MIN.model[source].funcs['Spectrum'].paramNames:
            paramValue = self.MIN.model[source].funcs['Spectrum'].getParam(paramName).value()
            print paramName, ": ", paramValue
            logString += paramName + ": " + str(paramValue) + " "
        self.logger.info(logString)

    def customERange(self, Emin, Emax):
        """Sets a smaller energy range for the fitting of both the DRM
        and MIN optimization steps."""
        try:
            self.DRM
        except AttributeError:
            self.logger.warn("DRM object doesn't exist. Energy range not modified.")
        else:
            self.DRM.setEnergyRange(Emin, Emax)
            self.logger.info("Set energy range for DRM to " +
                             str(self.DRM.emin) + "," + str(self.DRM.emax))
        try:
            self.MIN
        except AttributeError:
            self.logger.warn("MIN object doesn't exist. Energy range not modified.")
        else:
            self.MIN.setEnergyRange(Emin, Emax)
            self.logger.info("Set energy range for MIN to " +
                             str(self.MIN.emin) + "," + str(self.MIN.emax))

    def calcUpper(self, source, Emin=100, Emax=300000):
        """Calculates an upper limit for a source in your model."""
        self.ul = UpperLimits(self.MIN)
        self.ul[source].compute(emin=Emin, emax=Emax)
        print self.ul[source].results
        self.logger.info(source + " UL: " + str(self.ul[source].results[0]))

    def removeWeak(self, mySource='', tslimit=0, distlimit=0,
                   RemoveFree=False, RemoveFixed=False):
        """This function has two main uses: it will print out details on
        all of the sources in your model and it will remove sources
        according to different requirements.  If you just want to print
        out details, execute it this way:

        <obj>.removeWeak(<my_source>)

        where <obj> is the quickLike object you're using here and
        <my_source> is the name of your source of interest.  You can then
        remove some of these sources from the model if you like.
        For example, if you want to remove all of the fixed sources with
        TS values less than 1, execute it this way:

        <obj>.removeWeak(<my_source>, tslimit=1, RemoveFixed=True)

        You can mix and match any of the options.  You could remove all
        sources (fixed and free) that are below a TS value of 3 and are
        10 degrees from your source of interest by executing:

        <obj>.removeWeak(<my_source>, tslimit=3, distlimit=10, RemoveFree=True, RemoveFixed=True)"""
        try:
            self.MIN
        except AttributeError:
            self.logger.critical("MIN object does not exist. " +
                                 "Create it first with the initMIN function and then fit it with the fitMIN function.")
            return
        if not self.fitbit:
            self.logger.warn("Fit isn't current, these values might not be correct. Run fitMIN first.")
        if mySource == '':
            mySource = self.likelihoodConf['sourcename']
        for name in self.MIN.sourceNames():
            remove = False
            distance = 0
            sourceTS = self.MIN.Ts(name)
            if self.MIN.model[name].src.getType() == 'Point':
                distance = self.MIN._separation(self.MIN.model[mySource].src,
                                                self.MIN.model[name].src)
            if self.MIN.freePars(name).size() > 0:
                indexFree = "Free"
                if (sourceTS < tslimit) and (distance > distlimit) and RemoveFree:
                    remove = True
            else:
                indexFree = "Fixed"
                if (sourceTS < tslimit) and (distance > distlimit) and RemoveFixed:
                    remove = True
            if remove:
                self.logger.info("Removing " + name + ", TS: " + str(sourceTS) +
                                 ", Frozen?: " + str(indexFree) +
                                 ", Distance: " + str(distance))
                self.MIN.deleteSource(name)
            else:
                self.logger.info("Retaining " + name + ", TS: " + str(sourceTS) +
                                 ", Frozen?: " + str(indexFree) +
                                 ", Distance: " + str(distance))

    def paramsAtLimit(self, limit=0.1):
        """This function will print out any sources whose parameters are
        close to their limits.  You could use this to find sources that
        are having issues being fit.  This function is useful when you're
        having trouble getting convergence from the NewMinuit fit
        routine.  The limit is the fractional difference from a bound.
        If one of the bounds is zero, it uses the value of the parameter
        to check for closeness (absolute instead of fractional
        difference).  The default is a 0.1 (10%) difference for a measure
        of closeness."""
        try:
            self.MIN
        except AttributeError:
            self.logger.critical("MIN object does not exist. " +
                                 "Create it first with the initMIN function and then fit it with the fitMIN function.")
            return
        if not self.fitbit:
            self.logger.warn("Fit isn't current, these values might not be correct. Run fitMIN first.")
        for src in self.MIN.sourceNames():
            for name in self.MIN.model[src].funcs['Spectrum'].paramNames:
                bounds = self.MIN.model[src].funcs['Spectrum'].getParam(name).getBounds()
                value = self.MIN.model[src].funcs['Spectrum'].getParam(name).value()
                try:
                    distToLower = abs((value - bounds[0]) / bounds[0])
                except ZeroDivisionError:
                    distToLower = abs(value)
                try:
                    distToUpper = abs((value - bounds[1]) / bounds[1])
                except ZeroDivisionError:
                    distToUpper = abs(value)
                if distToLower < limit:
                    self.logger.error("The " + name + " (" + str(value) + ") of " + src +
                                      " is close (" + str(distToLower) +
                                      ") to its lower limit (" + str(bounds[0]) + ")")
                if distToUpper < limit:
                    self.logger.error("The " + name + " (" + str(value) + ") of " + src +
                                      " is close (" + str(distToUpper) +
                                      ") to its upper limit (" + str(bounds[1]) + ")")

    def decodeRetCode(self, optimizer, retCode):
        """Decodes the return codes from the Minuit and NewMinuit fit
        functions.  Used in the fitting functions in this module.
        You'll probably never use this function."""
        if optimizer == 'NewMinuit':
            retCode -= 100
            failure = ""
            if retCode & 1:
                failure += " IsAboveMaxEdm"
            if retCode & 2:
                failure += " HasCovariance"
            if retCode & 4:
                failure += " HesseFailed"
            if retCode & 8:
                failure += " HasMadePosDefCovar"
            if retCode & 16:
                failure += " HasPosDefCovar"
            if retCode & 32:
                failure += " HasAccurateCovar"
            if retCode & 64:
                failure += " HasValidCovariance"
            if retCode & 128:
                failure += " HasValidParameters"
            if retCode & 256:
                failure += " IsValid"
            return failure
        if optimizer == 'Minuit':
            if retCode == 0:
                failure = "Error matrix not calculated at all"
            if retCode == 1:
                failure = "Diagonal approximation only, not accurate"
            if retCode == 2:
                failure = "Full matrix, but forced positive-definite (i.e. not accurate)"
            if retCode == 3:
                failure = "Full accurate covariance matrix (After MIGRAD, this is the indication of normal convergence.)"
            return failure
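# A worked example for decodeRetCode() above (qL is a hypothetical
# quickLike instance). NewMinuit return codes are offset by 100 and
# bit-packed, so a retCode of 117 decodes as 17 = 1 + 16:
print(qL.decodeRetCode('NewMinuit', 117))  # " IsAboveMaxEdm HasPosDefCovar"
# For Minuit, a retCode of 3 ("Full accurate covariance matrix") is the
# indication of normal convergence.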
def main(NAME, RA, DEC, TSTART, TSTOP, EMIN, EMAX, SC, ROIu, xml):
    ROIue = float(ROIu) + 10
    os.system('ls -1 *PH*.fits > %s_events.list' % (NAME))
    my_apps.filter['evclass'] = 128
    my_apps.filter['evtype'] = 3
    my_apps.filter['ra'] = RA
    my_apps.filter['dec'] = DEC
    my_apps.filter['rad'] = ROIu
    my_apps.filter['emin'] = EMIN
    my_apps.filter['emax'] = EMAX
    my_apps.filter['zmax'] = 90
    my_apps.filter['tmin'] = TSTART
    my_apps.filter['tmax'] = TSTOP
    my_apps.filter['infile'] = '@%s_events.list' % (NAME)
    my_apps.filter['outfile'] = '%s_filtered.fits' % (NAME)
    my_apps.filter.run()
    # maketime
    my_apps.maketime['scfile'] = SC
    my_apps.maketime['filter'] = '(DATA_QUAL>0)&&(LAT_CONFIG==1)'
    my_apps.maketime['roicut'] = 'no'
    my_apps.maketime['evfile'] = '%s_filtered.fits' % (NAME)
    my_apps.maketime['outfile'] = '%s_filtered_gti.fits' % (NAME)
    my_apps.maketime.run()
    #
    my_apps.counts_map['evfile'] = '%s_filtered_gti.fits' % (NAME)
    my_apps.counts_map['scfile'] = SC
    my_apps.counts_map['outfile'] = '%s_CountMap.fits' % (NAME)
    # my_apps.counts_map.run()
    #
    my_apps.expCube['evfile'] = '%s_filtered_gti.fits' % (NAME)
    my_apps.expCube['scfile'] = SC
    my_apps.expCube['zmax'] = 90
    my_apps.expCube['outfile'] = 'expCube.fits'
    my_apps.expCube['dcostheta'] = 0.025
    my_apps.expCube['binsz'] = 1
    my_apps.expCube.run()
    my_apps.expMap['evfile'] = '%s_filtered_gti.fits' % (NAME)
    my_apps.expMap['scfile'] = SC
    my_apps.expMap['expcube'] = 'expCube.fits'
    my_apps.expMap['outfile'] = 'expMap.fits'
    my_apps.expMap['irfs'] = 'CALDB'
    my_apps.expMap['srcrad'] = ROIue
    my_apps.expMap['nlong'] = 120
    my_apps.expMap['nlat'] = 120
    my_apps.expMap['nenergies'] = 20
    my_apps.expMap.run()
    # sara xml model
    roiname = '%s_filtered_gti.fits' % NAME
    if float(xml) == 0:
        xml_creator_P8_v1.main(NAME, float(RA), float(DEC), float(EMIN),
                               float(EMAX), 15)
        xmlmodelname = '%s_model.xml' % NAME
        my_apps.diffResps['evfile'] = '%s_filtered_gti.fits' % (NAME)
        my_apps.diffResps['scfile'] = SC
        my_apps.diffResps['srcmdl'] = xmlmodelname
        my_apps.diffResps['irfs'] = 'CALDB'
        my_apps.diffResps.run()
        xmlfitname = '%s_fit1.xml' % NAME
        obs = UnbinnedObs(roiname, SC, expMap='expMap.fits',
                          expCube='expCube.fits', irfs='CALDB')
        # like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='MINUIT')
        like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='NewMinuit')
        likeobj = pyLike.NewMinuit(like1.logLike)
        like1.fit(verbosity=0, optObject=likeobj)
        print likeobj.getRetCode()
        sourceDetails = {}
        for source in like1.sourceNames():
            sourceDetails[source] = like1.Ts(source)
        for source, TS in sourceDetails.iteritems():
            if TS < 2:
                print "Deleting...", source, " TS = ", TS
                like1.deleteSource(source)
        like1.fit(verbosity=0, optObject=likeobj)
        print "0 is converged", likeobj.getRetCode()
        like1.logLike.writeXml(xmlfitname)
        numl = search(NAME, xmlfitname)
        numlg = str(numl + 3)
        os.system("sed '" + numlg + "," + numlg + " s/free=\"1\"/free=\"0\"/' " +
                  xmlfitname + " > xml_sed.xml ")
        inputs = likeInput(like1, NAME, model="xml_sed.xml", nbins=6, phCorr=1.0)
        inputs.plotBins()
        inputs.fullFit(CoVar=True)
        sed = likeSED(inputs)
        sed.getECent()
        sed.fitBands()
        sed.Plot()
        result = like1.model[NAME]
        TS = like1.Ts(NAME)
        flux = like1.flux(NAME, emin=100)
        gamma = like1.model[NAME].funcs['Spectrum'].getParam('Index').value()
        cov_gg = like1.model[NAME].funcs['Spectrum'].getParam('Index').error()
        # cov_II = like1.model[NAME].funcs['Spectrum'].getParam('Integral').error()
        flux_err = like1.fluxError(NAME, emin=100)
        like1.plot()
        fitsedname = '%s_6bins_likeSEDout.fits' % NAME
        sedtool(fitsedname)
        print NAME, " TS=", TS
        print result
    if float(xml) == 1:
        xmlmodelname = '%s_model.xml' % NAME
        xmlfitname = '%s_fit1.xml' % NAME
        obs = UnbinnedObs(roiname, SC, expMap='expMap.fits',
                          expCube='expCube.fits', irfs='CALDB')
        # like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='MINUIT')
        like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='NewMinuit')
        likeobj = pyLike.NewMinuit(like1.logLike)
        like1.fit(verbosity=0, optObject=likeobj)
        print likeobj.getRetCode()
        sourceDetails = {}
        for source in like1.sourceNames():
            sourceDetails[source] = like1.Ts(source)
        for source, TS in sourceDetails.iteritems():
            if TS < 2:
                print "Deleting...", source, " TS = ", TS
                like1.deleteSource(source)
        like1.fit(verbosity=0, optObject=likeobj)
        print "0 is converged", likeobj.getRetCode()
        like1.logLike.writeXml(xmlfitname)
        numl = search(NAME, xmlfitname)
        numlg = str(numl + 3)
        os.system("sed '" + numlg + "," + numlg + " s/free=\"1\"/free=\"0\"/' " +
                  xmlfitname + " > xml_sed.xml ")
        inputs = likeInput(like1, NAME, model="xml_sed.xml", nbins=6, phCorr=1.0)
        inputs.plotBins()
        inputs.fullFit(CoVar=True)
        sed = likeSED(inputs)
        sed.getECent()
        sed.fitBands()
        sed.Plot()
        result = like1.model[NAME]
        TS = like1.Ts(NAME)
        flux = like1.flux(NAME, emin=100)
        gamma = like1.model[NAME].funcs['Spectrum'].getParam('Index').value()
        cov_gg = like1.model[NAME].funcs['Spectrum'].getParam('Index').error()
        # cov_II = like1.model[NAME].funcs['Spectrum'].getParam('Integral').error()
        flux_err = like1.fluxError(NAME, emin=100)
        like1.plot()
        fitsedname = '%s_6bins_likeSEDout.fits' % NAME
        sedtool(fitsedname)
        print NAME, " TS=", TS
        print result
def bayesian_ul(**kwargs):
    # Instance the unbinned analysis
    print("Instancing pyLikelihood...")
    unbinned_observation = UnbinnedAnalysis.UnbinnedObs(
        kwargs['ft1'], kwargs['ft2'], kwargs['expomap'], kwargs['ltcube'], 'CALDB')
    pylike_instance = UnbinnedAnalysis.UnbinnedAnalysis(
        unbinned_observation, kwargs['xml'], kwargs['engine'])
    print("done")

    # Let's start by computing the semi-Bayesian UL from the Science Tools
    print("Semi-Bayesian upper limit computation with ST...")
    # Sync and fit
    pylike_instance.syncSrcParams()
    pylike_instance.fit()
    # Compute ST upper limit
    ul = UpperLimits.UpperLimit(pylike_instance, kwargs['src'])
    try:
        st_bayes_ul, parameter_value = ul.bayesianUL(0.95,
                                                     emin=kwargs['emin'],
                                                     emax=kwargs['emax'])
    except:
        # This fails sometimes with RuntimeError: Attempt to set parameter
        # value outside bounds.
        print("\n\nWARNING: upper limit computation with ST has failed!\n\n")
        st_bayes_ul = -1
        st_bayes_ul_ene = -1
        # Get back to a good state
        pylike_instance = UnbinnedAnalysis.UnbinnedAnalysis(
            unbinned_observation, kwargs['xml'], kwargs['engine'])
        pylike_instance.fit()
    else:
        # Convert to energy flux
        best_fit_photon_index = pylike_instance[
            kwargs['src']].src.spectrum().parameter('Index').getValue()
        st_bayes_ul_ene = st_bayes_ul * get_conversion_factor(
            best_fit_photon_index, kwargs)
    print("done")

    # Now find out our free parameters, and define a prior for them.
    # Prepare the dictionary of parameters. Note that by default they get
    # a uniform prior between the current min and max values
    free_parameters = collections.OrderedDict()
    for p in pylike_instance.model.params:
        if p.isFree():
            source_name = p.srcName
            parameter_name = p.parameter.getName()
            p.parameter.setScale(1.0)
            free_parameters[(source_name, parameter_name)] = MyParameter(p)

    # Now set the priors and the boundaries.
    # Update boundaries (they will be propagated to the prior as well)

    # Isotropic template
    if (kwargs['iso'], 'Normalization') in free_parameters:
        try:
            free_parameters[(kwargs['iso'], 'Normalization')].bounds = (0, 100)
        except:
            # This happens if the best fit value is outside those boundaries
            free_parameters[(kwargs['iso'], 'Normalization')].value = 1.0
            free_parameters[(kwargs['iso'], 'Normalization')].bounds = (0, 100)
    else:
        print("WARNING: Isotropic template is not free to vary (or absent)")

    # Galactic template (truncated Gaussian with systematic error)
    if (kwargs['gal'], 'Value') in free_parameters:
        try:
            free_parameters[(kwargs['gal'], 'Value')].bounds = (0.1, 10.0)
            free_parameters[(kwargs['gal'], 'Value')].prior = TruncatedGaussianPrior(
                1.0, kwargs['gal_sys_err'])
        except:
            # This happens if the best fit value is outside those boundaries
            free_parameters[(kwargs['gal'], 'Value')].value = 1.0
            free_parameters[(kwargs['gal'], 'Value')].bounds = (0.1, 10.0)
    else:
        print("WARNING: Galactic template is not free to vary (or absent)")

    # Photon flux (uniform prior)
    if (kwargs['src'], 'Integral') in free_parameters:
        try:
            free_parameters[(kwargs['src'], 'Integral')].bounds = (0, 10)
        except:
            free_parameters[(kwargs['src'], 'Integral')].value = 1e-7
            free_parameters[(kwargs['src'], 'Integral')].bounds = (0, 10)
    else:
        raise RuntimeError(
            "The Integral parameter must be a free parameter of source %s" % kwargs['src'])

    # Photon index
    if (kwargs['src'], 'Index') in free_parameters:
        try:
            free_parameters[(kwargs['src'], 'Index')].bounds = (kwargs['min_index'],
                                                                kwargs['max_index'])
        except:
            raise RuntimeError(
                "It looks like the best fit photon index is outside the boundaries "
                "provided in the command line")
    else:
parameter must be a free parameter of source %s" % kwargs['src']) # Execute a fit to get to a good state with the new boundaries pylike_instance.fit() # Print the configuration print("\nFree parameters:") print("----------------\n") for k, v in free_parameters.iteritems(): print("* %s of %s (%s)" % (k[1], k[0], v.prior.name)) print("") # Generate the randomized starting points for the Emcee sampler ndim, nwalkers = len(free_parameters), kwargs['n_walkers'] p0 = [ map(lambda p: p.get_random_init(0.1), free_parameters.values()) for i in range(nwalkers) ] # Instance the sampler posterior = Posterior(free_parameters.values(), pylike_instance) # Now check that the starting points we have are good (otherwise the sampler will go awry) for pp in p0: this_ln = posterior.lnprob(pp) if not np.isfinite(this_ln): raise RuntimeError( "Infinite for values %s while setting up walkers" % pp) sampler = emcee.EnsembleSampler(nwalkers, ndim, posterior.lnprob) print("Burn in...") pos, prob, state = sampler.run_mcmc(p0, kwargs['burn_in']) print("done") sampler.reset() print("Sampling...") samples = sampler.run_mcmc(pos, kwargs['n_samples']) print("done") print("Mean acceptance fraction: {0:.3f}".format( np.mean(sampler.acceptance_fraction))) # Make the corner plot samples = sampler.flatchain labels = map(lambda x: "%s" % (x[1]), free_parameters.keys()) print("Producing corner plot...") fig = corner.corner(samples, show_titles=True, quantiles=[0.5, 0.50, 0.95], title_fmt=u'.2g', labels=labels, plot_contours=True, plot_density=False) fig.tight_layout() fig.savefig(kwargs['corner_plot']) print("done") # Now compute the upper limits # Find index of normalization norm_index = free_parameters.keys().index((kwargs['src'], 'Integral')) # Find index of photon index ph_index_index = free_parameters.keys().index((kwargs['src'], 'Index')) photon_fluxes = np.zeros(samples.shape[0]) energy_fluxes = np.zeros(samples.shape[0]) conversion_factors = np.zeros(samples.shape[0]) for i, current_sample in enumerate(samples): # Set the Integral parameter to the current value free_parameters[(kwargs['src'], 'Integral')].scaled_value = current_sample[norm_index] # Set the photon index to the current value current_photon_index = current_sample[ph_index_index] free_parameters[(kwargs['src'], 'Index')].scaled_value = current_photon_index pylike_instance.syncSrcParams() # Get photon flux for this sample photon_flux = pylike_instance[kwargs['src']].flux( kwargs['emin'], kwargs['emax']) # Get energy flux for this value conv = get_conversion_factor(current_photon_index, kwargs) energy_flux = photon_flux * conv # Save the results photon_fluxes[i] = photon_flux energy_fluxes[i] = energy_flux conversion_factors[i] = conv # Now compute the 95 percentile photon_flux_p95 = np.percentile(photon_fluxes, 95) energy_flux_p95 = np.percentile(energy_fluxes, 95) # Save the samples np.savez(kwargs['output_file'] + "_samples", samples=samples) np.savez(kwargs['output_file'], photon_fluxes=photon_fluxes, energy_fluxes=energy_fluxes, photon_flux_p95=photon_flux_p95, energy_flux_p95=energy_flux_p95, st_bayes_ul=st_bayes_ul, st_bayes_ul_ene=st_bayes_ul_ene) # Now summarize the results print("\nUpper limit computation results:") print("----------------------------------\n") print("Photon flux:\n") print(" * Semi-bayes from ST : %g" % (st_bayes_ul)) print(" * Bayesian : %g" % photon_flux_p95) print("\nEnergy flux:\n") print(" * Semi-bayes from ST : %g" % st_bayes_ul_ene) print(" * Bayesian : %g" % energy_flux_p95)
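# --- Illustration (not part of the original script) ---
# The Bayesian upper limit above is just the 95th percentile of the posterior
# flux samples. A self-contained toy version of that final step, with a
# made-up lognormal posterior standing in for the emcee chain:
import numpy as np

np.random.seed(0)
toy_photon_fluxes = np.random.lognormal(mean=np.log(1e-9), sigma=0.5, size=10000)
print("Toy 95%% UL: %g ph/cm2/s" % np.percentile(toy_photon_fluxes, 95))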
if not irfsType == 'CALDB':
    my_apps.diffResps['evfile'] = filteredLATFile
    my_apps.diffResps['scfile'] = spacecraftFile
    my_apps.diffResps['srcmdl'] = modelFile
    my_apps.diffResps['irfs'] = irfsType
    my_apps.diffResps.run()

# Run the likelihood analysis
import pyLikelihood
from UnbinnedAnalysis import *

obs = UnbinnedObs(filteredLATFile, spacecraftFile, expMap=expMapFile,
                  expCube=ltCubeFile, irfs=irfsType)
like = UnbinnedAnalysis(obs, modelFile, optimizer='Minuit')

# Analysis complete
print "################ Analysis Complete ################"
print obs
print like
print "###################################################"

# Inspect the fit tolerance settings (these bare expressions only display
# values in an interactive session), then tighten the tolerance
like.tol
like.tolType
like.tol = 0.0001

likeobj = pyLike.Minuit(like.logLike)
# Warning: this fit can take VERY long, ~30 minutes
like.fit(verbosity=0, covar=True, optObject=likeobj)
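# Hedged follow-up (assumption: as in the FSSC tutorials, the MINUIT fit
# quality can be read back from the optimizer object, where 3 indicates a
# full, accurate covariance matrix):
fitQuality = likeobj.getQuality()
if fitQuality < 3:
    print "WARNING: fit quality %s < 3; the errors may be unreliable" % fitQuality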
def main(NAME, RA, DEC, TSTART, TSTOP, EMIN, EMAX, Np, path, ROIu):
    # outdir = os.environ["FERMI_TMPLATAREA"]
    gtliketxt = open("%s/%s_gtlike.txt" % (path, Np), 'w')
    gtsedtxt = open("%s/%s_sed.txt" % (path, Np), 'w')
    SCC = '%s_SC00.fits' % Np
    SC = path + SCC
    Npp = path + Np
    print SC
    # Exposure-map radius: ROI radius plus 10 degrees
    ROIue = float(ROIu) + 10
    os.system("ls -1 '" + Npp + "'_PH*.fits > %s/%s_events.list" % (path, Np))
    # Event selection
    my_apps.filter['evclass'] = 128
    my_apps.filter['evtype'] = 3
    # my_apps.filter['evclsmin'] = 3
    # my_apps.filter['evclsmax'] = 4
    my_apps.filter['ra'] = RA
    my_apps.filter['dec'] = DEC
    my_apps.filter['rad'] = ROIu
    my_apps.filter['emin'] = EMIN
    my_apps.filter['emax'] = EMAX
    my_apps.filter['zmax'] = 90
    my_apps.filter['tmin'] = TSTART
    my_apps.filter['tmax'] = TSTOP
    my_apps.filter['infile'] = '@%s/%s_events.list' % (path, Np)
    my_apps.filter['outfile'] = '%s/%s_filtered.fits' % (path, Np)
    my_apps.filter.run()
    # maketime
    my_apps.maketime['scfile'] = SC
    my_apps.maketime['filter'] = '(DATA_QUAL>0)&&(LAT_CONFIG==1)'
    my_apps.maketime['roicut'] = 'no'
    my_apps.maketime['evfile'] = '%s/%s_filtered.fits' % (path, Np)
    my_apps.maketime['outfile'] = '%s/%s_filtered_gti.fits' % (path, Np)
    my_apps.maketime.run()
    # my_apps.counts_map['evfile'] = '%s/%s_filtered_gti.fits' % (path, Np)
    # my_apps.counts_map['scfile'] = SC
    # my_apps.counts_map['outfile'] = '%s/%s_CountMap.fits' % (path, Np)
    # my_apps.counts_map.run()
    # Livetime cube
    my_apps.expCube['evfile'] = '%s/%s_filtered_gti.fits' % (path, Np)
    my_apps.expCube['scfile'] = SC
    my_apps.expCube['zmax'] = 90
    my_apps.expCube['outfile'] = '%s/%s_expCube.fits' % (path, Np)
    my_apps.expCube['dcostheta'] = 0.025
    my_apps.expCube['binsz'] = 1
    my_apps.expCube.run()
    # Exposure map
    my_apps.expMap['evfile'] = '%s/%s_filtered_gti.fits' % (path, Np)
    my_apps.expMap['scfile'] = SC
    my_apps.expMap['expcube'] = '%s/%s_expCube.fits' % (path, Np)
    my_apps.expMap['outfile'] = '%s/%s_expMap.fits' % (path, Np)
    # my_apps.expMap['irfs'] = 'P7REP_SOURCE_V15'
    my_apps.expMap['irfs'] = 'CALDB'
    my_apps.expMap['srcrad'] = ROIue
    my_apps.expMap['nlong'] = 120
    my_apps.expMap['nlat'] = 120
    my_apps.expMap['nenergies'] = 20
    my_apps.expMap.run()
    # XML model
    roiname = '%s/%s_filtered_gti.fits' % (path, Np)
    xml_creator_P7_v1.main(path, NAME, float(RA), float(DEC), float(EMIN),
                           float(EMAX), 20, Np)
    xmlmodelname = '%s/%s_model.xml' % (path, Np)
    my_apps.diffResps['evfile'] = '%s/%s_filtered_gti.fits' % (path, Np)
    my_apps.diffResps['scfile'] = SC
    my_apps.diffResps['srcmdl'] = xmlmodelname
    my_apps.diffResps['irfs'] = 'CALDB'
    my_apps.diffResps.run()
    # Unbinned likelihood fit
    xmlfitname = '%s/%s_fit1.xml' % (path, Np)
    expMapFile = '%s/%s_expMap.fits' % (path, Np)
    expCubeFile = '%s/%s_expCube.fits' % (path, Np)
    obs = UnbinnedObs(roiname, SC, expMap=expMapFile, expCube=expCubeFile,
                      irfs='CALDB')
    like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='NewMinuit')
    like1.fit(verbosity=0)
    like1.logLike.writeXml(xmlfitname)
    # numl = search(NAME, xmlfitname)
    # numlg = str(numl+3)
    # os.system("sed '"+numlg+","+numlg+" s/free=\"1\"/free=\"0\"/' "+xmlfitname+" > xml_sed.xml ")
    # inputs = likeInput(like1, NAME, model="xml_sed.xml", nbins=9, phCorr=1.0)
    # low_edges = [200., 914.61, 1955.87, 8944.27, 19127.05, 40902.61]
    # high_edges = [427.69, 1955.87, 8944.27, 19127.05, 40902.61, 187049.69]
    # centers = [0.2767, 1.265, 5.787, 12.37, 26.46, 86.60]
    # inputs.customBins(low_edges, high_edges)
    # inputs.plotBins()
    # inputs.fullFit(CoVar=True)
    # sed = likeSED(inputs)
    # sed.getECent()
    # sed.fitBands()
    # sed.Plot()
    result = like1.model[NAME]
    TS = like1.Ts(NAME)
    # I = like1.model[NAME].funcs['Spectrum'].getParam('Integral').value()
    flux = like1.flux(NAME, emin=100)
    # flux = I*1e-9
    gamma = like1.model[NAME].funcs['Spectrum'].getParam('Index').value()
    cov_gg = like1.model[NAME].funcs['Spectrum'].getParam('Index').error()
    # cov_II = like1.model[NAME].funcs['Spectrum'].getParam('Integral').error()
    flux_err = like1.fluxError(NAME, emin=100)
    # flux_err = cov_II*1e-9
    e = 1000.0
    a = 1
    b = 1.e-18
    # Logarithmic center of the energy range
    lenergy_bin = log10(double(EMIN)) + (log10(double(EMAX)) - log10(double(EMIN))) / 2
    energy_bin = pow(10, lenergy_bin)
    freq = 2.42e22 * energy_bin / 100.0
    ums = 1. - gamma
    # conv is in Jy; convjy then converts to nuFnu in erg/cm2/s
    conv = (ums * pow(energy_bin, -gamma)
            / (pow(double(EMAX), ums) - pow(double(EMIN), ums))
            * 6.62e-2 * (energy_bin / 100.0))
    convjy = conv * freq * 1.e-23
    nufnu = flux * convjy
    b = flux_err * convjy
    err_log = log10((nufnu + b) / nufnu)
    date_start = computeDate(float(TSTART))
    date_stop = computeDate(float(TSTOP))
    # like1.plot()
    # fitsedname = '%s_9bins_likeSEDout.fits' % NAME
    # sedtool(fitsedname)
    print NAME, " TS=", TS
    # print result
    # print like1.model
    print "spectral index= ", gamma, " +/-", cov_gg
    print " Flux=", flux, "+/-", flux_err
    print "freq", freq, " nuFnu=", nufnu, b
    # print "'UL': ", results_ul, err
    gtliketxt.write("%s RA=%s DEC= %s TS= %s\n Time Interval (MJD) %s %s\n Flux "
                    % (NAME, RA, DEC, TS, date_start, date_stop))
    if TS < 25:
        # Source not significantly detected: quote a 95% upper limit instead
        obs = UnbinnedObs(roiname, SC, expMap=expMapFile, expCube=expCubeFile,
                          irfs='CALDB')
        like1 = UnbinnedAnalysis(obs, xmlmodelname, optimizer='NewMinuit')
        like1.fit(verbosity=0)
        ul = UpperLimits(like1)
        UL = ul[NAME].compute(emin=double(EMIN), emax=double(EMAX))
        results_ul = UL[1] * 1E-9
        err = 0
        print "'UL': ", results_ul, err
        # Assume a photon index of 2.0 for the upper-limit conversion
        gamma_ul = 2.0
        ums_ul = 1. - gamma_ul
        conv_ul = (ums_ul * pow(energy_bin, -gamma_ul)
                   / (pow(double(EMAX), ums_ul) - pow(double(EMIN), ums_ul))
                   * 6.62e-2 * (energy_bin / 100.0))
        convjy_ul = conv_ul * freq * 1.e-23
        nufnu_ul = results_ul * convjy_ul
        b = err * convjy_ul
        print "freq", freq, "0 nuFnu=", nufnu_ul, b
        gtliketxt.write("%s 0 " % results_ul)
        # gtliketxt.write(err)
        gtsedtxt.write("%s | 0  | %s | %s | %s | %s |  UL  | "
                       % (freq, nufnu_ul, b, date_start, date_stop))
    else:
        gtliketxt.write("%s %s\nSpectral Index = %s %s"
                        % (flux, flux_err, gamma, cov_gg))
        gtsedtxt.write(" %s | 0  | %s | %s | %s | %s | "
                       % (freq, nufnu, b, date_start, date_stop))
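# Hedged helper (illustration only; the function name is mine, and the body
# just consolidates the inline algebra above). For a power law
# dN/dE = K E^-gamma with integral photon flux F over [EMIN, EMAX], the
# differential flux at E is
#   dN/dE(E) = F (1-gamma) E^-gamma / (EMAX^(1-gamma) - EMIN^(1-gamma));
# the 6.62e-2 and (E/100) factors are the empirical Jansky conversion used
# above, and multiplying by nu * 1e-23 gives nuFnu in erg/cm2/s.
def photon_flux_to_nufnu_factor(gamma, emin_mev, emax_mev, energy_bin_mev):
    freq = 2.42e22 * energy_bin_mev / 100.0
    ums = 1. - gamma
    conv = (ums * pow(energy_bin_mev, -gamma)
            / (pow(emax_mev, ums) - pow(emin_mev, ums))
            * 6.62e-2 * (energy_bin_mev / 100.0))
    return conv * freq * 1.e-23  # multiply by the photon flux to get nuFnu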
expMap['scfile'] = str(sc_file)
expMap['expcube'] = str(name) + 'ltcube' + str(this_bin) + '.fits'
expMap['outfile'] = str(name) + 'expmap' + str(this_bin) + '.fits'
expMap['irfs'] = 'P7SOURCE_V6'
expMap['srcrad'] = radius + 10
expMap['nlong'] = 120
expMap['nlat'] = 120
expMap['nenergies'] = 20
expMap['debug'] = 'yes'
expMap.run()

print '\n***Running likelihood analysis***'
try:
    obs = UnbinnedObs(str(name) + 'mktime' + str(this_bin) + '.fits',
                      str(sc_file),
                      expMap=str(name) + 'expmap' + str(this_bin) + '.fits',
                      expCube=str(name) + 'ltcube' + str(this_bin) + '.fits',
                      irfs='P7SOURCE_V6')
    # First pass with DRMNFB, then refine with NewMinuit
    like1 = UnbinnedAnalysis(obs, str(xml_file), optimizer='DRMNFB')
    like1.fit(verbosity=0)
    like1.logLike.writeXml(str(name) + 'fit' + str(this_bin) + '.xml')
    like2 = UnbinnedAnalysis(obs,
                             srcModel=str(name) + 'fit' + str(this_bin) + '.xml',
                             optimizer='NewMinuit')
    # Use a NewMinuit optimizer object to match the optimizer chosen above
    obj = pyLike.NewMinuit(like2.logLike)
    like2.fit(verbosity=0, covar=True, optObject=obj)
    like2.plot()
    try:
        flux = like2.model['_2FGLJ' + str(cat_name)].funcs['Spectrum'].getParam('Integral').value()
        errFlux = like2.model['_2FGLJ' + str(cat_name)].funcs['Spectrum'].getParam('Integral').error()
        scale = like2.model['_2FGLJ' + str(cat_name)].funcs['Spectrum'].getParam('Integral').getScale()
    except Exception:
        flux = 0
        errFlux = 0
        scale = 0
def PerformLikelihoodAnalysis(self):
    print "\nPerforming likelihood analysis on position: ra=%s, dec=%s" % (self.xref, self.yref)

    # Wait a random amount of time (up to 5 minutes) before starting, in
    # order not to overload the asf/nsf disks at SLAC
    waitTime = random.random() * 300
    time.sleep(waitTime)

    # Define the scratch directory
    JobID = os.environ.get('LSB_JOBID')
    Username = getpass.getuser()
    ScratchDirectory = "/scratch/%s/%s/" % (Username, JobID)

    # Define the pfile directory
    # (os.environ.get returns None, not the string 'None', when the key is absent)
    if JobID is None:
        PFILESDirectory = "%s/pfiles_%s/" % (self.outdir, self.binNumber)
    else:
        PFILESDirectory = "%s/pfiles/" % ScratchDirectory

    # Create the output directory if it doesn't already exist
    if os.path.isdir(self.outdir) == False:
        print "\n >> Creating Directory: " + self.outdir
        cmd = "mkdir " + self.outdir
        os.system(cmd)

    # Define where to save the results
    likelihoodResults = '%s/likelihoodResults_bin%s.txt' % (self.outdir, self.binNumber)

    # Remove any pre-existing pfiles
    if os.path.isdir(PFILESDirectory) == True:
        cmd = "rm -r %s" % PFILESDirectory
        os.system(cmd)

    # Set the new pfiles directory
    SetPfilesDirectory(PFILESDirectory)

    # Make a copy of the source model
    xmlModelWithPutativeSource = '%s/ModelSource_bin%s.xml' % (self.outdir, self.binNumber)
    cmd = "cp " + self.srcmdl + " " + xmlModelWithPutativeSource
    print cmd
    os.system(cmd)

    # Add a putative point source at the requested location
    # AddCandidateSource(self.xref, self.yref, xmlModelWithPutativeSource)
    ModifySourceModel(xmlModelWithPutativeSource, self.xref, self.yref)

    # # Import the necessary gtapps
    # gtlike = GtApp('gtlike')
    # # Run the likelihood analysis
    # print '\nPerforming the likelihood fit:'
    # gtlike.run(statistic=self.statistic, scfile=self.scfile,
    #            evfile=self.evfile, expmap=self.expmap, expcube=self.expcube,
    #            srcmdl=xmlModelWithPutativeSource, irfs=self.irfs,
    #            optimizer=self.optimizer, results=likelihoodResults,
    #            plot='no', save='yes')

    # Set up the unbinned likelihood object
    print '\nPerforming the likelihood fit:'
    try:
        obs = UnbinnedObs(self.evfile, self.scfile, expMap=self.expmap,
                          expCube=self.expcube, irfs=self.irfs)

        # Define the likelihood object
        # like = UnbinnedAnalysis(obs, xmlModelWithPutativeSource, optimizer=self.optimizer)
        like = UnbinnedAnalysis(obs, xmlModelWithPutativeSource, optimizer='MINUIT')

        # Set up the likelihood parameters
        Source = 'CandidateSource'
        Integral = like.par_index(Source, 'Integral')
        Index = like.par_index(Source, 'Index')
        LowerLimit = like.par_index(Source, 'LowerLimit')
        UpperLimit = like.par_index(Source, 'UpperLimit')

        # Set up the likelihood bounds
        like[Integral].setScale(1e-3)
        like[Index].setBounds(-5, -0.5)
        # like[LowerLimit] = emin
        # like[UpperLimit] = emax

        # Perform the likelihood fit
        # optObject = pyLike.NewMinuit(like.logLike)
        # like.fit(verbosity=0, covar=True, tol=0.02, optObject=optObject)
        like.fit(verbosity=1, covar=True, tol=1e-10, optimizer='MINUIT',
                 optObject=None)

        # Extract the best fit index
        IndexValue = like[Index].value()
        IndexError = like[Index].error()

        # Extract the best fit flux
        FluxValue = like.flux(Source, emin=100, emax=3e5)
        FluxError = like.fluxError(Source, emin=100, emax=3e5)

        # Extract likelihood fit results
        print '\nLikelihood Results:'
        print like.model[Source]
        print "TS = %s" % like.Ts(Source)
        print "Flux = %s +/- %s" % (FluxValue, FluxError)
        print "Index = %s +/- %s" % (IndexValue, IndexError)

        # Save the xml file
        like.writeXml(xmlFile=xmlModelWithPutativeSource)

    except Exception, message:
        print traceback.format_exc()
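# Hedged aside (illustration, reusing the `like` and `Source` objects from
# the fit above): under Wilks' theorem with one degree of freedom, sqrt(TS)
# gives a rough estimate of the detection significance in sigma.
import math
print "Approximate significance: %.1f sigma" % math.sqrt(max(like.Ts(Source), 0.0))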
def runLike(likeIn, ecent, ftol, tslim, ttype, opt, rescaleAll, lastbinUL, wx):
    pts = []
    errs = []
    tsPts = []
    gamma = []
    Src = likeIn.source
    NBins = likeIn.NBins
    IRFs = likeIn.IRFs
    expCube = likeIn.expCube
    ft2 = likeIn.ft2
    bandModel = likeIn.bandModel
    nbins = likeIn.nbins
    phCorr = likeIn.phCorr
    flux = likeIn.ubAn.flux
    for i in range(0, nbins):
        # First, set up for a likelihood run
        print ' -Running Likelihood for band%i-' % i
        ev = '%s_%ibins_band%i.fits' % (Src.replace(' ', '_'), NBins, i)
        em = '%s_%ibins_band%i_%s_em.fits' % (Src.replace(' ', '_'), NBins, i, IRFs)
        band_obs = UnbinnedObs(ev, ft2, irfs=IRFs, expMap=em, expCube=expCube)
        band_like = UnbinnedAnalysis(band_obs, bandModel, opt)
        band_like.setFitTolType(ttype)
        stype = band_like.model.srcs[Src].spectrum().genericName()
        emin, emax = band_like.observation.roiCuts().getEnergyCuts()
        if phCorr != 1:
            for src in band_like.sourceNames():
                par = band_like.normPar(src)
                par.setValue(par.getValue() * phCorr)
        # Shortcuts
        Ts = band_like.Ts
        freeze = band_like.freeze
        fit = band_like.fit
        DO = 1
        if stype == 'PowerLaw':
            # Set the scale factor to the center of the energy band, make sure
            # it's frozen, and get the index of the prefactor. This is where
            # PowerLaw is required; for PowerLaw2 these parameters don't exist
            # and this would cause problems.
            scale = getParamIndx(band_like, Src, 'Scale')
            pref = getParamIndx(band_like, Src, 'Prefactor')
            freeze(scale)
            band_like[scale].setBounds(20, 5e5)
            band_like[scale].setScale(1)
            # Center energies are in GeV but the xml files use MeV
            band_like[scale] = 1000. * ecent[i]
            # Rescale the prefactor so the fit values are not too large
            try:
                logFlux = log10(flux(Src, emin=emin, emax=emax) / (emax - emin))
            except:
                logFlux = -14
            newScale = max(int(floor(logFlux)), -14)
            print newScale
            band_like[pref].setScale(10.**newScale)
            multiplier = 10.**newScale
            # Cycle through the other point sources and adjust parameters of
            # free point sources with PowerLaw2 models
            for src in band_like.sourceNames():
                spec = band_like[src].funcs['Spectrum']
                par = spec.normPar()
                if par.isFree() == True and band_like.model.srcs[src].spectrum().genericName() == 'PowerLaw2':
                    HIGH = getParamIndx(band_like, src, 'UpperLimit')
                    LOW = getParamIndx(band_like, src, 'LowerLimit')
                    band_like[HIGH].setBounds(20, 5e5)  # just in case, make sure no out-of-range error gets thrown
                    band_like[LOW].setBounds(20, 5e5)
                    band_like[HIGH].setScale(1.)  # just in case, make sure the scale is MeV
                    band_like[LOW].setScale(1.)
                    band_like[HIGH] = emax
                    band_like[LOW] = emin
                    freeze(HIGH)  # just in case, make sure these aren't fit values
                    freeze(LOW)
                    if rescaleAll == True:
                        try:
                            logFlux = log10(flux(src, emin=emin, emax=emax))
                        except:
                            logFlux = -14
                        newScale = max(int(floor(logFlux)), -14)
                        par.setScale(10**newScale)
                if rescaleAll == True and src != Src and par.isFree() == True and band_like.model.srcs[src].spectrum().genericName() == 'PowerLaw':
                    try:
                        logFlux = log10(flux(src, emin=emin, emax=emax) / (emax - emin))
                    except:
                        logFlux = -14
                    newScale = max(int(floor(logFlux)), -14)
                    par.setScale(10**newScale)
            # A first fit with tolerance ~1 to look for negative or zero TS sources
            try:
                fit(tol=1, verbosity=0, optimizer=opt)
                fail = 0
            except:
                try:
                    fit(tol=1 * 10, verbosity=0, optimizer=opt)
                    fail = 0
                except:
                    try:
                        fit(tol=1. / 10, verbosity=0, optimizer=opt)
                        fail = 0
                    except:
                        print "Fit with optimizer %s with tolerances ~1 to look for negative or zero TS sources failed; if error bars are unrealistically small you may need to redo the fit for energy band %i manually" % (opt, i)
                        fail = 1
            if fail == 0:
                for src in band_like.sourceNames():
                    if src != Src:
                        par = band_like.normPar(src)
                        if band_like[src].type == 'PointSource' and Ts(src) <= 0 and par.isFree() == True:
                            band_like.deleteSource(src)
                            print " -Removing %s from the model" % src
            scale = getParamIndx(band_like, Src, 'Scale')
            pref = getParamIndx(band_like, Src, 'Prefactor')
            # Do the actual fit
            try:
                fit(tol=ftol, verbosity=0)
            except:
                try:
                    print 'Trying lower tolerance of %s for band%i.' % (ftol / 10, i)
                    fit(tol=ftol / 10, verbosity=0)
                except:
                    try:
                        print 'Trying higher tolerance of %s for band%i.' % (ftol * 10, i)
                        fit(tol=ftol * 10, verbosity=0)
                    except:
                        print 'No convergence for band%i, skipping.' % i
                        pts += [0]
                        errs += [0]
                        tsPts += [0]
                        # Mirror the PowerLaw2 branch: skip the band if no fit converged
                        DO = 0
            if DO:
                # Get the prefactor, error, and TS values
                val = band_like[pref].value()
                err = band_like[pref].error()
                TS = Ts(Src)
                if TS < tslim or (i == (nbins - 1) and lastbinUL == True):
                    # Calculate a 95% upper limit if the source has TS < tslim
                    # (25 by default, corresponding to ~5 sigma)
                    try:
                        freeze(pref)
                        ul = UpperLimits(band_like)
                        UL = ul[Src].compute(emin=emin, emax=emax)
                        val = UL[1]
                        err = 0
                        print ' NOTE: Band%i,' % i, 'with center energy', ecent[i], 'GeV, quoting 95% upper limit on flux.'
                    except:
                        try:
                            print ' Trying higher tolerance of %s for band %i to get good starting point for upper limit calculations.' % (ftol * 10, i)
                            band_like[pref].setFree(1)
                            fit(tol=ftol * 10, verbosity=0)
                            TS = Ts(Src)
                            if TS < tslim or (i == (nbins - 1) and lastbinUL == True):
                                freeze(pref)
                                ul = UpperLimits(band_like)
                                UL = ul[Src].compute(emin=emin, emax=emax)
                                val = UL[1]
                                err = 0
                                print ' NOTE: Band%i,' % i, 'with center energy', ecent[i], 'GeV, quoting 95% upper limit on flux.'
                            else:
                                val = band_like[pref].value()
                                err = band_like[pref].error()
                        except:
                            try:
                                print ' Trying lower tolerance of %s for band %i to get good starting point for upper limit calculations.' % (ftol / 10, i)
                                band_like[pref].setFree(1)
                                fit(tol=ftol / 10, verbosity=0)
                                TS = Ts(Src)
                                if TS < tslim or (i == (nbins - 1) and lastbinUL == True):
                                    freeze(pref)
                                    ul = UpperLimits(band_like)
                                    UL = ul[Src].compute(emin=emin, emax=emax)
                                    val = UL[1]
                                    err = 0
                                    print ' NOTE: Band%i,' % i, 'with center energy', ecent[i], 'GeV, quoting 95% upper limit on flux.'
                                else:
                                    val = band_like[pref].value()
                                    err = band_like[pref].error()
                            except:
                                err = 0
                                print ' NOTE: Band%i,' % i, 'with center energy', ecent[i], 'GeV, TS<%s' % tslim, 'but UpperLimits computation failed.'
                                print ' Quoting best fit value with zero error.'
                tsPts += [TS]
                pts += [val * multiplier / phCorr]
                errs += [err * multiplier / phCorr]
        elif stype == 'PowerLaw2':
            Upper = getParamIndx(band_like, Src, 'UpperLimit')
            Lower = getParamIndx(band_like, Src, 'LowerLimit')
            Integral = getParamIndx(band_like, Src, 'Integral')
            Index = getParamIndx(band_like, Src, 'Index')
            freeze(Upper)
            freeze(Lower)
            band_like[Upper].setBounds(20, 5e5)
            band_like[Lower].setBounds(20, 5e5)
            # Make sure both energy limits use a MeV scale
            band_like[Upper].setScale(1)
            band_like[Lower].setScale(1)
            band_like[Upper] = emax
            band_like[Lower] = emin
            # Rescale the integral so the fit values are not too large
            try:
                logFlux = log10(flux(Src, emin=emin, emax=emax))
            except:
                logFlux = -14
            newScale = max(int(floor(logFlux)), -14)
            band_like[Integral].setScale(10**newScale)
            multiplier = 10**newScale
            indxMult = band_like[Index].getScale()
            for src in band_like.sourceNames():
                if src != Src:
                    spec = band_like[src].funcs['Spectrum']
                    par = spec.normPar()
                    if par.isFree() == True and band_like.model.srcs[src].spectrum().genericName() == 'PowerLaw2':
                        HIGH = getParamIndx(band_like, src, 'UpperLimit')
                        LOW = getParamIndx(band_like, src, 'LowerLimit')
                        band_like[HIGH].setBounds(20, 5e5)  # just in case, make sure no out-of-range error gets thrown
                        band_like[LOW].setBounds(20, 5e5)
                        band_like[HIGH].setScale(1.)  # just in case, make sure the scale is MeV
                        band_like[LOW].setScale(1.)
                        band_like[HIGH] = emax
                        band_like[LOW] = emin
                        freeze(HIGH)  # just in case, make sure these aren't fit values
                        freeze(LOW)
                        if rescaleAll == True:
                            try:
                                logFlux = log10(flux(src, emin=emin, emax=emax))
                            except:
                                logFlux = -14
                            newScale = max(int(floor(logFlux)), -14)
                            par.setScale(10**newScale)
                    if rescaleAll == True and src != Src and par.isFree() == True and band_like.model.srcs[src].spectrum().genericName() == 'PowerLaw':
                        try:
                            logFlux = log10(flux(src, emin=emin, emax=emax) / (emax - emin))
                        except:
                            logFlux = -14
                        newScale = max(int(floor(logFlux)), -14)
                        par.setScale(10**newScale)
            try:
                fit(tol=1, verbosity=0, optimizer=opt)
                fail = 0
            except:
                try:
                    fit(tol=1 * 10, verbosity=0, optimizer=opt)
                    fail = 0
                except:
                    try:
                        fit(tol=1. / 10, verbosity=0, optimizer=opt)
                        fail = 0
                    except:
                        print "Fit with optimizer %s with tolerances ~1 to look for negative or zero TS sources failed; if error bars are unrealistically small you may need to redo the fit for energy band %i manually" % (opt, i)
                        fail = 1
            if not fail:
                for src in band_like.sourceNames():
                    if src != Src:
                        par = band_like.normPar(src)
                        if band_like[src].type == 'PointSource' and Ts(src) <= 0 and par.isFree() == True:
                            band_like.deleteSource(src)
                            print " -Removing %s from the model" % src
            Upper = getParamIndx(band_like, Src, 'UpperLimit')
            Lower = getParamIndx(band_like, Src, 'LowerLimit')
            Integral = getParamIndx(band_like, Src, 'Integral')
            Index = getParamIndx(band_like, Src, 'Index')
            try:
                fit(tol=ftol, verbosity=0)
            except:
                try:
                    print 'Trying lower tolerance of %s for band%i.' % (ftol / 10, i)
                    fit(tol=ftol / 10, verbosity=0)
                except:
                    try:
                        print 'Trying higher tolerance of %s for band%i.' % (ftol * 10, i)
                        fit(tol=ftol * 10, verbosity=0)
                    except:
                        print 'No convergence for band%i, skipping.' % i
                        pts += [0]
                        errs += [0]
                        tsPts += [0]
                        gamma += [0]
                        DO = 0
            if DO:
                val = band_like[Integral].value()
                err = band_like[Integral].error()
                TS = Ts(Src)
                gam = band_like[Index].value() * indxMult * -1.
                if TS < tslim or (i == (nbins - 1) and lastbinUL == True):
                    # Calculate a 95% upper limit if the source has TS < tslim
                    # in this energy band
                    try:
                        freeze(Integral)
                        ul = UpperLimits(band_like)
                        UL = ul[Src].compute(emin=emin, emax=emax)
                        val = UL[1]
                        err = 0
                        # Redo the fit with the Integral set to the upper limit
                        # value to get the correct spectral index for that value
                        band_like[Integral] = val
                        freeze(Integral)
                        fit(tol=ftol, verbosity=0)
                        gam = band_like[Index].value() * indxMult * -1.
                        print ' NOTE: Band%i,' % i, 'with center energy', ecent[i], 'GeV, quoting 95% upper limit on flux.'
                    except:
                        try:
                            print ' Trying higher tolerance of %s for band %i to get good starting point for upper limit calculations.' % (ftol * 10, i)
                            band_like[Integral].setFree(1)
                            fit(tol=ftol * 10, verbosity=0)
                            TS = Ts(Src)
                            if TS < tslim or (i == (nbins - 1) and lastbinUL == True):
                                freeze(Integral)
                                ul = UpperLimits(band_like)
                                UL = ul[Src].compute(emin=emin, emax=emax)
                                val = UL[1]
                                err = 0
                                band_like[Integral] = val
                                freeze(Integral)
                                fit(tol=ftol * 10, verbosity=0)
                                gam = band_like[Index].value() * indxMult * -1.
                                print ' NOTE: Band%i,' % i, 'with center energy', ecent[i], 'GeV, quoting 95% upper limit on flux.'
                            else:
                                val = band_like[Integral].value()
                                err = band_like[Integral].error()
                                gam = band_like[Index].value() * indxMult * -1.
                        except:
                            try:
                                print ' Trying lower tolerance of %s for band %i to get good starting point for upper limit calculations.' % (ftol / 10, i)
                                band_like[Integral].setFree(1)
                                fit(tol=ftol / 10, verbosity=0)
                                TS = Ts(Src)
                                if TS < tslim or (i == (nbins - 1) and lastbinUL == True):
                                    freeze(Integral)
                                    ul = UpperLimits(band_like)
                                    UL = ul[Src].compute(emin=emin, emax=emax)
                                    val = UL[1]
                                    err = 0
                                    band_like[Integral] = val
                                    freeze(Integral)
                                    fit(tol=ftol / 10, verbosity=0)
                                    gam = band_like[Index].value() * indxMult * -1.
                                    print ' NOTE: Band%i,' % i, 'with center energy', ecent[i], 'GeV, quoting 95% upper limit on flux.'
                                else:
                                    val = band_like[Integral].value()
                                    err = band_like[Integral].error()
                                    gam = band_like[Index].value() * indxMult * -1.
                            except:
                                err = 0
                                print ' NOTE: Band%i,' % i, 'with center energy', ecent[i], 'GeV, TS<%s' % tslim, 'but UpperLimits computation failed.'
                                print ' Quoting best fit value with zero error.'
                tsPts += [TS]
                pts += [val * multiplier / phCorr]
                errs += [err * multiplier / phCorr]
                gamma += [gam]
        else:
            print '%s needs to have PowerLaw or PowerLaw2 spectral model, not %s' % (Src, stype)
            print 'exiting without running likelihood in the energy bands'
            return None, None, None, None
        if wx:
            band_like.writeXml('%s_%ibins_band%i_fitmodel.xml' % (Src.replace(' ', '_'), NBins, i))
        del band_like
        del band_obs
    return pts, errs, tsPts, gamma
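# Usage sketch (hypothetical; assumes the PowerLaw branch above, where the
# returned pts/errs are dN/dE values in ph/cm^2/s/MeV at the band centers,
# and that a likeInput-style object `likeIn` and band centers `ecent` in GeV
# already exist). Converting the points to nuFnu in erg/cm^2/s for an SED:
MeV_to_erg = 1.602e-6
pts, errs, tsPts, gamma = runLike(likeIn, ecent, ftol, tslim, ttype, opt,
                                  rescaleAll, lastbinUL, wx)
for e_gev, dnde, dnde_err in zip(ecent, pts, errs):
    e_mev = 1000. * e_gev
    print '%8.3f GeV  nuFnu = %.3g +/- %.3g erg/cm^2/s' % (
        e_gev, dnde * e_mev**2 * MeV_to_erg, dnde_err * e_mev**2 * MeV_to_erg)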
def runLikelihood(subdir, tpl_file):
    '''This function runs the likelihood code on a set of pixels in a
    subdirectory. It takes as input the subdirectory to work on and a
    template counts map. It reads its configuration from a pickle file
    (par.pck) that should be located in the subdirectory, and the pixel
    locations from another pickle file (pixel.pck). It then creates an
    overall likelihood object, does a quick global fit, and then loops over
    the pixels. At each pixel it creates a test source, fits that source,
    calculates the TS of the source, and writes the results to an output
    file in the subdirectory called ts_results.dat.'''
    parfile = open("par.pck", "r")
    pars = pickle.load(parfile)
    pixelfile = open("pixel.pck", "r")
    pixels = pickle.load(pixelfile)
    pixel_coords = PixelCoords(tpl_file)
    if pars['statistic'] == 'UNBINNED':
        import UnbinnedAnalysis as UBAn
        obs = UBAn.UnbinnedObs(resolve_fits_files(pars['evfile']),
                               resolve_fits_files(pars['scfile']),
                               expMap='../' + pars['expmap'],
                               expCube='../' + pars['expcube'],
                               irfs=pars['irfs'])
        like = UBAn.UnbinnedAnalysis(obs, '../' + pars['srcmdl'],
                                     pars['optimizer'])
    elif pars['statistic'] == 'BINNED':
        import BinnedAnalysis as BAn
        obs = BAn.BinnedObs(srcMaps='../' + pars['srcmaps'],
                            expCube='../' + pars['expcube'],
                            binnedExpMap='../' + pars['bexpmap'],
                            irfs=pars['irfs'])
        like = BAn.BinnedAnalysis(obs, '../' + pars['srcmdl'],
                                  pars['optimizer'])
    like.setFitTolType(pars['toltype'])
    like.optimize(0)
    loglike0 = like()
    test_src = getPointSource(like)
    target_name = 'testSource'
    test_src.setName(target_name)
    outfile = 'ts_results.dat'
    # Resume from a previous run if the output file already exists
    finished_pixels = []
    if os.path.isfile(outfile):
        infile = open(outfile, 'r')
        for line in infile:
            tokens = line.strip().split()
            ij = int(tokens[0]), int(tokens[1])
            finished_pixels.append(ij)
        infile.close()
    output = open(outfile, 'a')
    for indx, i, j in pixels:
        if (i, j) in finished_pixels:
            continue
        ra, dec = pixel_coords(i, j)
        test_src.setDir(ra, dec, True, False)
        like.addSource(test_src)
        like.optimize(0)
        # TS of the test source relative to the null (global) fit
        ts = -2 * (like() - loglike0)
        output.write("%3i %3i %.3f %.3f %.5f\n" % (i, j, ra, dec, ts))
        output.flush()
        like.deleteSource(target_name)
    output.close()
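# Sketch (illustration only): the per-pixel results written above can be
# collected back into arrays, e.g. to assemble a TS map; the columns match
# the output.write format string in runLikelihood.
import numpy as np
i_pix, j_pix, ra, dec, ts = np.loadtxt('ts_results.dat', unpack=True)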
                       poi_values=poi_values,
                       poi_probs=poi_probs,
                       poi_dlogL=poi_dlogL,
                       flux_emin=emin,
                       flux_emax=emax)
    return ul_flux, results


if __name__ == "__main__":
    import sys

    srcName = "EMS0001"
    obs = UnbinnedAnalysis.UnbinnedObs('ft1_roi.fits',
                                       scFile='ft2.fits',
                                       expMap='expMap.fits',
                                       expCube='expCube.fits',
                                       irfs='P6_V9_DIFFUSE')

    # min_opt = 'InteractiveMinuit,MIN 0 $TOL,HESSE,.q'
    # pro_opt = 'InteractiveMinuit,SET STR 0,MIN 0 $TOL,.q'
    min_opt = 'MINUIT'
    pro_opt = None

    like = UnbinnedAnalysis.UnbinnedAnalysis(obs, 'model.xml', min_opt)

    # Freeze the photon index of the target source at -2
    src_spectrum = like[srcName].funcs['Spectrum']
    par = src_spectrum.getParam("Index")
    if par:
        par.setFree(False)
        par.setValue(-2.0)
import sys
from UnbinnedAnalysis import *
from UpperLimits import UpperLimits

# Command-line arguments: output file, source name, IRFs, energy range,
# and the five input files
filename, par_srcname, par_irfs = sys.argv[1:4]
par_emin = float(sys.argv[4])
par_emax = float(sys.argv[5])
file_eventFile, file_scFile, file_expMap, file_expCube, file_srcModel = sys.argv[6:11]

obs = UnbinnedObs(eventFile=file_eventFile, scFile=file_scFile,
                  expMap=file_expMap, expCube=file_expCube, irfs=par_irfs)
like = UnbinnedAnalysis(obs, srcModel=file_srcModel, optimizer='NewMinuit')
like.fit(verbosity=0, covar=True)
# Evaluate the test statistic for the source
like.Ts(par_srcname)

ul = UpperLimits(like)
ul[par_srcname].compute(emin=par_emin, emax=par_emax)

f = open(filename, 'w')
print >> f, ul[par_srcname].results
f.close()
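# Example invocation (hypothetical script and file names, with the P6 diffuse
# IRFs used in the snippet above this one):
#
#   python upper_limit.py results.txt MySource P6_V9_DIFFUSE 100 100000 \
#       ft1_roi.fits ft2.fits expMap.fits expCube.fits model.xml
#
# results.txt will then contain the results list stored in
# ul[par_srcname].results.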