def __init__(self, infile):
    self.infile = infile
    try:
        self.config = get_config(infile)
    except:
        config = ConfigObj(indent_type='\t')
        config['out'] = os.getcwd()
        self.config = get_config(config)
    try:
        ftmp = open(self.config['file']['xml'], 'r')
        ftmp.close()
    except:
        os.system('touch ' + self.config['file']['xml'])

    self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    self.window.connect("delete_event", self.delete)
    self.window.set_border_width(10)

    table = gtk.Table(3, 6, False)
    self.window.add(table)

    self.notebloc = gtk.Notebook()
    self.notebloc.set_tab_pos(gtk.POS_LEFT)
    table.attach(self.notebloc, 0, 6, 0, 1)
    self.notebloc.show()

    self._addMainOption()
    self._addFiles()
    self._addTargetSpace()
    self._addAnalysis()
    self._addEnergyTime()
    self._addSpectrum()
    self._addUL()
    self._addLC()
#    self._addFoldedLC()
    self._addAppFoldedLC()
#    self._addEbin()
    self._addTSMap()
    self._addfindsrcsrcprob()
    self._addPlot()
    self.notebloc.set_current_page(0)

    ReloadButton = gtk.Button("Reload conf file")
    ReloadButton.connect("clicked", self.reload, "")
    table.attach(ReloadButton, 1, 2, 1, 2)
    ReloadButton.show()

    SaveButton = gtk.Button("Save file")
    SaveButton.connect("clicked", self.save, "")
    table.attach(SaveButton, 3, 4, 1, 2)
    SaveButton.show()

    CloseButton = gtk.Button("Close")
    CloseButton.connect("clicked", self.delete)
    table.attach(CloseButton, 4, 5, 1, 2)
    CloseButton.show()

    table.show()
    self.window.show()
def run(infile):
    """Run an entire Fermi analysis (spectrum) by reading a config file"""
    from enrico import utils
    from enrico import energybin
    from enrico.config import get_config

    config = get_config(infile)
    folder = config['out']
    utils.create_dir(folder)

    FitRunner, Fit = GenAnalysisObjects(config)
    # create all the fit files and run gtlike
    FitRunner.PerformFit(Fit)

    Result = FitRunner.GetAndPrintResults(Fit)  # get and dump the target-specific results
    utils.DumpResult(Result, config)

    # plot the SED and model map if possible and requested
    if config['Spectrum']['ResultPlots'] == 'yes':
        from enrico.constants import SpectrumPath
        utils.create_dir("%s/%s/" % (config['out'], SpectrumPath))
        if float(config['UpperLimit']['TSlimit']) < Fit.Ts(config['target']['name']):
            FitRunner.ComputeSED(Fit)
        outXml = utils._dump_xml(config)
        if config['Spectrum']['SummedLike'] != 'yes':
            # the possibility of making the model map is checked inside the function
            FitRunner.ModelMap(outXml)

    # Make energy bins by running a *new* analysis
    Nbin = config['Ebin']['NumEnergyBins']
    energybin.RunEbin(folder, Nbin, Fit, FitRunner)

    del(Result)
    del(FitRunner)
def run(infile): """Run an entire Fermi analysis (spectrum) by reading a config file""" config = get_config(infile) folder = config['out'] os.system('mkdir -p ' + folder) FitRunner,Fit = GenAnalysisObjects(config) # create all the fit files and run gtlike FitRunner.PerformFit(Fit) Result = FitRunner.GetAndPrintResults(Fit)#Get and dump the target specific results if config['verbose'] == 'yes' : utils.GetFluxes(Fit,FitRunner.obs.Emin,FitRunner.obs.Emax) #print the flux of all the sources utils.DumpResult(Result, config) #plot the SED and model map if possible and asked if config['Spectrum']['ResultPlots'] == 'yes' : from enrico.constants import SpectrumPath os.system("mkdir -p "+config['out'] + '/'+SpectrumPath+'/') if float(config['UpperLimit']['TSlimit']) < Fit.Ts(config['target']['name']): FitRunner.ComputeSED(Fit) outXml = utils._dump_xml(config) if config['Spectrum']['SummedLike'] != 'yes': # the possiblity of making the model map is checked inside the function FitRunner.ModelMap(outXml) # Make energy bins by running a *new* analysis Nbin = config['Ebin']['NumEnergyBins'] energybin.RunEbin(folder,Nbin,Fit,FitRunner)
def run(infile): """Run an entire Fermi analysis (spectrum) by reading a config file""" config = get_config(infile) folder = config['out'] os.system('mkdir -p ' + folder) FitRunner, Fit = GenAnalysisObjects(config) # create all the fit files and run gtlike FitRunner.PerformFit(Fit) Result = FitRunner.GetAndPrintResults( Fit) #Get and dump the target specific results if config['verbose'] == 'yes': utils.GetFluxes(Fit, FitRunner.obs.Emin, FitRunner.obs.Emax) #print the flux of all the sources utils.DumpResult(Result, config) #plot the SED and model map if possible and asked if config['Spectrum']['ResultPlots'] == 'yes': from enrico.constants import SpectrumPath os.system("mkdir -p " + config['out'] + '/' + SpectrumPath + '/') if float(config['UpperLimit']['TSlimit']) < Fit.Ts( config['target']['name']): FitRunner.ComputeSED(Fit) outXml = utils._dump_xml(config) if config['Spectrum'][ 'SummedLike'] != 'yes': # the possiblity of making the model map is checked inside the function FitRunner.ModelMap(outXml) # Make energy bins by running a *new* analysis Nbin = config['Ebin']['NumEnergyBins'] energybin.RunEbin(folder, Nbin, Fit, FitRunner)
def run(infile):
    """Run an entire Fermi analysis (spectrum) by reading a config file"""
    from enrico import utils
    from enrico import energybin
    from enrico.config import get_config
    from enrico import Loggin
    mes = Loggin.Message()

    config = get_config(infile)
    folder = config['out']
    utils.create_dir(folder)

    FitRunner, Fit = GenAnalysisObjects(config)
    # create all the fit files and run gtlike
    FitRunner.PerformFit(Fit)

    # plot the SED and model map if possible and requested
    if float(config['UpperLimit']['TSlimit']) < Fit.Ts(config['target']['name']):
        if config['Spectrum']['ResultPlots'] == 'yes':
            from enrico.constants import SpectrumPath
            utils.create_dir("%s/%s/" % (config['out'], SpectrumPath))
            sedresult = FitRunner.ComputeSED(Fit, dump=True)
        else:
            sedresult = FitRunner.ComputeSED(Fit, dump=False)

        # Update the energy scale to the decorrelation energy
        mes.info('Setting the decorrelation energy as new Scale for the spectral parameters')
        spectrum = Fit[FitRunner.obs.srcname].funcs['Spectrum']
        modeltype = spectrum.genericName()

        if Fit.model.srcs[FitRunner.obs.srcname].spectrum().genericName() == "PowerLaw":
            varscale = "Scale"
        if Fit.model.srcs[FitRunner.obs.srcname].spectrum().genericName() == "PowerLaw2":
            varscale = None
        elif Fit.model.srcs[FitRunner.obs.srcname].spectrum().genericName() == "PLSuperExpCutoff":
            varscale = "Scale"
        elif Fit.model.srcs[FitRunner.obs.srcname].spectrum().genericName() == "LogParabola":
            varscale = "Eb"
        elif Fit.model.srcs[FitRunner.obs.srcname].spectrum().genericName() == "BrokenPowerLaw":
            varscale = "Eb"

        if varscale is not None:
            spectrum.getParam(varscale).setValue(sedresult.decE)
            FitRunner.PerformFit(Fit)

    if config['Spectrum']['ResultPlots'] == 'yes':
        outXml = utils._dump_xml(config)
        if config['Spectrum']['SummedLike'] != 'yes':
            # the possibility of making the model map is checked inside the function
            FitRunner.ModelMap(outXml)

    # Get and dump the target-specific results
    Result = FitRunner.GetAndPrintResults(Fit)
    utils.DumpResult(Result, config)

    # Make energy bins by running a *new* analysis
    Nbin = config['Ebin']['NumEnergyBins']
    energybin.RunEbin(folder, Nbin, Fit, FitRunner)

    del(Result)
    del(FitRunner)
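# A sketch of an alternative to the if/elif chain above: the model-dependent name of the energy
# scale parameter can be kept in a lookup table keyed on genericName(). This is an illustration
# only (the mapping simply restates the cases handled in the chain above); the in-place chain is
# what the code actually uses.
SCALE_PARAM_BY_MODEL = {
    "PowerLaw": "Scale",
    "PowerLaw2": None,          # integral parametrisation, no scale energy
    "PLSuperExpCutoff": "Scale",
    "LogParabola": "Eb",
    "BrokenPowerLaw": "Eb",
}

def scale_param_name(model_name):
    """Return the spectral-scale parameter name for a model, or None if it has none."""
    return SCALE_PARAM_BY_MODEL.get(model_name)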
def run_analysis(self):
    import shutil
    from enrico import environ
    from enrico.config import get_config
    from enrico.appertureLC import AppLC
    from enrico.submit import call
    from enrico.constants import AppLCPath

    infile = self.configfile
    config = get_config(infile)

    self.remove_prev_dir()

    verbose("-> Running the analysis for %s" % (self.name))

    # We will always try to run it in parallel,
    # either with python multiprocessing or with an external torque/PBS batch system.
    if config['Submit'] == 'no':
        global fqueue
        fqueue.put(infile)
    else:
        enricodir = environ.DIRS.get('ENRICO_DIR')
        fermidir = environ.DIRS.get('FERMI_DIR')
        cmd = enricodir + "/enrico/appertureLC.py %s" % infile
        LCoutfolder = config['out'] + "/" + AppLCPath
        os.system("mkdir -p " + LCoutfolder)
        prefix = LCoutfolder + "/" + config['target']['name'] + "_AppertureLightCurve"
        scriptname = prefix + "_Script.sh"
        JobLog = prefix + "_Job.log"
        JobName = "LC_%s" % self.name
        call(cmd, enricodir, fermidir, scriptname, JobLog, JobName)
    verbose("--> Job sent successfully")
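# Hedged sketch of the 'fqueue' pattern referred to above when config['Submit'] == 'no':
# config-file paths are pushed onto a multiprocessing queue and consumed by worker processes.
# The worker body below (calling AppLC, imported at the top of run_analysis, on each file) is
# an assumption for illustration; enrico's actual worker wiring may differ.
import multiprocessing

def _applc_worker(queue):
    """Consume config-file paths from the queue until a None sentinel is received."""
    while True:
        conf = queue.get()
        if conf is None:
            break
        AppLC(conf)

# Example wiring (hypothetical):
#   fqueue = multiprocessing.Queue()
#   workers = [multiprocessing.Process(target=_applc_worker, args=(fqueue,)) for _ in range(4)]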
def __init__(self, config):
    super(LightCurve, self).__init__()
    Loggin.Message.__init__(self)
    ROOT.gROOT.SetBatch(ROOT.kTRUE)  # Batch mode

    self.config = get_config(config)
    self.generalconfig = get_config(config)
    print(self.generalconfig)
    # Read the config
    self.srcname = self.config['target']['name']  # source name
    self.Tag = self.config['file']['tag']
    self.tmin = self.config['time']['tmin']
    self.tmax = self.config['time']['tmax']
    self.submit = self.config['Submit']

    # One point of the LC will be computed as a spectrum plot.
    # enrico_sed will be used.
    # Whether FITS files will be generated
    self.config['Spectrum']['FitsGeneration'] = self.config['LightCurve']['FitsGeneration']
    # Freeze the spectral index at the value of self.config['LightCurve']['SpectralIndex'] (no effect if 0)
    self.config['Spectrum']['FrozenSpectralIndex'] = self.config['LightCurve']['SpectralIndex']
    # TS limit. Compute an UL if the TS is below TSLightCurve
    self.config['UpperLimit']['TSlimit'] = self.config['LightCurve']['TSLightCurve']

    self.folder = self.config['out']

    # No plot, no bin in energy, no decorrelation-energy optimization, normal UL
    self.config['Spectrum']['ResultPlots'] = 'no'
    self.config['Ebin']['NumEnergyBins'] = 0
    self.config['energy']['decorrelation_energy'] = 'no'
    self.config['UpperLimit']['envelope'] = 'no'

    # No submission here: submission will be handled directly by this software
    self.config['Submit'] = 'no'
    # self.config['verbose'] = 'no'  # Be quiet

    # Try to speed up the analysis by reusing the evt file from the main analysis
    self._RecycleEvtCoarse()

    self.configfile = []  # All the config files on disk are stored in a list
def __init__(self, config, parent_filename=""):
    super(LightCurve, self).__init__()
    Loggin.Message.__init__(self)
    self.parent_filename = os.path.abspath(parent_filename)

    self.config = get_config(config)
    self.generalconfig = get_config(config)
    print(self.generalconfig)
    # Read the config
    self.srcname = self.config['target']['name']  # source name
    self.Tag = self.config['file']['tag']
    self.tmin = self.config['time']['tmin']
    self.tmax = self.config['time']['tmax']
    self.submit = self.config['Submit']

    # One point of the LC will be computed as a spectrum plot.
    # enrico_sed will be used.
    # Whether FITS files will be generated
    # self.config['target']['spectrum'] = 'PowerLaw'  # simplify the spectrum
    self.config['Spectrum']['FitsGeneration'] = self.config['LightCurve']['FitsGeneration']
    # Freeze the spectral index at the value of self.config['LightCurve']['SpectralIndex'] (no effect if 0)
    self.config['Spectrum']['FrozenSpectralIndex'] = self.config['LightCurve']['SpectralIndex']
    if (self.config['LightCurve']['SpectralIndex'] != 0):
        self.config['UpperLimit']['SpectralIndex'] = self.config['LightCurve']['SpectralIndex']
    # TS limit. Compute an UL if the TS is below TSLightCurve
    self.config['UpperLimit']['TSlimit'] = self.config['LightCurve']['TSLightCurve']

    self.folder = self.config['out']

    # Do not create plots
    self.config['Spectrum']['ResultPlots'] = 'no'  # no
    self.config['Spectrum']['ResultParentPlots'] = 'no'  # no
    self.config['Ebin']['NumEnergyBins'] = 0
    self.config['energy']['decorrelation_energy'] = 'yes'  # no
    self.config['UpperLimit']['envelope'] = 'no'

    # Submission will be handled directly by this software
    self.config['Submit'] = 'no'
    # self.config['verbose'] = 'no'  # Be quiet

    # Speed up the analysis by reusing the evt file from the main analysis
    self._RecycleEvtCoarse()

    self.configfile = []  # All the config files on disk are stored in a list
def AppLC(infile):
    '''Main function of the aperture photometry light-curve script.
    Read the config file and run the analysis'''
    ROOT.gROOT.SetBatch(ROOT.kTRUE)  # Batch mode

    enricodir = environ.DIRS.get('ENRICO_DIR')
    fermidir = environ.DIRS.get('FERMI_DIR')
    config = get_config(infile)

    folder = config['out']
    # Create a subfolder named LightCurve
    LCoutfolder = folder + "/" + AppLCPath
    utils.mkdir_p(LCoutfolder)

    # Change the ROI to the desired radius in degrees, legacy 1 deg.
    try:
        config['space']['rad'] = config['AppLC']['rad']
    except NameError:
        config['space']['rad'] = 1

    Nbins = config['AppLC']['NLCbin']  # Number of bins
    # Get the time bin
    dt = (config['time']['tmax'] - config['time']['tmin']) / Nbins  # seconds

    Obs = Observation(LCoutfolder, config, tag="")
    if config['AppLC']["FitsGeneration"] == "yes":
        _log('gtselect', 'Select data from library')  # run gtselect
        Obs.FirstCut()
        Obs.SelectEvents()
        _log('gtmktime', 'Update the GTI and cut data based on ROI')  # run gtmktime
        Obs.MkTime()

        # Binning from data or using a fixed bin size
        if config['AppLC']['binsFromData'] == "no":
            _log('gtbin', 'bin the data into a light curve using fixed time bins')  # run gtbin
            print "Use a dt of %2.2e seconds" % (dt)
            Obs.GtLCbin(dt=dt)
        else:
            spfile = fits.open(Obs.eventfile)
            diff = spfile[1].data.field(9)[1:-1] - spfile[1].data.field(9)[:-2]
            dt = np.min(diff) / 2.  # Compute delta T as the minimum delta t between 2 events, divided by 2
            timefile = LCoutfolder + "/Timebin.txt"
            MakeTimebinFile(Obs, timefile)
            _log('gtbindef', 'define the bins')  # run gtbindef
            Obs.GtBinDef(timefile)
            _log('gtbin', 'bin the data into a light curve using bins based on data')  # run gtbin
            Obs.GtLCbin(dt=0)

        _log('gtexposure', 'compute the exposure')  # run gtexposure
        Obs.GtExposure()

    # Get some useful values here. This allows PlotAppLC to be called independently
    Nbins = config['AppLC']['NLCbin']  # Number of bins

    # Plot the results and dump into ascii files
    PlotAppLC(Nbins, LCoutfolder, Obs.lcfile)
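# Hedged illustration of the data-driven bin width used above: with event arrival times t_i
# (the function above reads them from FITS column 9 of the event file), half of the smallest
# separation between consecutive events is taken as dt. This standalone helper is a sketch of
# that idea, not the enrico implementation.
import numpy as np

def adaptive_dt(event_times):
    """Return half of the minimum spacing between consecutive (sorted) event times."""
    t = np.sort(np.asarray(event_times, dtype=float))
    return np.min(np.diff(t)) / 2.0

# e.g. adaptive_dt([0.0, 10.0, 12.0, 40.0]) -> 1.0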
def RunEbin(folder, Nbin, Fit, FitRunner):
    if int(Nbin) > 0:
        configfiles = PrepareEbin(Fit, FitRunner)
        ind = 0
        enricodir = environ.DIRS.get('ENRICO_DIR')
        fermidir = environ.DIRS.get('FERMI_DIR')
        for conf in configfiles:
            pathconf = folder + "/" + EbinPath + str(Nbin) + "/" + conf
            Newconfig = get_config(pathconf)
            cmd = enricodir + "/enrico/RunGTlike.py " + pathconf
            if Newconfig['Submit'] == 'no':  # run directly
                os.system(cmd)
            else:  # submit a job to a cluster
                prefix = Newconfig['out'] + "/" + EbinPath + str(ind)
                scriptname = prefix + "_Script.sh"
                JobLog = prefix + "_Job.log"
                JobName = (Newconfig['target']['name'] + "_" +
                           Newconfig['analysis']['likelihood'] +
                           "_Ebin_" + str(ind) + "_" +
                           Newconfig['file']['tag'])
                call(cmd, enricodir, fermidir, scriptname, JobLog, JobName)  # submission
            ind += 1
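# Hedged sketch of how the energy bins behind EbinPath are likely laid out: NumEnergyBins
# logarithmically spaced bins between emin and emax, as suggested by the np.logspace
# construction used in PlotDataPoints further down. The helper and values below are
# illustrative only; PrepareEbin is what actually writes the per-bin config files.
import numpy as np

def ebin_edges(emin_mev, emax_mev, nbin):
    """Return nbin+1 logarithmically spaced energy-bin edges in MeV."""
    return np.logspace(np.log10(emin_mev), np.log10(emax_mev), nbin + 1)

# e.g. ebin_edges(100., 300000., 3) -> approximately [1.0e2, 1.44e3, 2.08e4, 3.0e5]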
def FindSrc(infile):
    config = get_config(infile)
    folder = config['out']
    Obs = Observation(folder, config)

    utils._log('SUMMARY: ')
    Obs.printSum()
    FitRunner = FitMaker(Obs, config)

    if config["findsrc"]["FitsGeneration"] == "yes":
        config['analysis']['likelihood'] = 'unbinned'
        FitRunner.GenerateFits()

    FitRunner._log('gtfindsrc', 'Optimize source position')
    os.system("rm " + utils._dump_findsrcout(config))
    Obs.FindSource()
    try:
        update_reg(config)
    except:
        pass
def __init__(self, infile):
    self.config = get_config(infile)
    self.folder = self.config['out']
    os.system("mkdir -p " + self.folder + "/TestModel")

    convtype = self.config['analysis']['convtype']
    if self.config['Spectrum']['SummedLike'] == 'yes':
        Obs1 = Observation(self.folder, self.config, convtype=0, tag="FRONT")
        Obs2 = Observation(self.folder, self.config, convtype=1, tag="BACK")
        FitRunnerfront = FitMaker(Obs1, self.config)
        FitRunnerback = FitMaker(Obs2, self.config)
        FitRunnerfront.CreateLikeObject()
        FitRunnerback.CreateLikeObject()
        self.Fit = SummedLikelihood.SummedLikelihood()
    else:
        Obs = Observation(self.folder, self.config, convtype, tag="")
        FitRunner = FitMaker(Obs, self.config)
        self.Fit = FitRunner.CreateLikeObject()

    # Store the results in a dictionary
    self.Results = {}
    self.Results["PowerLaw"] = 0
    self.Results["LogParabola"] = 0
    self.Results["PLSuperExpCutoff"] = 0
    src.getSrcFuncs()['Spectrum'].getParam('Prefactor').setBounds(1e-5, 1e5)
    src.getSrcFuncs()['Spectrum'].getParam('Prefactor').setScale(1e-9)
    src.getSrcFuncs()['Spectrum'].getParam('Index').setBounds(-5, 0)
    src.getSrcFuncs()['Spectrum'].getParam('Scale').setValue(300)
    src.getSrcFuncs()['Spectrum'].getParam('Scale').setBounds(1e-5, 1e5)
    return src

if __name__ == '__main__':
    try:
        infile = sys.argv[1]
    except:
        print('FATAL: Config file not found.')
        sys.exit(1)

    from enrico.config import get_config
    config = get_config(infile)
    TSmap = TSMap(config, infile)
    if len(sys.argv) == 6:
        if TSmap.config['TSMap']['method'] == 'row':
            TSmap.FitOneRow(float(sys.argv[2]), int(sys.argv[4]))
        else:
            TSmap.FitOnePixel(float(sys.argv[2]), float(sys.argv[3]),
                              int(sys.argv[4]), int(sys.argv[5]))
    else:
        print "Wrong number of arguments"
        sys.exit(1)
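# Hedged note on the command-line contract enforced above: besides the config file, the script
# expects four extra arguments (len(sys.argv) == 6). From the calls above, argv[2] and argv[3]
# are parsed as floats and argv[4] and argv[5] as integers; the 'row' method consumes only
# argv[2] and argv[4]. The helper below, which just assembles such an argument list, is a
# hypothetical illustration; the meaning of each argument is not spelled out in this excerpt.
def tsmap_pixel_args(script, configfile, xcoord, ycoord, ipix, jpix):
    """Assemble the argv list expected by a single-pixel TS-map fit (illustrative only)."""
    return [script, configfile, str(float(xcoord)), str(float(ycoord)),
            str(int(ipix)), str(int(jpix))]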
def _PlotLC(self, folded=False):
    self.info("Reading files produced by enrico")
    LcOutPath = self.LCfolder + self.config['target']['name']

    # Results are stored in lists. This allows us to drop bins that failed
    Time = []
    TimeErr = []
    Flux = []
    FluxErr = []
    # FluxErrChi2 = []
    Index = []
    IndexErr = []
    Cutoff = []
    CutoffErr = []
    FluxForNpred = []
    # FluxErrForNpred = []
    Npred = []
    Npred_detected_indices = []
    TS = []
    uplim = []

    # Find the name used for the index parameter
    if ((self.config['target']['spectrum'] == 'PowerLaw' or
         self.config['target']['spectrum'] == 'PowerLaw2') and
            self.config['target']['redshift'] == 0):
        IndexName = 'Index'
        CutoffName = None
    elif (self.config['target']['spectrum'] == 'PLExpCutoff' or
          self.config['target']['spectrum'] == 'PLSuperExpCutoff'):
        IndexName = 'Index1'
        CutoffName = 'Cutoff'
        CutoffErrName = 'dCutoff'
    else:
        IndexName = 'alpha'
        CutoffName = None
    IndexErrName = 'd' + IndexName

    Nfail = 0
    for i in xrange(self.Nbin):
        CurConfig = get_config(self.configfile[i])
        # Read the result. If it fails, the bin has not been computed and a warning is printed.
        try:
            ResultDic = utils.ReadResult(CurConfig)
            if ResultDic == {}:
                raise(ValueError)
        except:
            self._errorReading("Fail reading config file", i)
            Nfail += 1
            continue

        # Update the time and time-error arrays
        Time.append((ResultDic.get("tmax") + ResultDic.get("tmin")) / 2.)
        TimeErr.append((ResultDic.get("tmax") - ResultDic.get("tmin")) / 2.)
        # Check if an UL has been computed. The error is set to zero for the TGraph.
        if ResultDic.has_key('Ulvalue'):
            uplim.append(1)
            Flux.append(ResultDic.get("Ulvalue"))
            # FluxErr.append(0)
            # FluxErrChi2.append(ResultDic.get("dFlux"))
            # Index.append(ResultDic.get(IndexName))
            # IndexErr.append(0)
        else:
            uplim.append(0)
            Flux.append(ResultDic.get("Flux"))
            FluxErr.append(ResultDic.get("dFlux"))
            # FluxErrChi2.append(ResultDic.get("dFlux"))
            Index.append(ResultDic.get(IndexName))
            IndexErr.append(ResultDic.get(IndexErrName))
            # if CutoffName is not None:
            #     Cutoff.append(ResultDic.get(CutoffName))
            #     CutoffErr.append(ResultDic.get(CutoffErrName))
        # FluxErrForNpred.append(ResultDic.get("dFlux"))
        FluxForNpred.append(ResultDic.get("Flux"))
        # Get the Npred and TS values
        Npred.append(ResultDic.get("Npred"))
        TS.append(ResultDic.get("TS"))
        if (CurConfig['LightCurve']['TSLightCurve'] < float(ResultDic.get("TS"))):
            Npred_detected_indices.append(i - Nfail)

    # change the lists into np arrays
    # TS = np.array(TS)
    Npred = np.asarray(Npred)
    Npred_detected = np.asarray(Npred[Npred_detected_indices])
    Time = np.asarray(Time)
    TimeErr = np.asarray(TimeErr)
    Flux = np.asarray(Flux)
    FluxErr = np.asarray(FluxErr)
    # Index = np.array(Index)
    # IndexErr = np.array(IndexErr)
    # Cutoff = np.array(Cutoff)
    # CutoffErr = np.array(CutoffErr)
    FluxForNpred = np.asarray(FluxForNpred)
    # FluxErrForNpred = np.array(FluxErrForNpred)
    uplim = np.asarray(uplim, dtype=bool)

    # Plot the diagnostic plots if asked.
    # Plots are: Npred vs flux
    #            TS vs Time
    if self.config['LightCurve']['DiagnosticPlots'] == 'yes' and len(Npred) > 0:
        # plot Npred vs flux
        plt.figure()
        NdN = np.asarray(Npred) / np.sqrt(Npred)
        FdF = np.asarray(FluxForNpred) / (np.asarray(FluxErr) + 1e-20)
        plt.errorbar(NdN, FdF, fmt='+', color='black')

        if len(Npred_detected) > 2:
            NdN = np.asarray(Npred_detected) / np.sqrt(Npred_detected)
            FdF = np.asarray(FluxForNpred[Npred_detected_indices]) / (np.asarray(FluxErr[Npred_detected_indices]) + 1e-20)
            plt.errorbar(NdN, FdF, fmt='+', color='red')

            popt, _ = scipy.optimize.curve_fit(pol1, NdN, FdF, p0=[0, 1])  # , sigma=dydata)

            for i in xrange(len(FluxForNpred)):
                if FluxForNpred[i] / FluxErr[i] > 2 * pol1(sqrt(Npred[i]), popt[0], popt[1]):
                    self._errorReading("problem in errors calculation for", i)
                    print "Flux +/- error = ", FluxForNpred[i], " +/- ", FluxErr[i]
                    print "V(Npred) = ", sqrt(Npred[i])
                    print

            plt.plot(np.array([0, max(NdN)]),
                     pol1(np.array([0, max(NdN)]), popt[0], popt[1]),
                     '--', color='black')
            plt.xlabel(r"${\rm Npred/\sqrt{Npred}}$")
            plt.ylabel(r"${\rm Flux/\Delta Flux}$")
            plt.savefig(LcOutPath + "_Npred.png", dpi=150, facecolor='w', edgecolor='w',
                        orientation='portrait', papertype=None, format=None,
                        transparent=False, bbox_inches=None, pad_inches=0.1,
                        frameon=None)
        else:
            print "No Npred Plot produced"

        # plot TS vs Time
        plt.figure()
        plt.xlabel(r"Time (s)")
        plt.ylabel(r"Test Statistic")
        plt.errorbar(x=Time, y=TS, xerr=TimeErr, fmt='+', color='black', ls='None')
        plt.ylim(ymin=min(TS) * 0.8, ymax=max(TS) * 1.2)
        plt.xlim(xmin=max(plt.xlim()[0], 1.02 * min(Time) - 0.02 * max(Time)),
                 xmax=min(plt.xlim()[1], 1.02 * max(Time) - 0.02 * min(Time)))

        # Move the offset to the axis label
        ax = plt.gca()
        ax.get_yaxis().get_major_formatter().set_useOffset(False)
        offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
        if (offset_factor != 0):
            ax.set_yticklabels([float(round(k, 5)) for k in ax.get_yticks() * 10**(-offset_factor)])
            ax.yaxis.set_label_text(ax.yaxis.get_label_text() + r" [${\times 10^{%d}}$]" % offset_factor)

        # Secondary axis with MJD
        mjdaxis = ax.twiny()
        mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
        mjdaxis.set_xlabel(r"Time (MJD)")
        mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
        plt.setp(mjdaxis.xaxis.get_majorticklabels(), rotation=15)
        plt.tight_layout()

        plt.savefig(LcOutPath + "_TS.png", dpi=150, facecolor='w', edgecolor='w',
                    orientation='portrait', papertype=None, format=None,
                    transparent=False, bbox_inches=None, pad_inches=0.1,
                    frameon=None)

    # Plot the LC itself. This function returns a TH2F for a nice plot,
    # a TGraph, and a list of TArrow for the ULs
    # if folded:
    #     phase = np.linspace(0,1,self.Nbin+1)
    #     Time = (phase[1:]+phase[:-1])/2.
    #     TimeErr = (phase[1:]-phase[:-1])/2.
    #     gTHLC,TgrLC,ArrowLC = plotting.PlotFoldedLC(Time,TimeErr,Flux,FluxErr)
    #     gTHIndex,TgrIndex,ArrowIndex = plotting.PlotFoldedLC(Time,TimeErr,Index,IndexErr)
    #     if CutoffName is not None:
    #         gTHCutoff,TgrCutoff,ArrowCutoff = plotting.PlotFoldedLC(Time,TimeErr,Cutoff,CutoffErr)
    # else :
    #     gTHLC,TgrLC,ArrowLC = plotting.PlotLC(Time,TimeErr,Flux,FluxErr)
    #     gTHIndex,TgrIndex,ArrowIndex = plotting.PlotLC(Time,TimeErr,Index,IndexErr)
    #     if CutoffName is not None:
    #         gTHCutoff,TgrCutoff,ArrowCutoff = plotting.PlotFoldedLC(Time,TimeErr,Cutoff,CutoffErr)

    # xmin = min(Time) - max(TimeErr) * 10
    # xmax = max(Time) + max(TimeErr) * 10
    # ymin = min(Flux) - max(FluxErr) * 1.3
    # ymax = max(Flux) + max(FluxErr) * 1.3
    plt.figure()
    plt.xlabel(r"Time (s)")
    plt.ylabel(r"${\rm Flux\ (photon\ cm^{-2}\ s^{-1})}$")
    # plt.ylim(ymin=ymin,ymax=ymax)
    # plt.xlim(xmin=xmin,xmax=xmax)
    # plt.errorbar(Time,Flux,xerr=TimeErr,yerr=FluxErr,
    #              fmt='o',color='black',ls='None',uplims=uplim)
    plot_errorbar_withuls(Time, TimeErr, TimeErr, Flux, FluxErr, FluxErr,
                          uplim, bblocks=True)

    plt.ylim(ymin=max(plt.ylim()[0], np.percentile(Flux[~uplim], 1) * 0.1),
             ymax=min(plt.ylim()[1], np.percentile(Flux[~uplim], 99) * 2.0))
    plt.xlim(xmin=max(plt.xlim()[0], 1.02 * min(Time) - 0.02 * max(Time)),
             xmax=min(plt.xlim()[1], 1.02 * max(Time) - 0.02 * min(Time)))

    # Move the offset to the axis label
    ax = plt.gca()
    ax.get_yaxis().get_major_formatter().set_useOffset(False)
    offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
    if (offset_factor != 0):
        ax.set_yticklabels([float(round(k, 5)) for k in ax.get_yticks() * 10**(-offset_factor)])
        ax.yaxis.set_label_text(ax.yaxis.get_label_text() + r" [${\times 10^{%d}}$]" % offset_factor)

    # Secondary axis with MJD
    mjdaxis = ax.twiny()
    mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
    mjdaxis.set_xlabel(r"Time (MJD)")
    mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
    plt.setp(mjdaxis.xaxis.get_majorticklabels(), rotation=15)
    plt.tight_layout()

    plt.savefig(LcOutPath + "_LC.png", dpi=150, facecolor='w', edgecolor='w',
                orientation='portrait', papertype=None, format=None,
                transparent=False, bbox_inches=None, pad_inches=0.1,
                frameon=None)

    if self.config["LightCurve"]["SpectralIndex"] == 0:
        plt.figure()
        plt.xlabel(r"Time (s)")
        plt.ylabel(r"${\rm Index}$")
        Index = np.asarray(Index)
        IndexErr = np.asarray(IndexErr)
        uplimIndex = uplim  # + Index<0.55
        plot_errorbar_withuls(Time[~uplimIndex], TimeErr[~uplimIndex], TimeErr[~uplimIndex],
                              Index[~uplimIndex], IndexErr[~uplimIndex], IndexErr[~uplimIndex],
                              uplimIndex[~uplimIndex], bblocks=True)

        plt.ylim(ymin=max(plt.ylim()[0], np.percentile(Index[~uplimIndex], 1) * 0.1),
                 ymax=min(plt.ylim()[1], np.percentile(Index[~uplimIndex], 99) * 2.0))
        plt.xlim(xmin=max(plt.xlim()[0], 1.02 * min(Time) - 0.02 * max(Time)),
                 xmax=min(plt.xlim()[1], 1.02 * max(Time) - 0.02 * min(Time)))

        # Move the offset to the axis label
        ax = plt.gca()
        ax.get_yaxis().get_major_formatter().set_useOffset(False)
        offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
        if (offset_factor != 0):
            ax.set_yticklabels([float(round(k, 5)) for k in ax.get_yticks() * 10**(-offset_factor)])
            ax.yaxis.set_label_text(ax.yaxis.get_label_text() + r" [${\times 10^{%d}}$]" % offset_factor)

        # Secondary axis with MJD
        mjdaxis = ax.twiny()
        mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
        mjdaxis.set_xlabel(r"Time (MJD)")
        mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
        plt.setp(mjdaxis.xaxis.get_majorticklabels(), rotation=15)
        plt.tight_layout()

        plt.savefig(LcOutPath + "_Index.png", dpi=150, facecolor='w', edgecolor='w',
                    orientation='portrait', papertype=None, format=None,
                    transparent=False, bbox_inches=None, pad_inches=0.1,
                    frameon=None)

    # compute Fvar and the probability of being constant
    self.info("Flux vs Time: infos")
    self.FitWithCst(Time, Flux, FluxErr)
    self.Fvar(Flux, FluxErr)

    # ### plot and save the Index LC
    # CanvIndex = ROOT.TCanvas()
    # gTHIndex.Draw()
    # TgrIndex.Draw('zP')
    # # plot the ul as arrow
    # for i in xrange(len(ArrowIndex)):
    #     ArrowIndex[i].Draw()
    # # Save the canvas in the LightCurve subfolder
    # if self.config["LightCurve"]["SpectralIndex"] == 0 :
    #     self.info("Index vs Time")
    #     self.FitWithCst(Time,Index,IndexErr)
    # CanvIndex.Print(LcOutPath+'_Index.png')
    # CanvIndex.Print(LcOutPath+'_Index.eps')
    # CanvIndex.Print(LcOutPath+'_Index.C')

    # Dump into an ASCII file
    lcfilename = LcOutPath + "_results.dat"
    self.info("Write to Ascii file : " + lcfilename)
    WriteToAscii(Time, TimeErr, Flux, FluxErr, Index, IndexErr,
                 Cutoff, CutoffErr, TS, Npred, lcfilename)

    if self.config["LightCurve"]['ComputeVarIndex'] == 'yes':
        self.VariabilityIndex()
def PlotDataPoints(config, pars):
    """Collect the data points/ULs and generate a TGraph for the points
    and a list of TArrow for the ULs. Everything is in SED format."""

    # Preparation + declaration of arrays
    arrows = []
    NEbin = int(config['Ebin']['NumEnergyBins'])
    lEmax = np.log10(float(config['energy']['emax']))
    lEmin = np.log10(float(config['energy']['emin']))
    Epoint = np.zeros(NEbin)
    EpointErrp = np.zeros(NEbin)
    EpointErrm = np.zeros(NEbin)
    Fluxpoint = np.zeros(NEbin)
    FluxpointErrp = np.zeros(NEbin)
    FluxpointErrm = np.zeros(NEbin)
    ener = np.logspace(lEmin, lEmax, NEbin + 1)

    mes = Loggin.Message()
    mes.info("Save Ebin results in ", pars.PlotName + ".Ebin.dat")
    dumpfile = open(pars.PlotName + ".Ebin.dat", 'w')
    dumpfile.write("# Energy (MeV)\tEmin (MeV)\tEmax (MeV)\tE**2. dN/dE (erg.cm-2s-1)\tGaussianError\tMinosNegativeError\tMinosPositiveError\n")

    from enrico.constants import EbinPath
    for i in xrange(NEbin):  # Loop over the energy bins
        E = int(pow(10, (np.log10(ener[i + 1]) + np.log10(ener[i])) / 2))
        filename = (config['out'] + '/' + EbinPath + str(NEbin) + '/' +
                    config['target']['name'] + "_" + str(i) + ".conf")
        try:  # read the config file of each data point
            CurConf = get_config(filename)
            mes.info("Reading " + filename)
            results = utils.ReadResult(CurConf)
        except:
            mes.warning("cannot read the results of energy " + str(E))
            continue

        # fill the energy arrays
        Epoint[i] = E
        EpointErrm[i] = E - results.get("Emin")
        EpointErrp[i] = results.get("Emax") - E
        dprefactor = 0

        # Compute the flux or the UL (in SED format)
        if results.has_key('Ulvalue'):
            PrefUl = utils.Prefactor(results.get("Ulvalue"), results.get("Index"),
                                     results.get("Emin"), results.get("Emax"), E)
            Fluxpoint[i] = MEV_TO_ERG * PrefUl * Epoint[i] ** 2
            arrows.append(ROOT.TArrow(Epoint[i], Fluxpoint[i], Epoint[i],
                                      Fluxpoint[i] * 0.5, 0.02, "|>"))
        else:  # not an UL: compute points + errors
            Fluxpoint[i] = MEV_TO_ERG * results.get("Prefactor") * Epoint[i] ** 2
            dprefactor = results.get("dPrefactor")
            try:
                down = abs(results.get("dPrefactor-"))
                up = results.get("dPrefactor+")
                if down == 0 or up == 0:
                    mes.error("cannot get Error value")
                FluxpointErrp[i] = MEV_TO_ERG * up * Epoint[i] ** 2
                FluxpointErrm[i] = MEV_TO_ERG * down * Epoint[i] ** 2
            except:
                try:
                    err = MEV_TO_ERG * dprefactor * Epoint[i] ** 2
                    FluxpointErrp[i] = err
                    FluxpointErrm[i] = err
                except:
                    pass

        mes.info("Energy bins results")
        print "Energy = ", Epoint[i]
        print "E**2. dN/dE = ", Fluxpoint[i], " + ", FluxpointErrp[i], " - ", FluxpointErrm[i]

        # Save the data point in an ASCII file
        dumpfile.write(str(Epoint[i]) + "\t" + str(results.get("Emin")) + "\t" +
                       str(results.get("Emax")) + "\t" + str(Fluxpoint[i]) + "\t" +
                       str(MEV_TO_ERG * dprefactor * Epoint[i] ** 2) + "\t" +
                       str(FluxpointErrm[i]) + "\t" + str(FluxpointErrp[i]) + "\n")

    # create a TGraph for the points
    tgpoint = ROOT.TGraphAsymmErrors(NEbin, Epoint, Fluxpoint, EpointErrm,
                                     EpointErrp, FluxpointErrm, FluxpointErrp)
    tgpoint.SetMarkerStyle(20)
    dumpfile.close()
    return tgpoint, arrows
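# Hedged sketch of the SED conversion used in PlotDataPoints: a differential flux ("prefactor",
# in photons cm^-2 s^-1 MeV^-1 at energy E) is turned into E^2 dN/dE in erg cm^-2 s^-1.
# The constant below assumes MEV_TO_ERG is the usual 1 MeV = 1.602e-6 erg conversion.
MEV_TO_ERG_SKETCH = 1.602176e-6

def sed_point(prefactor, energy_mev):
    """Return E**2 dN/dE in erg cm^-2 s^-1 for a differential flux evaluated at energy_mev."""
    return MEV_TO_ERG_SKETCH * prefactor * energy_mev ** 2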
def _PlotLC(self, folded=False): root_style.RootStyle() #Nice plot style print "Reading files produced by enrico" LcOutPath = self.LCfolder + self.config['target']['name'] #Result are stored into list. This allow to get rid of the bin which failled Time = [] TimeErr = [] Flux = [] FluxErr = [] FluxForNpred = [] FluxErrForNpred = [] Npred = [] Npred_detected_indices = [] TS = [] for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) #Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try: ResultDic = utils.ReadResult(CurConfig) except: self._errorReading("fail reading config file", i) continue #Update the time and time error array Time.append((ResultDic.get("tmax") + ResultDic.get("tmin")) / 2.) TimeErr.append( (ResultDic.get("tmax") - ResultDic.get("tmin")) / 2.) #Check is an ul have been computed. The error is set to zero for the TGraph. if ResultDic.has_key('Ulvalue'): Flux.append(ResultDic.get("Ulvalue")) FluxErr.append(0) else: Flux.append(ResultDic.get("Flux")) FluxErr.append(ResultDic.get("dFlux")) FluxErrForNpred.append(ResultDic.get("dFlux")) FluxForNpred.append(ResultDic.get("Flux")) #Get the Npred and TS values Npred.append(ResultDic.get("Npred")) TS.append(ResultDic.get("TS")) if (CurConfig['LightCurve']['TSLightCurve'] < float( ResultDic.get("TS"))): Npred_detected_indices.append(i) #change the list into np array TS = np.array(TS) Npred = np.array(Npred) Npred_detected = Npred[Npred_detected_indices] Time = np.array(Time) TimeErr = np.array(TimeErr) Flux = np.array(Flux) FluxErr = np.array(FluxErr) FluxForNpred = np.array(FluxForNpred) FluxErrForNpred = np.array(FluxErrForNpred) fittedFunc = self.CheckNpred( Npred, FluxForNpred, FluxErrForNpred, Npred_detected_indices) #check the errors calculation #Plots the diagnostic plots is asked # Plots are : Npred vs flux # TS vs Time if self.config['LightCurve']['DiagnosticPlots'] == 'yes': gTHNpred, TgrNpred = plotting.PlotNpred(Npred, FluxForNpred, FluxErrForNpred) CanvNpred = _GetCanvas() gTHNpred.Draw() TgrNpred.Draw('zP') _, TgrNpred_detected = plotting.PlotNpred( Npred_detected, Flux[Npred_detected_indices], FluxErrForNpred[Npred_detected_indices]) TgrNpred_detected.SetLineColor(2) TgrNpred_detected.SetMarkerColor(2) TgrNpred_detected.Draw('zP') fittedFunc.Draw("SAME") CanvNpred.Print(LcOutPath + "_Npred.eps") CanvNpred.Print(LcOutPath + "_Npred.C") gTHTS, TgrTS = plotting.PlotTS(Time, TimeErr, TS) CanvTS = _GetCanvas() gTHTS.Draw() TgrTS.Draw('zP') CanvTS.Print(LcOutPath + '_TS.eps') CanvTS.Print(LcOutPath + '_TS.C') # Plot the LC itself. This function return a TH2F for a nice plot # a TGraph and a list of TArrow for the ULs if folded: phase = np.linspace(0, 1, self.Nbin + 1) Time = (phase[1:] + phase[:-1]) / 2. TimeErr = (phase[1:] - phase[:-1]) / 2. 
gTHLC, TgrLC, ArrowLC = plotting.PlotFoldedLC( Time, TimeErr, Flux, FluxErr) else: gTHLC, TgrLC, ArrowLC = plotting.PlotLC(Time, TimeErr, Flux, FluxErr) CanvLC = ROOT.TCanvas() gTHLC.Draw() TgrLC.Draw('zP') #plot the ul as arrow for i in xrange(len(ArrowLC)): ArrowLC[i].Draw() # compute Fvar and probability of being cst self.FitWithCst(TgrLC) self.Fvar(Flux, FluxErr) #Save the canvas in the LightCurve subfolder CanvLC.Print(LcOutPath + '_LC.eps') CanvLC.Print(LcOutPath + '_LC.C') #Dump into ascii lcfilename = LcOutPath + "_results.dat" print "Write to Ascii file : ", lcfilename WriteToAscii(Time, TimeErr, Flux, FluxErr, TS, Npred, lcfilename) if self.config["LightCurve"]['ComputeVarIndex'] == 'yes': self.VariabilityIndex()
def PlotLC(self):
    '''Plot a light curve which has been generated previously'''
    root_style.RootStyle()  # Nice plot style

    print "Reading files produced by enrico"
    LcOutPath = self.LCfolder + self.config['target']['name']

    # Results are stored in lists. This allows us to drop bins that failed
    Time = []
    TimeErr = []
    Flux = []
    FluxErr = []
    FluxForNpred = []
    FluxErrForNpred = []
    Npred = []
    Npred_detected_indices = []
    TS = []

    self.PrepareLC()  # Get the config files
    for i in xrange(self.Nbin):
        CurConfig = get_config(self.configfile[i])
        # Read the result. If it fails, the bin has not been computed and a warning is printed.
        try:
            ResultDic = utils.ReadResult(CurConfig)
        except:
            self._errorReading("fail reading config file", i)
            continue

        # Update the time and time-error arrays
        Time.append((ResultDic.get("tmax") + ResultDic.get("tmin")) / 2.)
        TimeErr.append((ResultDic.get("tmax") - ResultDic.get("tmin")) / 2.)
        # Check if an UL has been computed. The error is set to zero for the TGraph.
        if ResultDic.has_key('Ulvalue'):
            Flux.append(ResultDic.get("Ulvalue"))
            FluxErr.append(0)
        else:
            Flux.append(ResultDic.get("Flux"))
            FluxErr.append(ResultDic.get("dFlux"))
        FluxErrForNpred.append(ResultDic.get("dFlux"))
        FluxForNpred.append(ResultDic.get("Flux"))
        # Get the Npred and TS values
        Npred.append(ResultDic.get("Npred"))
        TS.append(ResultDic.get("TS"))
        if (CurConfig['LightCurve']['TSLightCurve'] < float(ResultDic.get("TS"))):
            Npred_detected_indices.append(i)

    # change the lists into np arrays
    TS = np.array(TS)
    Npred = np.array(Npred)
    Npred_detected = Npred[Npred_detected_indices]
    Time = np.array(Time)
    TimeErr = np.array(TimeErr)
    Flux = np.array(Flux)
    FluxErr = np.array(FluxErr)
    FluxForNpred = np.array(FluxForNpred)
    FluxErrForNpred = np.array(FluxErrForNpred)

    fittedFunc = self.CheckNpred(Npred, FluxForNpred, FluxErrForNpred,
                                 Npred_detected_indices)  # check the error calculation

    # Plot the diagnostic plots if asked.
    # Plots are: Npred vs flux
    #            TS vs Time
    if self.config['LightCurve']['DiagnosticPlots'] == 'yes':
        gTHNpred, TgrNpred = plotting.PlotNpred(Npred, FluxForNpred, FluxErrForNpred)
        CanvNpred = _GetCanvas()
        gTHNpred.Draw()
        TgrNpred.Draw('zP')

        _, TgrNpred_detected = plotting.PlotNpred(Npred_detected,
                                                  Flux[Npred_detected_indices],
                                                  FluxErrForNpred[Npred_detected_indices])
        TgrNpred_detected.SetLineColor(2)
        TgrNpred_detected.SetMarkerColor(2)
        TgrNpred_detected.Draw('zP')
        fittedFunc.Draw("SAME")
        CanvNpred.Print(LcOutPath + "_Npred.eps")
        CanvNpred.Print(LcOutPath + "_Npred.C")

        gTHTS, TgrTS = plotting.PlotTS(Time, TimeErr, TS)
        CanvTS = _GetCanvas()
        gTHTS.Draw()
        TgrTS.Draw('zP')
        CanvTS.Print(LcOutPath + '_TS.eps')
        CanvTS.Print(LcOutPath + '_TS.C')

    # Plot the LC itself. This function returns a TH2F for a nice plot,
    # a TGraph, and a list of TArrow for the ULs
    gTHLC, TgrLC, ArrowLC = plotting.PlotLC(Time, TimeErr, Flux, FluxErr)
    CanvLC = ROOT.TCanvas()
    gTHLC.Draw()
    TgrLC.Draw('zP')

    # plot the ULs as arrows
    for i in xrange(len(ArrowLC)):
        ArrowLC[i].Draw()

    # compute Fvar and the probability of being constant
    self.FitWithCst(TgrLC)
    self.Fvar(Flux, FluxErr)

    # Save the canvas in the LightCurve subfolder
    CanvLC.Print(LcOutPath + '_LC.eps')
    CanvLC.Print(LcOutPath + '_LC.C')

    # Dump into an ASCII file
    lcfilename = LcOutPath + "_results.dat"
    print "Write to Ascii file : ", lcfilename
    WriteToAscii(Time, TimeErr, Flux, FluxErr, TS, Npred, lcfilename)

    if self.config["LightCurve"]['ComputeVarIndex'] == 'yes':
        self.VariabilityIndex()
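# The Fvar(Flux, FluxErr) calls above compute the fractional variability of the light curve.
# Below is a hedged sketch of the standard estimator (Vaughan et al. 2003); the enrico
# implementation may differ in detail.
import numpy as np

def fractional_variability(flux, flux_err):
    """Return Fvar = sqrt((S^2 - <sigma_err^2>) / <F>^2), or 0 if the excess variance is negative."""
    flux = np.asarray(flux, dtype=float)
    flux_err = np.asarray(flux_err, dtype=float)
    excess = np.var(flux, ddof=1) - np.mean(flux_err ** 2)
    return np.sqrt(excess) / np.mean(flux) if excess > 0 else 0.0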
def VariabilityIndex(self): """Compute the variability index as in the 2FLG catalogue. (see Nolan et al, 2012)""" LcOutPath = self.LCfolder + self.config['target']['name'] utils._log('Computing Variability index ') self.config['Spectrum']['FitsGeneration'] = 'no' # ValueDC = self.GetDCValue() ResultDicDC = utils.ReadResult(self.config) LogL1 = [] LogL0 = [] Time = [] for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) #Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try: ResultDic = utils.ReadResult(CurConfig) except: self._errorReading("fail reading the config file ", i) # print "WARNING : fail reading the config file : ",CurConfig # print "Job Number : ",i # print "Please have a look at this job log file" continue # LogL1.append(ResultDic.get("log_like")) #Update the time and time error array Time.append((ResultDic.get("tmax") + ResultDic.get("tmin")) / 2.) ############################################################## # Compute the loglike value using the DC flux or prefactor ############################################################## # Create one obs instance CurConfig['Spectrum']['FitsGeneration'] = 'no' _, Fit = GenAnalysisObjects(CurConfig, verbose=0) #be quiet Fit.ftol = float(self.config['fitting']['ftol']) #Spectral index management! self.info("Spectral index frozen to a value of 2") utils.FreezeParams(Fit, self.srcname, 'Index', -2) LogL1.append( -Fit.fit(0, optimizer=CurConfig['fitting']['optimizer'])) Model_type = Fit.model.srcs[self.srcname].spectrum().genericName() if (Model_type == 'PowerLaw'): utils.FreezeParams(Fit, self.srcname, 'Prefactor', utils.fluxNorm(ResultDicDC['Prefactor'])) if (Model_type == 'PowerLaw2'): utils.FreezeParams(Fit, self.srcname, 'Integral', utils.fluxNorm(ResultDicDC['Integral'])) LogL0.append( -Fit.fit(0, optimizer=CurConfig['fitting']['optimizer'])) del Fit #Clean memory Can = _GetCanvas() TgrDC = ROOT.TGraph(len(Time), np.array(Time), np.array(LogL0)) TgrDC.Draw("ALP*") TgrDC = ROOT.TGraph(len(Time), np.array(Time), np.array(LogL0)) TgrDC.SetMarkerColor(2) TgrDC.Draw("PL*") #Save the canvas in the LightCurve subfolder Can.Print(LcOutPath + '_VarIndex.eps') Can.Print(LcOutPath + '_VarIndex.C') self.info("Variability index calculation") print "\t TSvar = ", 2 * (sum(LogL1) - sum(LogL0)) print "\t NDF = ", len(LogL0) - 1 print "\t Chi2 prob = ", ROOT.TMath.Prob(2 * (sum(LogL1) - sum(LogL0)), len(LogL0) - 1) print
def _PlotLC(self,folded=False): self.info("Reading files produced by enrico") LcOutPath = self.LCfolder + self.config['target']['name'] #Result are stored into list. This allow to get rid of the bin which failled Time = [] TimeErr = [] Flux = [] FluxErr = [] # FluxErrChi2 = [] Index = [] IndexErr = [] Cutoff = [] CutoffErr = [] FluxForNpred = [] # FluxErrForNpred = [] Npred = [] Npred_detected_indices = [] TS = [] uplim = [] # Find name used for index parameter if (self.config['target']['spectrum'] == 'PowerLaw' or self.config['target']['spectrum'] == 'PowerLaw2'): IndexName = 'Index' CutoffName = None elif (self.config['target']['spectrum'] == 'PLExpCutoff' or self.config['target']['spectrum'] == 'PLSuperExpCutoff'): IndexName = 'Index1' CutoffName = 'Cutoff' CutoffErrName = 'dCutoff' else: IndexName = 'alpha' CutoffName = None IndexErrName = 'd' + IndexName Nfail = 0 for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) #Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try : ResultDic = utils.ReadResult(CurConfig) if ResultDic == {}: raise(ValueError) except : self._errorReading("Fail reading config file",i) Nfail+=1 continue #Update the time and time error array Time.append((ResultDic.get("tmax")+ResultDic.get("tmin"))/2.) TimeErr.append((ResultDic.get("tmax")-ResultDic.get("tmin"))/2.) #Check is an ul have been computed. The error is set to zero for the TGraph. if ResultDic.has_key('Ulvalue') : uplim.append(1) Flux.append(ResultDic.get("Ulvalue")) # FluxErr.append(0) # FluxErrChi2.append(ResultDic.get("dFlux")) # Index.append(ResultDic.get(IndexName)) # IndexErr.append(0) else : uplim.append(0) Flux.append(ResultDic.get("Flux")) FluxErr.append(ResultDic.get("dFlux")) # FluxErrChi2.append(ResultDic.get("dFlux")) Index.append(ResultDic.get(IndexName)) IndexErr.append(ResultDic.get(IndexErrName)) # if CutoffName is not None: # Cutoff.append(ResultDic.get(CutoffName)) # CutoffErr.append(ResultDic.get(CutoffErrName)) # FluxErrForNpred.append(ResultDic.get("dFlux")) FluxForNpred.append(ResultDic.get("Flux")) #Get the Npred and TS values Npred.append(ResultDic.get("Npred")) TS.append(ResultDic.get("TS")) if (CurConfig['LightCurve']['TSLightCurve']<float(ResultDic.get("TS"))): Npred_detected_indices.append(i-Nfail) # #change the list into np array # TS = np.array(TS) Npred = np.asarray(Npred) Npred_detected = np.asarray(Npred[Npred_detected_indices]) Time = np.asarray(Time) TimeErr = np.asarray(TimeErr) Flux = np.asarray(Flux) FluxErr = np.asarray(FluxErr) # Index = np.array(Index) # IndexErr = np.array(IndexErr) # Cutoff = np.array(Cutoff) # CutoffErr = np.array(CutoffErr) FluxForNpred = np.asarray(FluxForNpred) # FluxErrForNpred = np.array(FluxErrForNpred) uplim = np.asarray(uplim,dtype=bool) #Plots the diagnostic plots is asked # Plots are : Npred vs flux # TS vs Time if self.config['LightCurve']['DiagnosticPlots'] == 'yes' and len(Npred)>0: #plot Npred vs flux plt.figure() NdN = np.asarray(Npred) /np.sqrt(Npred) FdF = np.asarray(FluxForNpred) / (np.asarray(FluxErr) + 1e-20) plt.errorbar(NdN, FdF,fmt='+',color='black') if len(Npred_detected)>0: NdN = np.asarray(Npred_detected) /np.sqrt(Npred_detected) FdF = np.asarray(FluxForNpred[Npred_detected_indices]) / (np.asarray(FluxErr[Npred_detected_indices]) + 1e-20) plt.errorbar(NdN, FdF,fmt='+',color='red') popt,_ = scipy.optimize.curve_fit(pol1, NdN, FdF, p0=[0,1])#, sigma=dydata) for i in xrange(len(FluxForNpred)): if 
FluxForNpred[i]/FluxErr[i]>2*pol1(sqrt(Npred[i]),popt[0],popt[1]): self._errorReading("problem in errors calculation for",i) print "Flux +/- error = ",FluxForNpred[i]," +/- ",FluxErr[i] print "V(Npred) = ",sqrt(Npred[i]) print plt.plot(np.array([0,max(NdN)]),pol1(np.array([0,max(NdN)]),popt[0],popt[1]),'--',color='black') plt.xlabel(r"${\rm Npred/\sqrt{Npred}}$") plt.ylabel(r"${\rm Flux/\Delta Flux}$") plt.savefig(LcOutPath+"_Npred.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) else : print "No Npred Plot produced" #plot TS vs Time plt.figure() plt.xlabel(r"Time (s)") plt.ylabel(r"Test Statistic") plt.errorbar(x=Time,y=TS,xerr=TimeErr,fmt='+',color='black',ls='None') plt.ylim(ymin=min(TS)*0.8,ymax=max(TS)*1.2) plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time)-0.02*max(Time)),xmax=min(plt.xlim()[1],1.02*max(Time)-0.02*min(Time))) # Move the offset to the axis label ax = plt.gca() ax.get_yaxis().get_major_formatter().set_useOffset(False) offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim())))) if (offset_factor != 0): ax.set_yticklabels([float(round(k,5)) for k in ax.get_yticks()*10**(-offset_factor)]) ax.yaxis.set_label_text(ax.yaxis.get_label_text() + r" [${\times 10^{%d}}$]" %offset_factor) # Secondary axis with MJD mjdaxis = plt.twiny() mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()]) mjdaxis.set_xlabel(r"Time (MJD)") plt.tight_layout() plt.savefig(LcOutPath+"_TS.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) # Plot the LC itself. This function return a TH2F for a nice plot # a TGraph and a list of TArrow for the ULs # if folded: # phase = np.linspace(0,1,self.Nbin+1) # Time = (phase[1:]+phase[:-1])/2. # TimeErr = (phase[1:]-phase[:-1])/2. 
# gTHLC,TgrLC,ArrowLC = plotting.PlotFoldedLC(Time,TimeErr,Flux,FluxErr) # gTHIndex,TgrIndex,ArrowIndex = plotting.PlotFoldedLC(Time,TimeErr,Index,IndexErr) # if CutoffName is not None: # gTHCutoff,TgrCutoff,ArrowCutoff = plotting.PlotFoldedLC(Time,TimeErr,Cutoff,CutoffErr) # else : # gTHLC,TgrLC,ArrowLC = plotting.PlotLC(Time,TimeErr,Flux,FluxErr) # gTHIndex,TgrIndex,ArrowIndex = plotting.PlotLC(Time,TimeErr,Index,IndexErr) # if CutoffName is not None: # gTHCutoff,TgrCutoff,ArrowCutoff = plotting.PlotFoldedLC(Time,TimeErr,Cutoff,CutoffErr) # xmin = min(Time) - max(TimeErr) * 10 # xmax = max(Time) + max(TimeErr) * 10 # ymin = min(Flux) - max(FluxErr) * 1.3 # ymax = max(Flux) + max(FluxErr) * 1.3 plt.figure() plt.xlabel(r"Time (s)") plt.ylabel(r"${\rm Flux\ (photon\ cm^{-2}\ s^{-1})}$") # plt.ylim(ymin=ymin,ymax=ymax) # plt.xlim(xmin=xmin,xmax=xmax) #plt.errorbar(Time,Flux,xerr=TimeErr,yerr=FluxErr,fmt='o',color='black',ls='None',uplims=uplim) plot_errorbar_withuls(Time,TimeErr,TimeErr,Flux,FluxErr,FluxErr,uplim,bblocks=True) plt.ylim(ymin=max(plt.ylim()[0],np.percentile(Flux[~uplim],1)*0.1), ymax=min(plt.ylim()[1],np.percentile(Flux[~uplim],99)*2.0)) plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time)-0.02*max(Time)), xmax=min(plt.xlim()[1],1.02*max(Time)-0.02*min(Time))) # Move the offset to the axis label ax = plt.gca() ax.get_yaxis().get_major_formatter().set_useOffset(False) offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim())))) if (offset_factor != 0): ax.set_yticklabels([float(round(k,5)) for k in ax.get_yticks()*10**(-offset_factor)]) ax.yaxis.set_label_text(ax.yaxis.get_label_text() + r" [${\times 10^{%d}}$]" %offset_factor) # Secondary axis with MJD mjdaxis = plt.twiny() mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()]) mjdaxis.set_xlabel(r"Time (MJD)") plt.tight_layout() plt.savefig(LcOutPath+"_LC.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) if self.config["LightCurve"]["SpectralIndex"] == 0 : plt.figure() plt.xlabel(r"Time (s)") plt.ylabel(r"${\rm Index}$") Index = np.asarray(Index) IndexErr = np.asarray(IndexErr) uplimIndex = uplim + Index<0.55 plot_errorbar_withuls(Time[~uplimIndex],TimeErr[~uplimIndex],TimeErr[~uplimIndex], Index[~uplimIndex],IndexErr[~uplimIndex],IndexErr[~uplimIndex], uplimIndex[~uplimIndex],bblocks=True) plt.ylim(ymin=max(plt.ylim()[0],np.percentile(Index[~uplimIndex],1)*0.1),ymax=min(plt.ylim()[1],np.percentile(Index[~uplimIndex],99)*2.0)) plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time)-0.02*max(Time)),xmax=min(plt.xlim()[1],1.02*max(Time)-0.02*min(Time))) plt.savefig(LcOutPath+"_Index.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) # compute Fvar and probability of being cst self.info("Flux vs Time: infos") self.FitWithCst(Time,Flux,FluxErr) self.Fvar(Flux,FluxErr) # ### plot and save the Index LC # CanvIndex = ROOT.TCanvas() # gTHIndex.Draw() # TgrIndex.Draw('zP') # #plot the ul as arrow # for i in xrange(len(ArrowIndex)): # ArrowIndex[i].Draw() # #Save the canvas in the LightCurve subfolder # if self.config["LightCurve"]["SpectralIndex"] == 0 : # self.info("Index vs Time") # self.FitWithCst(Time,Index,IndexErr) # CanvIndex.Print(LcOutPath+'_Index.png') # CanvIndex.Print(LcOutPath+'_Index.eps') # CanvIndex.Print(LcOutPath+'_Index.C') #Dump into ascii lcfilename = LcOutPath+"_results.dat" self.info("Write to 
Ascii file : "+lcfilename) WriteToAscii(Time,TimeErr,Flux,FluxErr,Index,IndexErr,Cutoff,CutoffErr,TS,Npred,lcfilename) if self.config["LightCurve"]['ComputeVarIndex'] == 'yes': self.VariabilityIndex()
def VariabilityIndex(self): """Compute the variability index as in the 2FGL catalogue. (see Nolan et al, 2012)""" LcOutPath = self.LCfolder + self.config['target']['name'] utils._log('Computing Variability index ') self.config['Spectrum']['FitsGeneration'] = 'no' try : ResultDicDC = utils.ReadResult(self.generalconfig) except : self.warning("No results file found; please run enrico_sed first.") return LogL1 = [] LogL0 = [] Time = [] for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) #Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try : ResultDic = utils.ReadResult(CurConfig) except : self._errorReading("Fail reading the config file ",i) continue # LogL1.append(ResultDic.get("log_like")) #Update the time and time error array Time.append((ResultDic.get("tmax")+ResultDic.get("tmin"))/2.) ############################################################## # Compute the loglike value using the DC flux or prefactor ############################################################## # Create one obs instance CurConfig['Spectrum']['FitsGeneration'] = 'no' _,Fit = GenAnalysisObjects(CurConfig,verbose=0)#be quiet Fit.ftol = float(self.config['fitting']['ftol']) #Spectral index management! parameters = dict() parameters['Index'] = -2. parameters['alpha'] = +2. parameters['Index1'] = -2. parameters['beta'] = 0 parameters['Index2'] = 2. parameters['Cutoff'] = 30000. # set the cutoff to be high for key in parameters.keys(): try: utils.FreezeParams(Fit, self.srcname, key, parameters[key]) except: continue LogL1.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer'])) for key in ["norm","Prefactor","Integral"]: try: utils.FreezeParams(Fit,self.srcname,key, utils.fluxNorm(ResultsDicDC[key])) except: continue LogL0.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer'])) del Fit #Clean memory plt.figure() plt.xlabel("Time") plt.ylabel("Log(Like) Variability") plt.errorbar(Time,LogL0,fmt='o',color='black',ls='None') plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time)-0.02*max(Time)), xmax=min(plt.xlim()[1],1.02*max(Time)-0.02*min(Time))) # Move the offset to the axis label ax = plt.gca() ax.get_yaxis().get_major_formatter().set_useOffset(False) offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim())))) if (offset_factor != 0): ax.set_yticklabels([float(round(k,5)) \ for k in ax.get_yticks()*10**(-offset_factor)]) ax.yaxis.set_label_text(ax.yaxis.get_label_text() +\ r" [${\times 10^{%d}}$]" %offset_factor) # Secondary axis with MJD mjdaxis = ax.twiny() mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()]) mjdaxis.set_xlabel(r"Time (MJD)") mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False)) plt.setp( mjdaxis.xaxis.get_majorticklabels(), rotation=15 ) plt.tight_layout() plt.savefig(LcOutPath+"_VarIndex.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) self.info("Variability index calculation") print "\t TSvar = ",2*(sum(LogL1)-sum(LogL0)) print "\t NDF = ",len(LogL0)-1 print "\t Chi2 prob = ",1 - chi2.cdf(2*(sum(LogL1)-sum(LogL0)),len(LogL0)-1) print
def VariabilityIndex(self): """Compute the variability index as in the 2FLG catalogue. (see Nolan et al, 2012)""" LcOutPath = self.LCfolder + self.config['target']['name'] utils._log('Computing Variability index ') self.config['Spectrum']['FitsGeneration'] = 'no' try: ResultDicDC = utils.ReadResult(self.generalconfig) except: self.warning("No results file found; please run enrico_sed first.") return LogL1 = [] LogL0 = [] Time = [] for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) #Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try: ResultDic = utils.ReadResult(CurConfig) except: self._errorReading("fail reading the config file ", i) # print "WARNING : fail reading the config file : ",CurConfig # print "Job Number : ",i # print "Please have a look at this job log file" continue # LogL1.append(ResultDic.get("log_like")) #Update the time and time error array Time.append((ResultDic.get("tmax") + ResultDic.get("tmin")) / 2.) ############################################################## # Compute the loglike value using the DC flux or prefactor ############################################################## # Create one obs instance CurConfig['Spectrum']['FitsGeneration'] = 'no' _, Fit = GenAnalysisObjects(CurConfig, verbose=0) #be quiet Fit.ftol = float(self.config['fitting']['ftol']) #Spectral index management! parameters = dict() parameters['Index'] = -2. parameters['alpha'] = +2. parameters['Index1'] = -2. parameters['beta'] = 0 parameters['Index2'] = 2. parameters['Cutoff'] = 30000. # set the cutoff to be high for key in parameters.keys(): try: utils.FreezeParams(Fit, self.srcname, key, parameters[key]) except: continue LogL1.append( -Fit.fit(0, optimizer=CurConfig['fitting']['optimizer'])) for key in ["norm", "Prefactor", "Integral"]: try: utils.FreezeParams(Fit, self.srcname, key, utils.fluxNorm(ResultsDicDC[key])) except: continue LogL0.append( -Fit.fit(0, optimizer=CurConfig['fitting']['optimizer'])) del Fit #Clean memory Can = _GetCanvas() TgrDC = ROOT.TGraph(len(Time), np.array(Time), np.array(LogL0)) TgrDC.Draw("ALP*") TgrDC = ROOT.TGraph(len(Time), np.array(Time), np.array(LogL0)) TgrDC.SetMarkerColor(2) TgrDC.Draw("PL*") #Save the canvas in the LightCurve subfolder Can.Print(LcOutPath + '_VarIndex.eps') Can.Print(LcOutPath + '_VarIndex.C') self.info("Variability index calculation") print "\t TSvar = ", 2 * (sum(LogL1) - sum(LogL0)) print "\t NDF = ", len(LogL0) - 1 print "\t Chi2 prob = ", ROOT.TMath.Prob(2 * (sum(LogL1) - sum(LogL0)), len(LogL0) - 1) print
def VariabilityIndex(self): """Compute the variability index as in the 2FLG catalogue. (see Nolan et al, 2012)""" LcOutPath = self.LCfolder + self.config["target"]["name"] utils._log("Computing Variability index ") self.config["Spectrum"]["FitsGeneration"] = "no" # ValueDC = self.GetDCValue() ResultDicDC = utils.ReadResult(self.config) LogL1 = [] LogL0 = [] Time = [] for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) # Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try: ResultDic = utils.ReadResult(CurConfig) except: self._errorReading("fail reading the config file ", i) # print "WARNING : fail reading the config file : ",CurConfig # print "Job Number : ",i # print "Please have a look at this job log file" continue # LogL1.append(ResultDic.get("log_like")) # Update the time and time error array Time.append((ResultDic.get("tmax") + ResultDic.get("tmin")) / 2.0) ############################################################## # Compute the loglike value using the DC flux or prefactor ############################################################## # Create one obs instance CurConfig["Spectrum"]["FitsGeneration"] = "no" _, Fit = GenAnalysisObjects(CurConfig, verbose=0) # be quiet Fit.ftol = float(self.config["fitting"]["ftol"]) # Spectral index management! self.info("Spectral index frozen to a value of 2") utils.FreezeParams(Fit, self.srcname, "Index", -2) LogL1.append(-Fit.fit(0, optimizer=CurConfig["fitting"]["optimizer"])) Model_type = Fit.model.srcs[self.srcname].spectrum().genericName() if Model_type == "PowerLaw": utils.FreezeParams(Fit, self.srcname, "Prefactor", utils.fluxNorm(ResultDicDC["Prefactor"])) if Model_type == "PowerLaw2": utils.FreezeParams(Fit, self.srcname, "Integral", utils.fluxNorm(ResultDicDC["Integral"])) LogL0.append(-Fit.fit(0, optimizer=CurConfig["fitting"]["optimizer"])) del Fit # Clean memory Can = _GetCanvas() TgrDC = ROOT.TGraph(len(Time), np.array(Time), np.array(LogL0)) TgrDC.Draw("ALP*") TgrDC = ROOT.TGraph(len(Time), np.array(Time), np.array(LogL0)) TgrDC.SetMarkerColor(2) TgrDC.Draw("PL*") # Save the canvas in the LightCurve subfolder Can.Print(LcOutPath + "_VarIndex.eps") Can.Print(LcOutPath + "_VarIndex.C") self.info("Variability index calculation") print "\t TSvar = ", 2 * (sum(LogL1) - sum(LogL0)) print "\t NDF = ", len(LogL0) - 1 print "\t Chi2 prob = ", ROOT.TMath.Prob(2 * (sum(LogL1) - sum(LogL0)), len(LogL0) - 1) print
def _PlotLC(self, folded=False): root_style.RootStyle() # Nice plot style self.info("Reading files produced by enrico") LcOutPath = self.LCfolder + self.config["target"]["name"] # Result are stored into list. This allow to get rid of the bin which failled Time = [] TimeErr = [] Flux = [] FluxErr = [] Index = [] IndexErr = [] Cutoff = [] CutoffErr = [] FluxForNpred = [] FluxErrForNpred = [] Npred = [] Npred_detected_indices = [] TS = [] # Find name used for index parameter if self.config["target"]["spectrum"] == "PowerLaw" or self.config["target"]["spectrum"] == "PowerLaw2": IndexName = "Index" CutoffName = None elif ( self.config["target"]["spectrum"] == "PLExpCutoff" or self.config["target"]["spectrum"] == "PLSuperExpCutoff" ): IndexName = "Index1" CutoffName = "Cutoff" CutoffErrName = "dCutoff" IndexErrName = "d" + IndexName Nfail = 0 for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) # Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try: ResultDic = utils.ReadResult(CurConfig) except: self._errorReading("Fail reading config file", i) Nfail += 1 continue # Update the time and time error array Time.append((ResultDic.get("tmax") + ResultDic.get("tmin")) / 2.0) TimeErr.append((ResultDic.get("tmax") - ResultDic.get("tmin")) / 2.0) # Check is an ul have been computed. The error is set to zero for the TGraph. if ResultDic.has_key("Ulvalue"): Flux.append(ResultDic.get("Ulvalue")) FluxErr.append(0) Index.append(ResultDic.get(IndexName)) IndexErr.append(0) else: Flux.append(ResultDic.get("Flux")) FluxErr.append(ResultDic.get("dFlux")) Index.append(ResultDic.get(IndexName)) IndexErr.append(ResultDic.get(IndexErrName)) if CutoffName is not None: Cutoff.append(ResultDic.get(CutoffName)) CutoffErr.append(ResultDic.get(CutoffErrName)) FluxErrForNpred.append(ResultDic.get("dFlux")) FluxForNpred.append(ResultDic.get("Flux")) # Get the Npred and TS values Npred.append(ResultDic.get("Npred")) TS.append(ResultDic.get("TS")) if CurConfig["LightCurve"]["TSLightCurve"] < float(ResultDic.get("TS")): Npred_detected_indices.append(i - Nfail) # change the list into np array TS = np.array(TS) Npred = np.array(Npred) Npred_detected = Npred[Npred_detected_indices] Time = np.array(Time) TimeErr = np.array(TimeErr) Flux = np.array(Flux) FluxErr = np.array(FluxErr) Index = np.array(Index) IndexErr = np.array(IndexErr) Cutoff = np.array(Cutoff) CutoffErr = np.array(CutoffErr) FluxForNpred = np.array(FluxForNpred) FluxErrForNpred = np.array(FluxErrForNpred) # Plots the diagnostic plots is asked # Plots are : Npred vs flux # TS vs Time if self.config["LightCurve"]["DiagnosticPlots"] == "yes": fittedFunc = self.CheckNpred( Npred, FluxForNpred, FluxErrForNpred, Npred_detected_indices ) # check the errors calculation gTHNpred, TgrNpred = plotting.PlotNpred(Npred, FluxForNpred, FluxErrForNpred) CanvNpred = _GetCanvas() gTHNpred.Draw() TgrNpred.Draw("zP") _, TgrNpred_detected = plotting.PlotNpred( Npred_detected, Flux[Npred_detected_indices], FluxErrForNpred[Npred_detected_indices] ) TgrNpred_detected.SetLineColor(2) TgrNpred_detected.SetMarkerColor(2) TgrNpred_detected.Draw("zP") fittedFunc.Draw("SAME") CanvNpred.Print(LcOutPath + "_Npred.png") CanvNpred.Print(LcOutPath + "_Npred.eps") CanvNpred.Print(LcOutPath + "_Npred.C") gTHTS, TgrTS = plotting.PlotTS(Time, TimeErr, TS) CanvTS = _GetCanvas() gTHTS.Draw() TgrTS.Draw("zP") CanvTS.Print(LcOutPath + "_TS.png") CanvTS.Print(LcOutPath + "_TS.eps") CanvTS.Print(LcOutPath + "_TS.C") # Plot the LC itself. 
This function return a TH2F for a nice plot # a TGraph and a list of TArrow for the ULs if folded: phase = np.linspace(0, 1, self.Nbin + 1) Time = (phase[1:] + phase[:-1]) / 2.0 TimeErr = (phase[1:] - phase[:-1]) / 2.0 gTHLC, TgrLC, ArrowLC = plotting.PlotFoldedLC(Time, TimeErr, Flux, FluxErr) gTHIndex, TgrIndex, ArrowIndex = plotting.PlotFoldedLC(Time, TimeErr, Index, IndexErr) if CutoffName is not None: gTHCutoff, TgrCutoff, ArrowCutoff = plotting.PlotFoldedLC(Time, TimeErr, Cutoff, CutoffErr) else: gTHLC, TgrLC, ArrowLC = plotting.PlotLC(Time, TimeErr, Flux, FluxErr) gTHIndex, TgrIndex, ArrowIndex = plotting.PlotLC(Time, TimeErr, Index, IndexErr) if CutoffName is not None: gTHCutoff, TgrCutoff, ArrowCutoff = plotting.PlotFoldedLC(Time, TimeErr, Cutoff, CutoffErr) ### plot and save the flux LC CanvLC = ROOT.TCanvas() gTHLC.Draw() TgrLC.Draw("zP") # plot the ul as arrow for i in xrange(len(ArrowLC)): ArrowLC[i].Draw() # compute Fvar and probability of being cst self.info("Flux vs Time: infos") self.FitWithCst(TgrLC) self.Fvar(Flux, FluxErr) # Save the canvas in the LightCurve subfolder CanvLC.Print(LcOutPath + "_LC.png") CanvLC.Print(LcOutPath + "_LC.eps") CanvLC.Print(LcOutPath + "_LC.C") ### plot and save the Index LC CanvIndex = ROOT.TCanvas() gTHIndex.Draw() TgrIndex.Draw("zP") # plot the ul as arrow for i in xrange(len(ArrowIndex)): ArrowIndex[i].Draw() # Save the canvas in the LightCurve subfolder if self.config["LightCurve"]["SpectralIndex"] == 0: self.info("Index vs Time") self.FitWithCst(TgrIndex) CanvIndex.Print(LcOutPath + "_Index.png") CanvIndex.Print(LcOutPath + "_Index.eps") CanvIndex.Print(LcOutPath + "_Index.C") if len(Cutoff) > 0: ### plot and save the Cutoff LC CanvCutoff = ROOT.TCanvas() gTHCutoff.Draw() TgrCutoff.Draw("zP") # plot the ul as arrow for i in xrange(len(ArrowCutoff)): ArrowCutoff[i].Draw() print "Cutoff vs Time: infos" self.FitWithCst(TgrCutoff) CanvCutoff.Print(LcOutPath + "_Cutoff.png") CanvCutoff.Print(LcOutPath + "_Cutoff.eps") CanvCutoff.Print(LcOutPath + "_Cutoff.C") # Dump into ascii lcfilename = LcOutPath + "_results.dat" self.info("Write to Ascii file : " + lcfilename) WriteToAscii(Time, TimeErr, Flux, FluxErr, Index, IndexErr, Cutoff, CutoffErr, TS, Npred, lcfilename) if self.config["LightCurve"]["ComputeVarIndex"] == "yes": self.VariabilityIndex()
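# --- Illustrative helper (not part of enrico) ----------------------------------
# Sketch of a fractional-variability estimate such as the self.Fvar(Flux, FluxErr)
# call in the light-curve plotting above might compute, following the commonly
# used Vaughan et al. (2003) definition; the actual enrico implementation may
# differ (for instance in how it propagates the uncertainty on Fvar).
import numpy as np


def fractional_variability(flux, flux_err):
    """Excess-variance based fractional variability of a light curve."""
    flux = np.asarray(flux, dtype=float)
    flux_err = np.asarray(flux_err, dtype=float)
    mean_flux = flux.mean()
    sample_var = flux.var(ddof=1)          # total variance of the measured fluxes
    mean_err2 = np.mean(flux_err ** 2)     # average contribution of the errors
    excess_var = sample_var - mean_err2    # variance intrinsic to the source
    return np.sqrt(max(excess_var, 0.)) / mean_flux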
def VariabilityIndex(self): """Compute the variability index as in the 2FGL catalogue. (see Nolan et al, 2012)""" LcOutPath = self.LCfolder + self.config['target']['name'] utils._log('Computing Variability index ') self.config['Spectrum']['FitsGeneration'] = 'no' try : ResultDicDC = utils.ReadResult(self.generalconfig) except : self.warning("No results file found; please run enrico_sed first.") return LogL1 = [] LogL0 = [] Time = [] for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) #Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try : ResultDic = utils.ReadResult(CurConfig) except : self._errorReading("Fail reading the config file ",i) continue # LogL1.append(ResultDic.get("log_like")) #Update the time and time error array Time.append((ResultDic.get("tmax")+ResultDic.get("tmin"))/2.) ############################################################## # Compute the loglike value using the DC flux or prefactor ############################################################## # Create one obs instance CurConfig['Spectrum']['FitsGeneration'] = 'no' _,Fit = GenAnalysisObjects(CurConfig,verbose=0)#be quiet Fit.ftol = float(self.config['fitting']['ftol']) #Spectral index management! parameters = dict() parameters['Index'] = -2. parameters['alpha'] = +2. parameters['Index1'] = -2. parameters['beta'] = 0 parameters['Index2'] = 2. parameters['Cutoff'] = 100000. # set the cutoff to be high for key in parameters.keys(): try: utils.FreezeParams(Fit, self.srcname, key, parameters[key]) except: continue LogL1.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer'])) for key in ["norm","Prefactor","Integral"]: try: utils.FreezeParams(Fit,self.srcname,key, utils.fluxNorm(ResultsDicDC[key])) except: continue LogL0.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer'])) del Fit #Clean memory plt.figure() plt.xlabel("Time") plt.ylabel("Log(Like) Variability") plt.errorbar(Time,LogL0,fmt='o',color='black',ls='None') plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time)-0.02*max(Time)), xmax=min(plt.xlim()[1],1.02*max(Time)-0.02*min(Time))) # Move the offset to the axis label ax = plt.gca() ax.get_yaxis().get_major_formatter().set_useOffset(False) offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim())))) if (offset_factor != 0): ax.set_yticklabels([float(round(k,5)) \ for k in ax.get_yticks()*10**(-offset_factor)]) ax.yaxis.set_label_text(ax.yaxis.get_label_text() +\ r" [${\times 10^{%d}}$]" %offset_factor) # Secondary axis with MJD mjdaxis = ax.twiny() mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()]) mjdaxis.set_xlabel(r"Time (MJD)") mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False)) plt.setp( mjdaxis.xaxis.get_majorticklabels(), rotation=15 ) plt.tight_layout() plt.savefig(LcOutPath+"_VarIndex.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) self.info("Variability index calculation") print "\t TSvar = ",2*(sum(LogL1)-sum(LogL0)) print "\t NDF = ",len(LogL0)-1 print "\t Chi2 prob = ",1 - chi2.cdf(2*(sum(LogL1)-sum(LogL0)),len(LogL0)-1) print
def write_config(self): verbose("-> Write config file for %s" %(self.name)) import enrico from enrico.config import get_config # Create object config = ConfigObj(indent_type='\t') mes = Loggin.Message() config['out'] = self.output+'/%s_%s/'%(self.name,self.band) config['Submit'] = 'no' # target config['target'] = {} config['target']['name'] = self.name config['target']['ra'] = str("%.4f" %self.RA) config['target']['dec'] = str("%.4f" %self.DEC) config['target']['redshift'] = '0' config['target']['spectrum'] = 'PowerLaw' # space config['space'] = {} config['space']['xref'] = config['target']['ra'] config['space']['yref'] = config['target']['dec'] config['space']['rad'] = self.roi # files #basepath = "/".join(enrico.__path__[0].split("/")[0:-1]) #datapath = basepath+"/Data/download/" config['file'] = {} #config['file']['spacecraft'] = str("%s/lat_spacecraft_merged.fits" %datapath) #config['file']['event'] = str("%s/event.list" %datapath) config['file']['spacecraft'] = self.SCfile config['file']['event'] = self.PHfile config['file']['tag'] = 'fast' # time config['time'] = {} self.calculate_times() config['time']['tmin'] = self.timemin config['time']['tmax'] = self.timemax # energy config['energy'] = {} if (self.band=='lo'): config['energy']['emin'] = self.energymin config['energy']['emax'] = self.energycut elif (self.band=='hi'): config['energy']['emin'] = self.energycut config['energy']['emax'] = self.energymax else: config['energy']['emin'] = self.energymin config['energy']['emax'] = self.energymax config['energy']['enumbins_per_decade'] = 5 # event class config['event'] = {} config['event']['irfs'] = 'CALDB' config['event']['evclass'] = '64' #'128' #64=transients, 128=source config['event']['evtype'] = '3' # analysis config['analysis'] = {} config['analysis']['zmax'] = 100 # Validate verbose(config) # Add the rest of the values config = get_config(config) # Tune the remaining variables config['AppLC']['index'] = self.spindex config['AppLC']['NLCbin'] = int(24*self.timewindow/self.binsize+0.5) config['AppLC']['rad'] = self.roi # Write config file self.configfile = str("%s/%s_%sE.conf" %(self.output,self.name,self.band)) with open(self.configfile,'w') as f: config.write(f)
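# --- Worked example (illustrative) ----------------------------------------------
# The AppLC binning in write_config() above sets
#     NLCbin = int(24 * timewindow / binsize + 0.5),
# which only makes sense if timewindow is expressed in days and binsize in hours
# (an assumption inferred from the factor 24). For instance:
timewindow_days = 3.0     # hypothetical duration of the aperture light curve
binsize_hours = 6.0       # hypothetical bin size
nlcbin = int(24 * timewindow_days / binsize_hours + 0.5)   # -> 12 bins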
    src.getSrcFuncs()['Spectrum'].getParam('Scale').setValue(300)
    src.getSrcFuncs()['Spectrum'].getParam('Scale').setBounds(1e-5, 1e5)
    return src


if __name__ == '__main__':
    try:
        infile = sys.argv[1]
    except:
        from enrico import Loggin
        mes = Loggin.Message()
        mes.error('Config file not found.')

    from enrico.config import get_config
    config = get_config(infile)

    TSmap = TSMap(config, infile)
    if len(sys.argv) == 6:
        if TSmap.config['TSMap']['method'] == 'row':
            TSmap.FitOneRow(float(sys.argv[2]), int(sys.argv[4]))
        else:
            TSmap.FitOnePixel(float(sys.argv[2]), float(sys.argv[3]),
                              int(sys.argv[4]), int(sys.argv[5]))
    else:
        from enrico import Loggin
        mes = Loggin.Message()
        mes.error("Wrong number of arguments")
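# --- Usage note (illustrative) ---------------------------------------------------
# The __main__ block above expects exactly six command-line tokens (script name
# included). With the per-pixel method the call would look roughly like
#
#     python tsmap_script.py analysis.conf 120.5 45.2 10 12
#
# where "tsmap_script.py" is a placeholder name, argv[1] is the enrico config
# file, argv[2]/argv[3] are read as floats and argv[4]/argv[5] as integers and
# passed straight to TSmap.FitOnePixel (FitOneRow only uses argv[2] and argv[4]).
# Any other argument count triggers the "Wrong number of arguments" error.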
def _PlotLC(self, folded=False): self.info("Reading files produced by enrico") LcOutPath = self.LCfolder + self.config['target']['name'] #Result are stored into list. This allow to get rid of the bin which failled Time = [] TimeErr = [] Flux = [] FluxErr = [] # FluxErrChi2 = [] Index = [] IndexErr = [] Cutoff = [] CutoffErr = [] FluxForNpred = [] # FluxErrForNpred = [] Npred = [] Npred_detected_indices = [] TS = [] uplim = [] # Find name used for index parameter if ((self.config['target']['spectrum'] == 'PowerLaw' or self.config['target']['spectrum'] == 'PowerLaw2') and self.config['target']['redshift'] == 0): IndexName = 'Index' CutoffName = None elif (self.config['target']['spectrum'] == 'PLExpCutoff' or self.config['target']['spectrum'] == 'PLSuperExpCutoff'): IndexName = 'Index1' CutoffName = 'Cutoff' CutoffErrName = 'dCutoff' else: IndexName = 'alpha' CutoffName = None IndexErrName = 'd' + IndexName Nfail = 0 for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) #Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try: ResultDic = utils.ReadResult(CurConfig) if ResultDic == {}: raise (ValueError) except: self._errorReading("Fail reading config file", i) Nfail += 1 continue #Update the time and time error array Time.append((ResultDic.get("tmax") + ResultDic.get("tmin")) / 2.) TimeErr.append( (ResultDic.get("tmax") - ResultDic.get("tmin")) / 2.) #Check is an ul have been computed. The error is set to zero for the TGraph. if ResultDic.has_key('Ulvalue'): uplim.append(1) Flux.append(ResultDic.get("Ulvalue")) # FluxErr.append(0) # FluxErrChi2.append(ResultDic.get("dFlux")) # Index.append(ResultDic.get(IndexName)) # IndexErr.append(0) else: uplim.append(0) Flux.append(ResultDic.get("Flux")) FluxErr.append(ResultDic.get("dFlux")) # FluxErrChi2.append(ResultDic.get("dFlux")) Index.append(ResultDic.get(IndexName)) IndexErr.append(ResultDic.get(IndexErrName)) # if CutoffName is not None: # Cutoff.append(ResultDic.get(CutoffName)) # CutoffErr.append(ResultDic.get(CutoffErrName)) # FluxErrForNpred.append(ResultDic.get("dFlux")) FluxForNpred.append(ResultDic.get("Flux")) #Get the Npred and TS values Npred.append(ResultDic.get("Npred")) TS.append(ResultDic.get("TS")) if (CurConfig['BayesianBlocks']['TSLightCurve'] < float( ResultDic.get("TS"))): Npred_detected_indices.append(i - Nfail) # #change the list into np array # TS = np.array(TS) Npred = np.asarray(Npred) Npred_detected = np.asarray(Npred[Npred_detected_indices]) Time = np.asarray(Time) TimeErr = np.asarray(TimeErr) Flux = np.asarray(Flux) FluxErr = np.asarray(FluxErr) # Index = np.array(Index) # IndexErr = np.array(IndexErr) # Cutoff = np.array(Cutoff) # CutoffErr = np.array(CutoffErr) FluxForNpred = np.asarray(FluxForNpred) # FluxErrForNpred = np.array(FluxErrForNpred) uplim = np.asarray(uplim, dtype=bool) #Plots the diagnostic plots is asked # Plots are : Npred vs flux # TS vs Time if self.config['BayesianBlocks']['DiagnosticPlots'] == 'yes' and len( Npred) > 0: #plot Npred vs flux plt.figure() NdN = np.asarray(Npred) / np.sqrt(Npred) FdF = np.asarray(FluxForNpred) / (np.asarray(FluxErr) + 1e-20) plt.errorbar(NdN, FdF, fmt='+', color='black') if len(Npred_detected) > 2: NdN = np.asarray(Npred_detected) / np.sqrt(Npred_detected) FdF = np.asarray(FluxForNpred[Npred_detected_indices]) / ( np.asarray(FluxErr[Npred_detected_indices]) + 1e-20) plt.errorbar(NdN, FdF, fmt='+', color='red') popt, _ = scipy.optimize.curve_fit(pol1, NdN, FdF, p0=[0, 1]) #, sigma=dydata) for i in 
xrange(len(FluxForNpred)): if FluxForNpred[i] / FluxErr[i] > 2 * pol1( sqrt(Npred[i]), popt[0], popt[1]): self._errorReading("problem in errors calculation for", i) print "Flux +/- error = ", FluxForNpred[ i], " +/- ", FluxErr[i] print "V(Npred) = ", sqrt(Npred[i]) print plt.plot(np.array([0, max(NdN)]), pol1(np.array([0, max(NdN)]), popt[0], popt[1]), '--', color='black') plt.xlabel(r"${\rm Npred/\sqrt{Npred}}$") plt.ylabel(r"${\rm Flux/\Delta Flux}$") plt.savefig(LcOutPath + "_Npred.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) else: print "No Npred Plot produced" #plot TS vs Time plt.figure() plt.xlabel(r"Time (s)") plt.ylabel(r"Test Statistic") plt.errorbar(x=Time, y=TS, xerr=TimeErr, fmt='+', color='black', ls='None') plt.ylim(ymin=min(TS) * 0.8, ymax=max(TS) * 1.2) plt.xlim(xmin=max(plt.xlim()[0], 1.02 * min(Time) - 0.02 * max(Time)), xmax=min(plt.xlim()[1], 1.02 * max(Time) - 0.02 * min(Time))) # Move the offset to the axis label ax = plt.gca() ax.get_yaxis().get_major_formatter().set_useOffset(False) offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim())))) if (offset_factor != 0): ax.set_yticklabels([ float(round(k, 5)) for k in ax.get_yticks() * 10**(-offset_factor) ]) ax.yaxis.set_label_text(ax.yaxis.get_label_text() + r" [${\times 10^{%d}}$]" % offset_factor) # Secondary axis with MJD mjdaxis = ax.twiny() mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()]) mjdaxis.set_xlabel(r"Time (MJD)") mjdaxis.xaxis.set_major_formatter( matplotlib.ticker.ScalarFormatter(useOffset=False)) plt.setp(mjdaxis.xaxis.get_majorticklabels(), rotation=15) plt.tight_layout() plt.savefig(LcOutPath + "_TS.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) if len(Time) > 0: plt.figure() plt.xlabel(r"Time (s)") plt.ylabel(r"${\rm Flux\ (photon\ cm^{-2}\ s^{-1})}$") plot_bayesianblocks(Time - TimeErr, Time + TimeErr, Flux, FluxErr, FluxErr, uplim) plt.ylim(ymin=max(plt.ylim()[0], np.percentile(Flux[~uplim], 1) * 0.1), ymax=min(plt.ylim()[1], np.percentile(Flux[~uplim], 99) * 2.0)) plt.xlim(xmin=max( plt.xlim()[0], 1.02 * min(Time - TimeErr) - 0.02 * max(Time + TimeErr)), xmax=min( plt.xlim()[1], 1.02 * max(Time + TimeErr) - 0.02 * min(Time - TimeErr))) # Move the offset to the axis label ax = plt.gca() ax.get_yaxis().get_major_formatter().set_useOffset(False) offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim())))) if (offset_factor != 0): ax.set_yticklabels([float(round(k,5)) \ for k in ax.get_yticks()*10**(-offset_factor)]) ax.yaxis.set_label_text(ax.yaxis.get_label_text() +\ r" [${\times 10^{%d}}$]" %offset_factor) # Secondary axis with MJD mjdaxis = ax.twiny() mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()]) mjdaxis.set_xlabel(r"Time (MJD)") mjdaxis.xaxis.set_major_formatter( matplotlib.ticker.ScalarFormatter(useOffset=False)) plt.setp(mjdaxis.xaxis.get_majorticklabels(), rotation=15) plt.tight_layout() plt.savefig(LcOutPath + "_LC.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) else: print "[BayesianBlocks] Warning : No valid data" if self.config["BayesianBlocks"]["SpectralIndex"] == 0: if len(Time[~uplimIndex]) > 0: plt.figure() plt.xlabel(r"Time (s)") plt.ylabel(r"${\rm Index}$") Index = np.asarray(Index) IndexErr = 
np.asarray(IndexErr) uplimIndex = uplim #+ Index<0.55 plot_bayesianblocks(Time[~uplimIndex] - TimeErr[~uplimIndex], Time[~uplimIndex] + TimeErr[~uplimIndex], Index[~uplimIndex], IndexErr[~uplimIndex], IndexErr[~uplimIndex], uplimIndex[~uplimIndex]) plt.ylim(ymin=max(plt.ylim()[0], np.percentile(Index[~uplimIndex], 1) * 0.1), ymax=min(plt.ylim()[1], np.percentile(Index[~uplimIndex], 99) * 2.0)) plt.xlim(xmin=max( plt.xlim()[0], 1.02 * min(Time - TimeErr) - 0.02 * max(Time + TimeErr)), xmax=min( plt.xlim()[1], 1.02 * max(Time + TimeErr) - 0.02 * min(Time - TimeErr))) # Move the offset to the axis label ax = plt.gca() ax.get_yaxis().get_major_formatter().set_useOffset(False) offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim())))) if (offset_factor != 0): ax.set_yticklabels([float(round(k,5)) \ for k in ax.get_yticks()*10**(-offset_factor)]) ax.yaxis.set_label_text(ax.yaxis.get_label_text() +\ r" [${\times 10^{%d}}$]" %offset_factor) # Secondary axis with MJD mjdaxis = ax.twiny() mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()]) mjdaxis.set_xlabel(r"Time (MJD)") mjdaxis.xaxis.set_major_formatter( matplotlib.ticker.ScalarFormatter(useOffset=False)) plt.setp(mjdaxis.xaxis.get_majorticklabels(), rotation=15) plt.tight_layout() plt.savefig(LcOutPath + "_Index.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) else: print "[BayesianBlocks] Warning : No valid data" #Dump into ascii lcfilename = LcOutPath + "_results.dat" self.info("Write to Ascii file : " + lcfilename) lightcurve.WriteToAscii(Time, TimeErr, Flux, FluxErr, Index, IndexErr, Cutoff, CutoffErr, TS, Npred, lcfilename)
def run(infile): from enrico import utils from enrico import energybin from enrico.config import get_config from enrico import Loggin mes = Loggin.Message() """Run an entire Fermi analysis (spectrum) by reading a config file""" config = get_config(infile) folder = config['out'] utils.mkdir_p(folder) FitRunner, Fit = GenAnalysisObjects(config) # create all the fit files and run gtlike FitRunner.PerformFit(Fit) sedresult = None #plot the SED and model map if possible and asked if float(config['UpperLimit']['TSlimit']) < Fit.Ts( config['target']['name']): if config['Spectrum']['ResultPlots'] == 'yes': from enrico.constants import SpectrumPath utils.mkdir_p("%s/%s/" % (config['out'], SpectrumPath)) sedresult = FitRunner.ComputeSED(Fit, dump=True) else: sedresult = FitRunner.ComputeSED(Fit, dump=False) if (config['energy']['decorrelation_energy'] == 'yes'): #Update the energy scale to decorrelation energy mes.info( 'Setting the decorrelation energy as new Scale for the spectral parameters' ) spectrum = Fit[FitRunner.obs.srcname].funcs['Spectrum'] modeltype = spectrum.genericName() genericName = Fit.model.srcs[ FitRunner.obs.srcname].spectrum().genericName() varscale = None if genericName == "PowerLaw2": varscale = None elif genericName in [ "PowerLaw", "PLSuperExpCutoff", "EblAtten::PLSuperExpCutoff" ]: varscale = "Scale" elif genericName in ["LogParabola","EblAtten::LogParabola", \ "BrokenPowerLaw", "EblAtten::BrokenPowerLaw"]: varscale = "Eb" if varscale is not None: spectrum.getParam(varscale).setValue(sedresult.decE) FitRunner.PerformFit(Fit) #Get and dump the target specific results Result = FitRunner.GetAndPrintResults(Fit) utils.DumpResult(Result, config) # Make energy bins by running a *new* analysis Nbin = config['Ebin']['NumEnergyBins'] FitRunner.config['file']['parent_config'] = infile if config['Spectrum']['ResultParentPlots'] == "yes": plot_sed_fromconfig(get_config(config['file']['parent_config']), ignore_missing_bins=True) if config['Spectrum']['ResultPlots'] == 'yes': outXml = utils._dump_xml(config) # the possibility of making the model map is checked inside the function FitRunner.ModelMap(outXml) if Nbin > 0: FitRunner.config['Spectrum']['ResultParentPlots'] = "yes" plot_sed_fromconfig(get_config(infile), ignore_missing_bins=True) energybin.RunEbin(folder, Nbin, Fit, FitRunner, sedresult) del (sedresult) del (Result) del (FitRunner)
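# --- Illustrative helper (not part of enrico) ------------------------------------
# The decorrelation-energy update in run() above maps the generic spectral model
# name to the parameter holding the reference energy before moving it to the
# decorrelation energy. The same mapping is pulled out here as a small helper for
# clarity; the function name scale_param_for is hypothetical.
def scale_param_for(generic_name):
    """Return the reference-energy parameter to update, or None if there is none."""
    if generic_name in ["PowerLaw", "PLSuperExpCutoff",
                        "EblAtten::PLSuperExpCutoff"]:
        return "Scale"
    if generic_name in ["LogParabola", "EblAtten::LogParabola",
                        "BrokenPowerLaw", "EblAtten::BrokenPowerLaw"]:
        return "Eb"
    return None   # e.g. PowerLaw2: no per-source reference energy to move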
def VariabilityIndex(self): """Compute the variability index as in the 2FLG catalogue. (see Nolan et al, 2012)""" LcOutPath = self.LCfolder + self.config['target']['name'] utils._log('Computing Variability index ') self.config['Spectrum']['FitsGeneration'] = 'no' # ValueDC = self.GetDCValue() ResultDicDC = utils.ReadResult(self.config) LogL1 = [] LogL0 = [] Time = [] for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) #Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try : ResultDic = utils.ReadResult(CurConfig) except : print "WARNING : fail reading the config file : ",CurConfig print "Job Number : ",i print "Please have a look at this job log file" continue # LogL1.append(ResultDic.get("log_like")) #Update the time and time error array Time.append((ResultDic.get("tmax")+ResultDic.get("tmin"))/2.) ############################################################## # Compute the loglike value using the DC flux or prefactor ############################################################## # Create one obs instance CurConfig['Spectrum']['FitsGeneration'] = 'no' _,Fit = GenAnalysisObjects(CurConfig,verbose=0)#be quiet Fit.ftol = float(self.config['fitting']['ftol']) #Spectral index management! utils.FreezeParams(Fit, self.srcname, 'Index', -self.config['LightCurve']['SpectralIndex']) LogL1.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer'])) Model_type = Fit.model.srcs[self.srcname].spectrum().genericName() if (Model_type == 'PowerLaw') : utils.FreezeParams(Fit, self.srcname, 'Prefactor', utils.fluxNorm(ResultDicDC['Prefactor'])) if (Model_type == 'PowerLaw2') : utils.FreezeParams(Fit, self.srcname, 'Integral', utils.fluxNorm(ResultDicDC['Integral'])) LogL0.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer'])) del Fit #Clean memory Can = _GetCanvas() TgrDC = ROOT.TGraph(len(Time),np.array(Time),np.array(LogL0)) TgrDC.Draw("ALP*") TgrDC = ROOT.TGraph(len(Time),np.array(Time),np.array(LogL0)) TgrDC.SetMarkerColor(2) TgrDC.Draw("PL*") #Save the canvas in the LightCurve subfolder Can.Print(LcOutPath+'_VarIndex.eps') Can.Print(LcOutPath+'_VarIndex.C') print print "\t TSvar = ",2*(sum(LogL1)-sum(LogL0)) print "\t NDF = ",len(LogL0)-1 print "\t Chi2 prob = ",ROOT.TMath.Prob(2*(sum(LogL1)-sum(LogL0)),len(LogL0)-1) print
def VariabilityIndex(self): """Compute the variability index as in the 2FLG catalogue. (see Nolan et al, 2012)""" LcOutPath = self.LCfolder + self.config['target']['name'] utils._log('Computing Variability index ') self.config['Spectrum']['FitsGeneration'] = 'no' try : ResultDicDC = utils.ReadResult(self.generalconfig) except : self.warning("No results file found; please run enrico_sed first.") return LogL1 = [] LogL0 = [] Time = [] for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) #Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try : ResultDic = utils.ReadResult(CurConfig) except : self._errorReading("Fail reading the config file ",i) continue # LogL1.append(ResultDic.get("log_like")) #Update the time and time error array Time.append((ResultDic.get("tmax")+ResultDic.get("tmin"))/2.) ############################################################## # Compute the loglike value using the DC flux or prefactor ############################################################## # Create one obs instance CurConfig['Spectrum']['FitsGeneration'] = 'no' _,Fit = GenAnalysisObjects(CurConfig,verbose=0)#be quiet Fit.ftol = float(self.config['fitting']['ftol']) #Spectral index management! parameters = dict() parameters['Index'] = -2. parameters['alpha'] = +2. parameters['Index1'] = -2. parameters['beta'] = 0 parameters['Index2'] = 2. parameters['Cutoff'] = 30000. # set the cutoff to be high for key in parameters.keys(): try: utils.FreezeParams(Fit, self.srcname, key, parameters[key]) except: continue LogL1.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer'])) for key in ["norm","Prefactor","Integral"]: try: utils.FreezeParams(Fit,self.srcname,key, utils.fluxNorm(ResultsDicDC[key])) except: continue LogL0.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer'])) del Fit #Clean memory plt.figure() plt.xlabel("Time") plt.ylabel("Log(Like) Variability") plt.errorbar(Time,LogL0,fmt='o',color='black',ls='None') plt.savefig(LcOutPath+"_VarIndex.png", dpi=150, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) self.info("Variability index calculation") print "\t TSvar = ",2*(sum(LogL1)-sum(LogL0)) print "\t NDF = ",len(LogL0)-1 print "\t Chi2 prob = ",1 - chi2.cdf(2*(sum(LogL1)-sum(LogL0)),len(LogL0)-1) print
def _PlotLC(self, folded=False): root_style.RootStyle() #Nice plot style self.info("Reading files produced by enrico") LcOutPath = self.LCfolder + self.config['target']['name'] #Result are stored into list. This allow to get rid of the bin which failled Time = [] TimeErr = [] Flux = [] FluxErr = [] Index = [] IndexErr = [] Cutoff = [] CutoffErr = [] FluxForNpred = [] FluxErrForNpred = [] Npred = [] Npred_detected_indices = [] TS = [] # Find name used for index parameter if (self.config['target']['spectrum'] == 'PowerLaw' or self.config['target']['spectrum'] == 'PowerLaw2'): IndexName = 'Index' CutoffName = None elif (self.config['target']['spectrum'] == 'PLExpCutoff' or self.config['target']['spectrum'] == 'PLSuperExpCutoff'): IndexName = 'Index1' CutoffName = 'Cutoff' CutoffErrName = 'dCutoff' IndexErrName = 'd' + IndexName Nfail = 0 for i in xrange(self.Nbin): CurConfig = get_config(self.configfile[i]) #Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed try: ResultDic = utils.ReadResult(CurConfig) except: self._errorReading("Fail reading config file", i) Nfail += 1 continue #Update the time and time error array Time.append((ResultDic.get("tmax") + ResultDic.get("tmin")) / 2.) TimeErr.append( (ResultDic.get("tmax") - ResultDic.get("tmin")) / 2.) #Check is an ul have been computed. The error is set to zero for the TGraph. if ResultDic.has_key('Ulvalue'): Flux.append(ResultDic.get("Ulvalue")) FluxErr.append(0) Index.append(ResultDic.get(IndexName)) IndexErr.append(0) else: Flux.append(ResultDic.get("Flux")) FluxErr.append(ResultDic.get("dFlux")) Index.append(ResultDic.get(IndexName)) IndexErr.append(ResultDic.get(IndexErrName)) if CutoffName is not None: Cutoff.append(ResultDic.get(CutoffName)) CutoffErr.append(ResultDic.get(CutoffErrName)) FluxErrForNpred.append(ResultDic.get("dFlux")) FluxForNpred.append(ResultDic.get("Flux")) #Get the Npred and TS values Npred.append(ResultDic.get("Npred")) TS.append(ResultDic.get("TS")) if (CurConfig['LightCurve']['TSLightCurve'] < float( ResultDic.get("TS"))): Npred_detected_indices.append(i - Nfail) #change the list into np array TS = np.array(TS) Npred = np.array(Npred) Npred_detected = Npred[Npred_detected_indices] Time = np.array(Time) TimeErr = np.array(TimeErr) Flux = np.array(Flux) FluxErr = np.array(FluxErr) Index = np.array(Index) IndexErr = np.array(IndexErr) Cutoff = np.array(Cutoff) CutoffErr = np.array(CutoffErr) FluxForNpred = np.array(FluxForNpred) FluxErrForNpred = np.array(FluxErrForNpred) #Plots the diagnostic plots is asked # Plots are : Npred vs flux # TS vs Time if self.config['LightCurve']['DiagnosticPlots'] == 'yes': fittedFunc = self.CheckNpred( Npred, FluxForNpred, FluxErrForNpred, Npred_detected_indices) #check the errors calculation gTHNpred, TgrNpred = plotting.PlotNpred(Npred, FluxForNpred, FluxErrForNpred) CanvNpred = _GetCanvas() gTHNpred.Draw() TgrNpred.Draw('zP') _, TgrNpred_detected = plotting.PlotNpred( Npred_detected, Flux[Npred_detected_indices], FluxErrForNpred[Npred_detected_indices]) TgrNpred_detected.SetLineColor(2) TgrNpred_detected.SetMarkerColor(2) TgrNpred_detected.Draw('zP') fittedFunc.Draw("SAME") CanvNpred.Print(LcOutPath + "_Npred.png") CanvNpred.Print(LcOutPath + "_Npred.eps") CanvNpred.Print(LcOutPath + "_Npred.C") gTHTS, TgrTS = plotting.PlotTS(Time, TimeErr, TS) CanvTS = _GetCanvas() gTHTS.Draw() TgrTS.Draw('zP') CanvTS.Print(LcOutPath + '_TS.png') CanvTS.Print(LcOutPath + '_TS.eps') CanvTS.Print(LcOutPath + '_TS.C') # Plot the LC itself. 
This function return a TH2F for a nice plot # a TGraph and a list of TArrow for the ULs if folded: phase = np.linspace(0, 1, self.Nbin + 1) Time = (phase[1:] + phase[:-1]) / 2. TimeErr = (phase[1:] - phase[:-1]) / 2. gTHLC, TgrLC, ArrowLC = plotting.PlotFoldedLC( Time, TimeErr, Flux, FluxErr) gTHIndex, TgrIndex, ArrowIndex = plotting.PlotFoldedLC( Time, TimeErr, Index, IndexErr) if CutoffName is not None: gTHCutoff, TgrCutoff, ArrowCutoff = plotting.PlotFoldedLC( Time, TimeErr, Cutoff, CutoffErr) else: gTHLC, TgrLC, ArrowLC = plotting.PlotLC(Time, TimeErr, Flux, FluxErr) gTHIndex, TgrIndex, ArrowIndex = plotting.PlotLC( Time, TimeErr, Index, IndexErr) if CutoffName is not None: gTHCutoff, TgrCutoff, ArrowCutoff = plotting.PlotFoldedLC( Time, TimeErr, Cutoff, CutoffErr) ### plot and save the flux LC CanvLC = ROOT.TCanvas() gTHLC.Draw() TgrLC.Draw('zP') #plot the ul as arrow for i in xrange(len(ArrowLC)): ArrowLC[i].Draw() # compute Fvar and probability of being cst self.info("Flux vs Time: infos") self.FitWithCst(TgrLC) self.Fvar(Flux, FluxErr) #Save the canvas in the LightCurve subfolder CanvLC.Print(LcOutPath + '_LC.png') CanvLC.Print(LcOutPath + '_LC.eps') CanvLC.Print(LcOutPath + '_LC.C') ### plot and save the Index LC CanvIndex = ROOT.TCanvas() gTHIndex.Draw() TgrIndex.Draw('zP') #plot the ul as arrow for i in xrange(len(ArrowIndex)): ArrowIndex[i].Draw() #Save the canvas in the LightCurve subfolder if self.config["LightCurve"]["SpectralIndex"] == 0: self.info("Index vs Time") self.FitWithCst(TgrIndex) CanvIndex.Print(LcOutPath + '_Index.png') CanvIndex.Print(LcOutPath + '_Index.eps') CanvIndex.Print(LcOutPath + '_Index.C') if len(Cutoff) > 0: ### plot and save the Cutoff LC CanvCutoff = ROOT.TCanvas() gTHCutoff.Draw() TgrCutoff.Draw('zP') #plot the ul as arrow for i in xrange(len(ArrowCutoff)): ArrowCutoff[i].Draw() print "Cutoff vs Time: infos" self.FitWithCst(TgrCutoff) CanvCutoff.Print(LcOutPath + '_Cutoff.png') CanvCutoff.Print(LcOutPath + '_Cutoff.eps') CanvCutoff.Print(LcOutPath + '_Cutoff.C') #Dump into ascii lcfilename = LcOutPath + "_results.dat" self.info("Write to Ascii file : " + lcfilename) WriteToAscii(Time, TimeErr, Flux, FluxErr, Index, IndexErr, Cutoff, CutoffErr, TS, Npred, lcfilename) if self.config["LightCurve"]['ComputeVarIndex'] == 'yes': self.VariabilityIndex()
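# --- Illustrative helper (not part of enrico) ------------------------------------
# Sketch of what a WriteToAscii helper with the signature used above could look
# like, assuming every list has one entry per time bin except Cutoff/CutoffErr,
# which may be empty and are padded with zeros here. The real enrico routine may
# use a different header or column layout.
import numpy as np


def WriteToAscii(Time, TimeErr, Flux, FluxErr, Index, IndexErr,
                 Cutoff, CutoffErr, TS, Npred, filename):
    """Dump the light-curve results into a whitespace-separated ASCII table."""
    nbin = len(Time)
    Cutoff = list(Cutoff) + [0.] * (nbin - len(Cutoff))
    CutoffErr = list(CutoffErr) + [0.] * (nbin - len(CutoffErr))
    data = np.column_stack([Time, TimeErr, Flux, FluxErr, Index, IndexErr,
                            Cutoff, CutoffErr, TS, Npred])
    header = "Time TimeErr Flux FluxErr Index IndexErr Cutoff CutoffErr TS Npred"
    np.savetxt(filename, data, header=header)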