Example #1
    def ComputeUL(self, Fit):
        """Compute an Upper Limit using either the profil or integral method
        See the ST cicerone for more information on the 2 method"""

        self._log('UpperLimit', 'Compute upper Limit')
        #Index given by the user  
        self.info("Assumed index is ", self.config['UpperLimit']['SpectralIndex'])

        IdGamma = utils.getParamIndx(Fit, self.obs.srcname, 'Index')
        Fit[IdGamma] = -self.config['UpperLimit']['SpectralIndex']#set the index
        Fit[IdGamma].setFree(0)#the variable index is frozen to compute the UL

        import scipy.stats
        cl = float(self.config['UpperLimit']['cl'])
        delta = 0.5*scipy.stats.chi2.isf(1-2*(cl-0.5), 1)
        print cl,' ',delta

        if self.config['UpperLimit']['Method'] == "Profile": #The method is Profile
            import UpperLimits
            ulobject = UpperLimits.UpperLimits(Fit)
            ul, _ = ulobject[self.obs.srcname].compute(emin=self.obs.Emin,
                                      emax=self.obs.Emax,delta=delta)
                                      #delta=2.71 / 2)
            self.info("Upper limit using Profile method: ")
            print ulobject[self.obs.srcname].results
        if self.config['UpperLimit']['Method'] == "Integral": #The method is Integral
            import IntegralUpperLimit
            ul, _ = IntegralUpperLimit.calc_int(Fit, self.obs.srcname, cl=cl,
                                                verbosity=0)
            print "Upper limit using Integral method: ", ul
        return ul #Return the result. This is an ul on the integral flux in ph/cm2/s 
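
A note on the Profile branch above: the delta passed to UpperLimits.compute is derived from the requested confidence level via the chi-square quantile with one degree of freedom. A minimal standalone sketch (scipy only; the confidence levels are illustrative) of that mapping, which reproduces the commented-out value 2.71/2 for cl = 0.95:

import scipy.stats

# Map a one-sided confidence level to the log-likelihood offset used by the
# profile upper-limit method: delta = chi2.isf(1 - 2*(cl - 0.5), 1) / 2.
for cl in (0.68, 0.90, 0.95, 0.99):
    delta = 0.5 * scipy.stats.chi2.isf(1 - 2 * (cl - 0.5), 1)
    print cl, delta    # cl = 0.95 gives ~1.35 (i.e. 2.71 / 2)
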
Example #2
def runBayesianUL(analysis, Name, minEnergy, maxEnergy):
    print "\nThe TS is below the threshold, calculating 95% confidence-level Bayesian upper limit."
    limit, results = IUL.calc_int(analysis,
                                  Name,
                                  cl=0.95,
                                  emin=minEnergy,
                                  emax=maxEnergy)
    print "Bayesian upper limit: " + str(limit) + " photons/cm^2/s"
    if minEnergy != 100 or maxEnergy != 1e5:  # analysis band differs from the standard 0.1-100 GeV band
        print "Calculating greater than 100 MeV upper limit flux"
        limit, results = IUL.calc_int(analysis,
                                      Name,
                                      cl=0.95,
                                      emin=100,
                                      emax=1e5)
        print "0.1 - 100 GeV Bayesian upper limit: " + str(
            limit) + " photons/cm^2/s"
Example #3
    def EnvelopeUL(self, Fit):
        """Compute the envelope UL. An UL is computed for different index and the maximum is taken at each energy.
        This is usefull when the source index is not know or can not be constrain by theoritical argument
        The index range form 1.5 to 2.5"""
        import IntegralUpperLimit
        import UpperLimits
        self._log('EnvelopeUL', 'Compute upper limit envelope')
        PhIndex = Fit.par_index(self.obs.srcname, 'Index')
        Nbp = 20  #Make Nbp computations
        Npgraph = 100  #The graph has Npgraph points
        ener = np.logspace(np.log10(self.obs.Emin), np.log10(self.obs.Emax),
                           Npgraph)  #the array containing the energy
        Ulenv = np.array(Npgraph * [0.])  #the array containing the UL value

        for i in xrange(Nbp):
            indx = -1.5 - i / (Nbp - 1.)
            utils.FreezeParams(Fit, self.obs.srcname, PhIndex, indx)
            #Use either the profile or the integral method
            self.info("Methode used: " + self.config['UpperLimit']['Method'])
            if self.config['UpperLimit']['Method'] == "Profile":
                ul = UpperLimits.UpperLimits(Fit)
                source_ul = ul[self.obs.srcname]
                ul_val, _ = source_ul.compute(emin=self.obs.Emin,
                                              emax=self.obs.Emax,
                                              delta=2.71 / 2)
            if self.config['UpperLimit']['Method'] == "Integral":
                ul_val, _ = IntegralUpperLimit.calc_int(Fit,
                                                        self.obs.srcname,
                                                        verbosity=0)
            self.success("Upper Limits calculated")
            print "Index = ", indx, " UL = ", ul_val  #small print
            for j in xrange(Npgraph):
                model_name = Fit.model.srcs[
                    self.obs.srcname].spectrum().genericName()
                #compute the dN/dE value. The computation depends on whether
                #the model is PowerLaw or PowerLaw2
                #Note: other models are not taken into account
                #and no UL will be computed
                if model_name == 'PowerLaw2':
                    newUl = ul_val * (indx + 1) * pow(ener[j], indx + 2) \
                        / (pow(self.obs.Emax, indx + 1) - pow(self.obs.Emin, indx + 1))
                elif model_name == 'PowerLaw':
                    IdEScale = utils.getParamIndx(Fit, self.obs.srcname,
                                                  'Scale')
                    Escale = Fit[IdEScale].value()
                    newUl = ul_val * pow(ener[j] / Escale,
                                         indx + 2) * Escale**2 * 1.6022e-6
                Ulenv[j] = max(Ulenv[j], newUl)

        print
        self.info("Result of the UL envelope")
        for j in xrange(Npgraph):
            print ener[j], " ", Ulenv[j]
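
The envelope loop above turns each integral-flux upper limit into an E^2 dN/dE point; for a PowerLaw2 model the conversion is the closed-form expression in the PowerLaw2 branch. A standalone sketch of that branch (numpy only; the flux, index and energies are illustrative, and the result is in MeV cm^-2 s^-1 since, unlike the PowerLaw branch, no MeV-to-erg factor is applied):

import numpy as np

def powerlaw2_envelope_point(ul_val, energy, emin, emax, indx):
    # E^2 dN/dE implied by an integral UL (ph/cm2/s over emin..emax) for a
    # PowerLaw2 spectrum with (negative) photon index indx.
    return ul_val * (indx + 1) * energy**(indx + 2) / \
        (emax**(indx + 1) - emin**(indx + 1))

ener = np.logspace(2, 5, 5)   # MeV
print powerlaw2_envelope_point(1e-9, ener, 1e2, 1e5, -2.0)
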
Example #4
    def ComputeUL(self, Fit):
        """Compute an Upper Limit using either the profil or integral method
        See the ST cicerone for more information on the 2 method"""

        self._log('UpperLimit', 'Compute upper Limit')
        #Index given by the user
        self.info("Assumed index is "+str(self.config['UpperLimit']['SpectralIndex']))

        parameters = dict()
        parameters['Index']  = -float(self.config['UpperLimit']['SpectralIndex'])
        parameters['alpha']  = +float(self.config['UpperLimit']['SpectralIndex'])
        parameters['Index1'] = -float(self.config['UpperLimit']['SpectralIndex'])
        parameters['beta']   = 0
        parameters['Index2'] = 2.
        parameters['Cutoff'] = 30000. # set the cutoff to be high

        for key in parameters.keys():
            try:
                utils.FreezeParams(Fit,self.obs.srcname, key, parameters[key])
            except:
                continue

        import scipy.stats
        cl = float(self.config['UpperLimit']['cl'])
        delta = 0.5*scipy.stats.chi2.isf(1-2*(cl-0.5), 1)


        if self.config['UpperLimit']['Method'] == "Profile": #The method is Profile
            if Fit.Ts(self.obs.srcname)<2 :
                self.warning("TS of the source is very low, better to use Integral method")
            import UpperLimits
            ulobject = UpperLimits.UpperLimits(Fit)
            ul, _ = ulobject[self.obs.srcname].compute(emin=self.obs.Emin,
                                      emax=self.obs.Emax,delta=delta)
                                      #delta=2.71 / 2)
            self.info("Upper limit using Profile method: ")
            print ulobject[self.obs.srcname].results
            self.warning("Be sure to have enough photons to validate the gaussian assumption")
        if self.config['UpperLimit']['Method'] == "Integral": #The method is Integral
            import IntegralUpperLimit
            ul, _ = IntegralUpperLimit.calc_int(Fit, self.obs.srcname, cl=cl,
                                                verbosity=0,emin=self.obs.Emin,
                                                emax=self.obs.Emax)
            print "Upper limit using Integral method: ", ul
            self.warning("Be sure to have enough photons to validate the gaussian assumption")

        if self.config['UpperLimit']['Method'] == "Poisson": #The method is Poisson
            ul = self.PoissonUL(Fit)
            print "Upper limit using Poisson statistic: ", ul

        print "This is an ul on the integral flux in ph/cm2/s"
        return ul #Return the result. This is an ul on the integral flux in ph/cm2/s
Example #5
    def EnvelopeUL(self, Fit):
        """Compute the envelope UL. An UL is computed for different index and the maximum is taken at each energy.
        This is usefull when the source index is not know or can not be constrain by theoritical argument
        The index range form 1.5 to 2.5"""
        import IntegralUpperLimit
        import UpperLimits
        self._log('EnvelopeUL', 'Compute upper limit envelope')
        PhIndex = Fit.par_index(self.obs.srcname, 'Index')
        Nbp = 20 #Make Nbp computations
        Npgraph = 100#The graph has Npgraph points
        ener = np.logspace(np.log10(self.obs.Emin),
                           np.log10(self.obs.Emax), Npgraph)#the array containing the energy
        Ulenv = np.array(Npgraph * [0.])#the array containing the UL value

        for i in xrange(Nbp):
            indx = -1.5 - i / (Nbp - 1.)
            Fit[PhIndex] = indx
            Fit.freeze(PhIndex)#Freeze the index
            #Use either the profile or the integral method
            self.info("Methode used: "+self.config['UpperLimit']['Method'])
            if self.config['UpperLimit']['Method'] == "Profile":
                ul = UpperLimits.UpperLimits(Fit)
                source_ul = ul[self.obs.srcname]
                ul_val, _ = source_ul.compute(emin=self.obs.Emin,
                                              emax=self.obs.Emax,
                                              delta=2.71 / 2)
            if self.config['UpperLimit']['Method'] == "Integral":
                ul_val, _ = IntegralUpperLimit.calc_int(Fit, self.obs.srcname,
                                                        verbosity=0)
            self.success("Upper Limits calculated")
            print "Index = ", indx, " UL = ", ul_val  #small print
            for j in xrange(Npgraph):
                model_name = Fit.model.srcs[self.obs.srcname].spectrum().genericName()
                #compute the dN/dE value. The computation depends on whether
                #the model is PowerLaw or PowerLaw2
                #Note: other models are not taken into account
                #and no UL will be computed
                if model_name == 'PowerLaw2':
                    newUl = ul_val * (indx + 1) * pow(ener[j], indx + 2) \
                        / (pow(self.obs.Emax, indx + 1) - pow(self.obs.Emin, indx + 1))
                elif model_name == 'PowerLaw':
                    IdEScale = utils.getParamIndx(Fit, self.obs.srcname, 'Scale')
                    Escale = Fit[IdEScale].value()
                    newUl = ul_val * pow(ener[j] / Escale, indx + 2)*Escale**2*1.6022e-6
                Ulenv[j] = max(Ulenv[j], newUl)

        print
        self.info("Result of the UL envelope")
        for j in xrange(Npgraph):
            print ener[j], " ", Ulenv[j]
Example #6
    def ComputeUL(self, Fit):
        """Compute an Upper Limit using either the profil or integral method
        See the ST cicerone for more information on the 2 method"""

        self._log('UpperLimit', 'Compute upper Limit')
        #Index given by the user
        self.info("Assumed index is "+str(self.config['UpperLimit']['SpectralIndex']))

        IdGamma = utils.getParamIndx(Fit, self.obs.srcname, 'Index')
        Fit[IdGamma] = -self.config['UpperLimit']['SpectralIndex']#set the index
        Fit[IdGamma].setFree(0)#the variable index is frozen to compute the UL

        import scipy.stats
        cl = float(self.config['UpperLimit']['cl'])
        delta = 0.5*scipy.stats.chi2.isf(1-2*(cl-0.5), 1)


        if self.config['UpperLimit']['Method'] == "Profile": #The method is Profile
            if Fit.Ts(self.obs.srcname)<2 :
                self.warning("TS of the source is very low, better to use Integral method")
            import UpperLimits
            ulobject = UpperLimits.UpperLimits(Fit)
            ul, _ = ulobject[self.obs.srcname].compute(emin=self.obs.Emin,
                                      emax=self.obs.Emax,delta=delta)
                                      #delta=2.71 / 2)
            self.info("Upper limit using Profile method: ")
            print ulobject[self.obs.srcname].results
            self.warning("Be sure to have enough photons to validate the gaussian assumption")
        if self.config['UpperLimit']['Method'] == "Integral": #The method is Integral
            import IntegralUpperLimit
            ul, _ = IntegralUpperLimit.calc_int(Fit, self.obs.srcname, cl=cl,
                                                verbosity=0,emin=self.obs.Emin,
                                                emax=self.obs.Emax)
            print "Upper limit using Integral method: ", ul
            self.warning("Be sure to have enough photons to validate the gaussian assumption")

        if self.config['UpperLimit']['Method'] == "Poisson": #The method is Poisson
            ul = self.PoissonUL(Fit)
            print "Upper limit using Poisson statistic: ", ul

        print "This is an ul on the integral flux in ph/cm2/s"
        return ul #Return the result. This is an ul on the integral flux in ph/cm2/s
Example #7
    def ComputeUL(self, Fit):
        """Compute an Upper Limit using either the profil or integral method
        See the ST cicerone for more information on the 2 method"""

        self._log('UpperLimit', 'Compute upper Limit')
        #Index given by the user  
        self.info("Assumed index is "+str(self.config['UpperLimit']['SpectralIndex']))

        IdGamma = utils.getParamIndx(Fit, self.obs.srcname, 'Index')
        Fit[IdGamma] = -self.config['UpperLimit']['SpectralIndex']#set the index
        Fit[IdGamma].setFree(0)#the variable index is frozen to compute the UL

        import scipy.stats
        cl = float(self.config['UpperLimit']['cl'])
        delta = 0.5*scipy.stats.chi2.isf(1-2*(cl-0.5), 1)


        if self.config['UpperLimit']['Method'] == "Profile": #The method is Profile
            if Fit.Ts(self.obs.srcname)<2 :
                self.warning("TS of the source is very low, better to use Integral method")
            import UpperLimits
            ulobject = UpperLimits.UpperLimits(Fit)
            ul, _ = ulobject[self.obs.srcname].compute(emin=self.obs.Emin,
                                      emax=self.obs.Emax,delta=delta)
                                      #delta=2.71 / 2)
            self.info("Upper limit using Profile method: ")
            print ulobject[self.obs.srcname].results
            self.warning("Be sure to have enough photons to validate the gaussian assumption")
        if self.config['UpperLimit']['Method'] == "Integral": #The method is Integral
            import IntegralUpperLimit
            ul, _ = IntegralUpperLimit.calc_int(Fit, self.obs.srcname, cl=cl,
                                                verbosity=0,emin=self.obs.Emin,
                                                emax=self.obs.Emax)
            print "Upper limit using Integral method: ", ul
            self.warning("Be sure to have enough photons to validate the gaussian assumption")

        if self.config['UpperLimit']['Method'] == "Poisson": #The method is Poisson
            ul = self.PoissonUL(Fit)
            print "Upper limit using Poisson statistic: ", ul

        print "This is an ul on the integral flux in ph/cm2/s"
        return ul #Return the result. This is an ul on the integral flux in ph/cm2/s 
Example #8
def run(Name, Ra, Dec, minEnergy, maxEnergy, SCFile, ulTS, NMtol,evclass,model):

    if evclass == 512:
        irf = "P8R2_ULTRACLEAN_V6"
    elif evclass == 128:
        irf = "P8R2_SOURCE_V6"
    elif evclass == 256:
        irf = "P8R2_CLEAN_V6"
    elif evclass == 1024:
        irf = "P8R2_ULTRACLEANVETO_V6"

    print "This is multiUBLike.\nPlease make sure that you have added a model for your source with the name: " + str(Name)

    print "Calcualting the diffuse response for photons in this bin."    
    my_apps.diffResps['evfile'] = '../' + Name + '_gtmktime.fits'
    my_apps.diffResps['scfile'] = SCFile
    my_apps.diffResps['srcmdl'] = model
    my_apps.diffResps['irfs'] = irf
    my_apps.diffResps.run()

    print "Finished calculating diffuse response. Now moving to conduct a UNBINNED likelihood analysis."

    obs = UnbinnedObs('../' + Name + '_gtmktime.fits', SCFile,expMap= '../' + Name + '_expMap.fits',expCube= '../' + Name + '_ltcube.fits', irfs=irf)
    analysis = UnbinnedAnalysis(obs,model,optimizer='NewMinuit')
    likeObj = pyLike.NewMinuit(analysis.logLike)
    analysis.tol = NMtol
    lkl = analysis.fit(verbosity=0,covar=True,optObject=likeObj)
    analysis.writeXml( Name + '_output_model.xml')
    fit = likeObj.getRetCode()
    print "Likelihood has converged whith Code " + str(likeObj.getRetCode())

    multiLike.printResults(analysis,Name, minEnergy,maxEnergy)
    print "Fit has likelihood: " + str(lkl)

    print "\nThe TS is below the threshold, calculating 95% confidence-level Bayesian upper limit."
    limit,results = IUL.calc_int(analysis,Name,cl=0.95,emin=minEnergy, emax=maxEnergy)
    print "Bayesian upper limit: " + str(limit) + " photons/cm^2/s"
    #Array that returns the results of the unbinned analysis
    #log-likelihood, flux, flux_err, upper limit, test statistic
    Return = [lkl,analysis.flux(Name, emin=minEnergy, emax=maxEnergy),analysis.fluxError(Name, emin=minEnergy, emax=maxEnergy),limit,analysis.Ts(Name,reoptimize=False)]
    return Return
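
The list returned by run() is ordered as in the comment above (log-likelihood, flux, flux error, upper limit, TS). A hedged sketch of how a caller might unpack it; the numbers are placeholders, not real results:

result = [-12345.6, 3.2e-9, 1.1e-9, 5.0e-9, 18.7]   # placeholder values only
log_like, flux, flux_err, upper_limit, ts = result
print "TS = %.1f, flux = %.2e +/- %.2e ph/cm^2/s" % (ts, flux, flux_err)
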
Example #9
    def ComputeUL(self, Fit):
        """Compute an Upper Limit using either the profil or integral method
        See the ST cicerone for more information on the 2 method"""

        self._log('UpperLimit', 'Compute upper Limit')
        #Index given by the user
        print "Assumed index is ", self.config['UpperLimit']['SpectralIndex']

        IdGamma = utils.getParamIndx(Fit, self.obs.srcname, 'Index')
        Fit[IdGamma] = -self.config['UpperLimit'][
            'SpectralIndex']  #set the index
        Fit[IdGamma].setFree(
            0)  #the variable index is frozen to compute the UL

        import scipy.stats
        cl = float(self.config['UpperLimit']['cl'])
        delta = 0.5 * scipy.stats.chi2.isf(1 - 2 * (cl - 0.5), 1)
        print cl, ' ', delta

        if self.config['UpperLimit'][
                'Method'] == "Profile":  #The method is Profile
            import UpperLimits
            ulobject = UpperLimits.UpperLimits(Fit)
            ul, _ = ulobject[self.obs.srcname].compute(emin=self.obs.Emin,
                                                       emax=self.obs.Emax,
                                                       delta=delta)
            #delta=2.71 / 2)
            print "Upper limit using Profile method: "
            print ulobject[self.obs.srcname].results
        if self.config['UpperLimit'][
                'Method'] == "Integral":  #The method is Integral
            import IntegralUpperLimit
            ul, _ = IntegralUpperLimit.calc_int(Fit,
                                                self.obs.srcname,
                                                cl=cl,
                                                verbosity=0)
            print "Upper limit using Integral method: ", ul
        return ul  #Return the result. This is an ul on the integral flux in ph/cm2/s
Example #10
    def processAllObs(self, fix_shape=True, delete_below_ts=None,
                      ul_flux_dflux=0, ul_chi2_ts=None, ul_bayes_ts=4.0,
                      ul_cl=0.95, verbosity=0, emin=0, emax=0,
                      interim_save_filename=None):

        self.logger.info("Processing all observations.")
        for f in self.obsfiles:
            lc = dict()
            lc['config'] = dict()
            lc['config']['fix_shape']       = fix_shape
            lc['config']['delete_below_ts'] = delete_below_ts
            lc['config']['ul_flux_dflux']   = ul_flux_dflux
            lc['config']['ul_chi2_ts']      = ul_chi2_ts
            lc['config']['ul_bayes_ts']     = ul_bayes_ts
            lc['config']['ul_cl']           = ul_cl
            lc['config']['emin']            = emin
            lc['config']['emax']            = emax
            lc['config']['files']           = f
            #lc['config']['argv']            = sys.argv

            lc['e_min'] = emin;
            lc['e_max'] = emax;

            if type(f) != list:
                [ obs, like ] = self.loadObs(f,verbosity)
                lc['t_min'] = obs.roiCuts().minTime()
                lc['t_max'] = obs.roiCuts().maxTime()
                if (emin == 0 or emax == 0):
                    lc['e_min'] = obs.roiCuts().getEnergyCuts()[0];
                    lc['e_max'] = obs.roiCuts().getEnergyCuts()[1];

            else:
                lc['t_min'] = None
                lc['t_max'] = None
                like = SL.SummedLikelihood(self.optimizer)
                for ff in f:
                    [ obs, like1 ] = self.loadObs(ff,verbosity)
                    tmin = obs.roiCuts().minTime()
                    tmax = obs.roiCuts().maxTime()
                    if lc['t_min'] == None or tmin<lc['t_min']:
                        lc['t_min'] = tmin
                    if lc['t_max'] == None or tmax>lc['t_max']:
                        lc['t_max'] = tmax
                    if (lc['e_min'] == 0 or lc['e_max'] == 0):
                        ecuts = obs.roiCuts().getEnergyCuts()
                        lc['e_min'] = ecuts[0]
                        lc['e_max'] = ecuts[1]
                    elif (emin == 0 or emax == 0):
                        ecuts = obs.roiCuts().getEnergyCuts()
                        lc['e_min'] = max(lc['e_min'], ecuts[0])
                        lc['e_max'] = min(lc['e_max'], ecuts[1])
                    like.addComponent(like1)

            emin = lc['e_min']
            emax = lc['e_max']

            like.tol = like.tol*0.01;

            if verbosity > 1:
                print '- Time:',lc['t_min'],'to',lc['t_max']

            src = like[self.likelihoodConf['sourcename']]
            if src == None:
                raise NameError("No source \""+self.likelihoodConf['sourcename']+"\" in model "+
                                self.model)
            srcfreepar=like.freePars(self.likelihoodConf['sourcename'])
            srcnormpar=like.normPar(self.likelihoodConf['sourcename'])
            if len(srcfreepar)>0:
                like.setFreeFlag(self.likelihoodConf['sourcename'], srcfreepar, 0)
                like.syncSrcParams(self.likelihoodConf['sourcename'])


            meanvalue = srcnormpar.getValue()
            meanerror = srcnormpar.error()
            if meanerror == 0:
                self.logger.critical("The error on the normalization for your source is 0!  You need to do a global fit first (with quickLike) and provide the final XML file (<basename>_likeMinuit.xml) with errors included before you run compute.")
                return

            lc['original']=dict()
            lc['original']['normpar_init_value'] = meanvalue
            lc['original']['normpar_name'] = srcnormpar.getName()
            lc['original']['nfree'] = len(like.freePars(self.likelihoodConf['sourcename']))
            lc['original']['flux'] = like[self.likelihoodConf['sourcename']].flux(emin, emax)
            lc['original']['logL'] = like.logLike.value()
            if verbosity > 1:
                print '- Original log Like:',lc['original']['logL']

            if fix_shape:
                if verbosity > 1:
                    print '- Fixing spectral shape parameters'
                sync_name = ""
                for p in like.params():
                    if sync_name != "" and sync_name != p.srcName:
                        like.syncSrcParams(sync_name)
                        sync_name = ""
                    if(p.isFree() and p.srcName!=self.likelihoodConf['sourcename'] and
                       p.getName()!=like.normPar(p.srcName).getName()):
                        if verbosity > 2:
                            print '-- '+p.srcName+'.'+p.getName()
                        p.setFree(False)
                        sync_name = p.srcName
                if sync_name != "" and sync_name != p.srcName:
                    like.syncSrcParams(sync_name)
                    sync_name = ""

           # ----------------------------- FIT 1 -----------------------------

            if verbosity > 1:
                print '- Fit 1 - All parameters of',self.likelihoodConf['sourcename'],'fixed'
            like.fit(max(verbosity-3, 0))

            lc['allfixed'] = dict()
            lc['allfixed']['logL'] = like.logLike.value()
            fitstat = like.optObject.getRetCode()
            if verbosity > 1 and fitstat != 0:
                print "- Fit 1 - Minimizer returned with code: ", fitstat
            lc['allfixed']['fitstat'] = fitstat
            if verbosity > 1:
                print '- Fit 1 - log Like:',lc['allfixed']['logL']

            if delete_below_ts:
                frozensrc = []
                if verbosity > 1:
                    print '- Deleting point sources with TS<'+str(delete_below_ts)
                deletesrc = []
                for s in like.sourceNames():
                    freepars = like.freePars(s)
                    if(s!=self.likelihoodConf['sourcename'] and like[s].src.getType() == 'Point'
                       and len(freepars)>0):
                        ts = like.Ts(s)
                        if ts<delete_below_ts:
                            deletesrc.append(s)
                            if verbosity > 2:
                                print '--',s,'(TS='+str(ts)+')'
                if deletesrc:
                    for s in deletesrc:
                        like.deleteSource(s)
                    if verbosity > 1:
                        print '- Fit 1 - refitting model'
                    like.fit(max(verbosity-3, 0))
                    lc['allfixed']['fitstat_initial'] = \
                        lc['allfixed']['fitstat']
                    fitstat = like.optObject.getRetCode()
                    if verbosity > 1 and fitstat != 0:
                        print "- Fit 1 - Minimizer returned with code: ",\
                            fitstat
                    lc['allfixed']['fitstat'] = fitstat
                    lc['allfixed']['logL'] = like.logLike.value()
                    if verbosity > 1:
                        print '- Fit 1 - log Like:',lc['allfixed']['logL']


            lc['allfixed']['flux']=like[self.likelihoodConf['sourcename']].flux(emin, emax)
            pars = dict()
            for pn in like[self.likelihoodConf['sourcename']].funcs['Spectrum'].paramNames:
                p = like[self.likelihoodConf['sourcename']].funcs['Spectrum'].getParam(pn)
                pars[p.getName()] = dict(name      = p.getName(),
                                         value     = p.getTrueValue(),
                                         error     = p.error()*p.getScale(),
                                         free      = p.isFree())
            lc['allfixed']['pars'] = pars
    

            # ------------------ N SIGMA PROFILE LIKELIHOOD -------------------

            prof_sigma = (-1,-0.5,0,0.5,1.0)
            lc['profile'] = dict();
            lc['profile']['sigma'] = []
            lc['profile']['value'] = []
            lc['profile']['logL'] = []
            lc['profile']['flux'] = []
            lc['profile']['fitstat'] = []

            if verbosity > 1:
                print '- Fit 1 - generating %d point likelihood profile'%\
                      len(prof_sigma)
            for sigma in prof_sigma:
                val = sigma*meanerror+meanvalue
                if val < srcnormpar.getBounds()[0]:
                    val = srcnormpar.getBounds()[0]
                if (lc['profile']['value']
                    and lc['profile']['value'][-1]==val):
                    continue
                lc['profile']['value'].append(val)
                lc['profile']['sigma'].append((val-meanvalue)/meanerror)
                if(val == meanvalue):
                    lc['profile']['logL'].append(lc['allfixed']['logL'])
                    lc['profile']['flux'].append(lc['allfixed']['flux'])
                else:
                    srcnormpar.setValue(val)
                    like.syncSrcParams(self.likelihoodConf['sourcename'])
                    like.fit(max(verbosity-3, 0))
                    fitstat = like.optObject.getRetCode()
                    if verbosity > 2 and fitstat != 0:
                        print "- Fit 1 - profile: Minimizer returned code: ",\
                            fitstat
                    lc['profile']['fitstat'].append(fitstat)
                    lc['profile']['logL'].append(like.logLike.value())
                    lc['profile']['flux'].append(like[self.likelihoodConf['sourcename']].\
                                              flux(emin, emax))
                if verbosity > 2:
                    print '- Fit 1 - profile: %+g, %f -> %f'%\
                          (sigma,lc['profile']['value'][-1],
                           lc['profile']['logL'][-1]-lc['allfixed']['logL'])

            srcnormpar.setValue(meanvalue)
            like.syncSrcParams(self.likelihoodConf['sourcename'])

            # ----------------------------- FIT 2 -----------------------------

            if verbosity > 1:
                print '- Fit 2 - Normalization parameter of',\
                      self.likelihoodConf['sourcename'],'free'
            srcnormpar.setFree(1)
            like.syncSrcParams(self.likelihoodConf['sourcename'])
            like.fit(max(verbosity-3, 0))
            lc['normfree'] = dict()
            fitstat = like.optObject.getRetCode()
            if verbosity > 1 and fitstat != 0:
                print "- Fit 2 - Minimizer returned with code: ", fitstat
            lc['normfree']['fitstat'] = fitstat
            lc['normfree']['logL'] = like.logLike.value()
            lc['normfree']['ts'] = like.Ts(self.likelihoodConf['sourcename'])
            lc['normfree']['flux_dflux'] = \
                srcnormpar.getValue()/srcnormpar.error()
            if verbosity > 1:
                print '- Fit 2 - log Like:',lc['normfree']['logL'],\
                      '(TS='+str(lc['normfree']['ts'])+')'

            lc['normfree']['nfree']=len(like.freePars(self.likelihoodConf['sourcename']))
            lc['normfree']['flux']=like[self.likelihoodConf['sourcename']].flux(emin, emax)
            pars = dict()
            for pn in like[self.likelihoodConf['sourcename']].funcs['Spectrum'].paramNames:
                p = like[self.likelihoodConf['sourcename']].funcs['Spectrum'].getParam(pn)
                pars[p.getName()] = dict(name      = p.getName(),
                                         value     = p.getTrueValue(),
                                         error     = p.error()*p.getScale(),
                                         free      = p.isFree())
            lc['normfree']['pars'] = pars
            ul_type = None
            if ul_bayes_ts != None and lc['normfree']['ts'] < ul_bayes_ts:

                ul_type = 'bayesian'
                [ul_flux, ul_results] = \
                    IUL.calc_int(like,self.likelihoodConf['sourcename'],cl=ul_cl,
                                                skip_global_opt=True,
                                                verbosity = max(verbosity-2,0),
                                                emin=emin, emax=emax,
                                            poi_values = lc['profile']['value'])
            elif ( ul_flux_dflux != None and \
                   lc['normfree']['flux_dflux'] < ul_flux_dflux ) or \
                   ( ul_chi2_ts != None and lc['normfree']['ts'] < ul_chi2_ts):
                ul_type = 'chi2'
                [ul_flux, ul_results] = \
                    IUL.calc_chi2(like,self.likelihoodConf['sourcename'],cl=ul_cl,
                                                 skip_global_opt=True,
                                                 verbosity = max(verbosity-2,0),
                                                 emin=emin, emax=emax)
            if ul_type != None:
                lc['normfree']['ul'] = dict(flux    = ul_flux,
                                            results = ul_results,
                                            type    = ul_type)

            # ----------------------------- FIT 3 -----------------------------

            if verbosity > 1:
                print '- Fit 3 - All parameters of',self.likelihoodConf['sourcename'],'free'
            like.setFreeFlag(self.likelihoodConf['sourcename'], srcfreepar, 1)
            like.syncSrcParams(self.likelihoodConf['sourcename'])
            like.fit(max(verbosity-3, 0))
            lc['allfree'] = dict()
            fitstat = like.optObject.getRetCode()
            if verbosity > 1 and fitstat != 0:
                print "- Fit 3 - Minimizer returned with code: ", fitstat
            lc['allfree']['fitstat'] = fitstat
            lc['allfree']['logL'] = like.logLike.value()
            lc['allfree']['ts'] = like.Ts(self.likelihoodConf['sourcename'])
            if verbosity > 1:
                print '- Fit 3 - log Like:',lc['allfree']['logL'],\
                      '(TS='+str(lc['allfree']['ts'])+')'
            lc['allfree']['nfree']=len(like.freePars(self.likelihoodConf['sourcename']))
            lc['allfree']['flux']=like[self.likelihoodConf['sourcename']].flux(emin, emax)
            pars = dict()
            for pn in like[self.likelihoodConf['sourcename']].funcs['Spectrum'].paramNames:
                p = like[self.likelihoodConf['sourcename']].funcs['Spectrum'].getParam(pn)
                pars[p.getName()] = dict(name      = p.getName(),
                                         value     = p.getTrueValue(),
                                         error     = p.error()*p.getScale(),
                                         free      = p.isFree())
            lc['allfree']['pars'] = pars

            self.lc.append(lc)
            if interim_save_filename != None:
                self.saveProcessedObs(interim_save_filename)
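
In Fit 2 above the type of upper limit is chosen from the TS and the flux/dflux ratio of the normalization-free fit: a Bayesian limit (IUL.calc_int) when the TS is below ul_bayes_ts, otherwise a chi2/profile limit (IUL.calc_chi2) when the flux/dflux or chi2 TS thresholds are crossed. A standalone sketch of just that branching, with the same defaults as processAllObs:

def choose_ul_type(ts, flux_dflux,
                   ul_bayes_ts=4.0, ul_flux_dflux=0, ul_chi2_ts=None):
    # Mirrors the ul_type selection in processAllObs; returns None when the
    # source is considered detected and no upper limit is computed.
    if ul_bayes_ts is not None and ts < ul_bayes_ts:
        return 'bayesian'    # IUL.calc_int
    if (ul_flux_dflux is not None and flux_dflux < ul_flux_dflux) or \
       (ul_chi2_ts is not None and ts < ul_chi2_ts):
        return 'chi2'        # IUL.calc_chi2
    return None

print choose_ul_type(ts=2.3, flux_dflux=1.5)    # -> bayesian
print choose_ul_type(ts=30.0, flux_dflux=8.0)   # -> None
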
Example #11
    def processAllObs(self, fix_shape=True, delete_below_ts=None,
                      ul_flux_dflux=0,ul_chi2_ts=None, ul_bayes_ts=4.0,
                      ul_cl = 0.95, verbosity=0, ul_optimizer=None):
        for f in self.obsfiles:
            spect = dict()
            spect['config']=dict()
            spect['config']['fix_shape'] = fix_shape
            spect['config']['delete_below_ts'] = delete_below_ts
            spect['config']['ul_flux_dflux'] = ul_flux_dflux
            spect['config']['ul_chi2_ts'] = ul_chi2_ts
            spect['config']['ul_bayes_ts'] = ul_bayes_ts
            spect['config']['ul_cl'] = ul_cl
            spect['config']['files'] = f

            [ obs, like ] = self.loadObs(f,verbosity)

            spect['t_min'] = obs.roiCuts().minTime()
            spect['t_max'] = obs.roiCuts().maxTime()
            [emin, emax] = obs.roiCuts().getEnergyCuts()
            spect['e_min'] = emin
            spect['e_max'] = emax
            
            if verbosity > 1:
                print '- Time:',spect['t_min'],'to',spect['t_max']
                print '- Energy:',emin,'to',emax,'MeV'

            src = like[self.srcName]
            if src == None:
                raise NameError("No source \""+self.srcName+"\" in model "+
                                self.model)
            srcnormpar=like.normPar(self.srcName)

            spect['original']=dict()
            spect['original']['normpar_init_value'] = srcnormpar.getValue()
            spect['original']['normpar_name'] = srcnormpar.getName()
            spect['original']['flux'] = like[self.srcName].flux(emin, emax)
            spect['original']['logL'] = like.logLike.value()
            if verbosity > 1:
                print '- Original log Like:',spect['original']['logL']

            if fix_shape:
                if verbosity > 1:
                    print '- Fixing spectral shape parameters'
                sync_name = ""
                for p in like.params():
                    if sync_name != "" and sync_name != p.srcName:
                        like.syncSrcParams(sync_name)
                        sync_name = ""
                    if(p.isFree() and #p.srcName!=self.srcName and
                       p.getName()!=like.normPar(p.srcName).getName()):
                        if verbosity > 2:
                            print '-- '+p.srcName+'.'+p.getName()
                        p.setFree(False)
                        sync_name = p.srcName
                if sync_name != "" and sync_name != p.srcName:
                    like.syncSrcParams(sync_name)
                    sync_name = ""

            # ------------------------------ FIT ------------------------------

            if verbosity > 1:
                print '- Fit - starting'
            like.fit(max(verbosity-3, 0))

            spect['fit'] = dict()
            spect['fit']['logL'] = like.logLike.value()
            if verbosity > 1:
                print '- Fit - log Like:',spect['fit']['logL']

            if delete_below_ts:
                frozensrc = []
                if verbosity > 1:
                    print '- Deleting point sources with TS<'+str(delete_below_ts)
                deletesrc = []
                for s in like.sourceNames():
                    freepars = like.freePars(s)
                    if(s!=self.srcName and like[s].type == 'PointSource'
                       and len(freepars)>0):
                        ts = like.Ts(s)
                        if ts<delete_below_ts:
                            deletesrc.append(s)
                            if verbosity > 2:
                                print '--',s,'(TS='+str(ts)+')'
                if deletesrc:
                    for s in deletesrc:
                        like.deleteSource(s)
                    if verbosity > 1:
                        print '- Fit - refitting model'
                    like.fit(max(verbosity-3, 0))
                    spect['fit']['logL'] = like.logLike.value()
                    if verbosity > 1:
                        print '- Fit - log Like:',spect['fit']['logL']
                        
            spect['fit']['ts']=like.Ts(self.srcName)
            if verbosity > 1:
                print '- TS of %s: %f'%(self.srcName,spect['fit']['ts'])

            spect['fit']['flux']=like[self.srcName].flux(emin, emax)
            emid = math.sqrt(emin*emax)
            spect['fit']['e_mid']=emid
            # Note: be careful about the meaning here - it is the
            # differential flux in the middle of the energy bin, not a
            # flux error. This contradicts the meaning in 'flux_dflux' 
            spect['fit']['dflux'] = \
                like[self.srcName].flux(emid*(1-0.001),emid*(1+0.001))/(emid*0.002)
            spect['fit']['flux_dflux'] = \
                srcnormpar.getValue()/srcnormpar.error()
            pars = dict()
            for pn in like[self.srcName].funcs['Spectrum'].paramNames:
                p = like[self.srcName].funcs['Spectrum'].getParam(pn)
                pars[p.getName()] = dict(name      = p.getName(),
                                         value     = p.getTrueValue(),
                                         error     = p.error()*p.getScale(),
                                         free      = p.isFree())
            spect['fit']['pars'] = pars

            ul_type = None
            if ul_bayes_ts != None and spect['fit']['ts'] < ul_bayes_ts:
                ul_type = 'bayesian'
                [ul_flux, ul_results] = \
                    IntegralUpperLimit.calc_int(like,self.srcName,cl=ul_cl,
                                                skip_global_opt=True,
                                                verbosity = max(verbosity-2,0),
                                                emin=emin, emax=emax,
                                                profile_optimizer=ul_optimizer)
            elif ( ul_flux_dflux != None and \
                   spect['fit']['flux_dflux'] < ul_flux_dflux ) or \
                   ( ul_chi2_ts != None and spect['fit']['ts'] < ul_chi2_ts):
                ul_type = 'chi2'
                [ul_flux, ul_results] = \
                    IntegralUpperLimit.calc_chi2(like,self.srcName,cl=ul_cl,
                                                skip_global_opt=True,
                                                verbosity = max(verbosity-2,0),
                                                emin=emin, emax=emax,
                                                profile_optimizer=ul_optimizer)
            if ul_type != None:
                spect['fit']['ul'] = dict(flux    = ul_flux,
                                          results = ul_results,
                                          type    = ul_type)

            self.spectra.append(spect)
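
The dflux stored above is the differential flux at the bin's geometric-mean energy, approximated by the integral flux over a +/-0.1% window divided by the window width (it is not a flux error, as the comment notes). A standalone sketch checking that approximation against an analytic power law (normalization, index and band are illustrative):

import math

def integral_flux(n0, gamma, e1, e2):
    # Integral of a toy power law n0 * E**-gamma between e1 and e2.
    return n0 / (1.0 - gamma) * (e2**(1.0 - gamma) - e1**(1.0 - gamma))

emin, emax = 100.0, 1.0e5
emid = math.sqrt(emin * emax)
n0, gamma = 1e-9, 2.2

approx = integral_flux(n0, gamma, emid * (1 - 0.001), emid * (1 + 0.001)) / (emid * 0.002)
exact = n0 * emid**-gamma
print approx, exact    # agree to roughly one part in 1e6
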
Example #12
def runFermiTools(Name, RA, DEC, minEnergy, maxEnergy, SCFile, radius, binsz,
                  TSTART, TSTOP, Evfile, bins, zmax, evclass, evtype, TSul,
                  NMtol, lc_bin_num, runMRM):

    print "Working on bin " + str(lc_bin_num) + " for the light curve."

    f = FermiObject()
    """

        Following steps execute Fermi Tool gtselect

    """

    print('\nWorking on file.')
    print('Cutting file to fit desired parameters . . .\n')
    f._setEvclass(evclass)
    f._setEvtype(evtype)
    f._setRa(RA)
    f._setDec(DEC)
    f._setRad(radius)
    f._setEmin(minEnergy)
    f._setEmax(maxEnergy)
    f._setZmax(zmax)
    f._setTmin(TSTART)
    f._setTmax(TSTOP)
    f._setInfile(Evfile)
    f._setOutfile(Name + '_gtselect' + str(lc_bin_num) + '_lc.fits')
    f.amonSelect()
    print(
        'File cuts have been made. Now making cuts for GTI using spacecraft file.'
    )
    """

        Following steps execute Fermi Tool gtmktime

    """

    f._setScfile(SCFile)
    f._setRoicut('no')
    f._setEvfile(Name + '_gtselect' + str(lc_bin_num) + '_lc.fits')
    f._setOutfile(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits')
    ###############################################
    #         Filter expression                   #
    Filter = '(DATA_QUAL>0)&&(LAT_CONFIG==1)'
    ###############################################
    f._setFilter(Filter)
    print('Working on file ' + str(f.getOutfile()) + '. . .')
    f.amonTime()
    print(
        'File cuts have been made. Now beginning construction of the counts map from event data.'
    )
    """

        Following steps execute Fermi Tool gtbin
        to create the counts map

    """

    f._setAlgorithm('CMAP')
    f._setEvfile(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits')
    f._setOutfile(Name + '_cmap' + str(lc_bin_num) + '_lc.fits')
    f._setScfile('NONE')
    num_pix = int((2 * radius) / float(binsz))
    print "Counts map is " + str(num_pix) + " by " + str(num_pix) + " pixels."
    f._setNxpix(num_pix)
    f._setNypix(num_pix)
    f._setBinsz(binsz)
    f._setCoordsys('CEL')
    f._setAxisrot(0)
    f._setProj('AIT')
    f.amonBincmap()
    print(
        'Counts map is complete. Now beginning construction of the counts cube.')
    """

        Following steps execute Fermi Tool gtbin
        to create counts cube (3D counts map).

    """

    f._setAlgorithm('CCUBE')
    f._setOutfile(Name + '_ccube' + str(lc_bin_num) + '_lc.fits')
    pix = int((sqrt(2) * radius) / float(binsz))
    print "Counts cube is " + str(pix) + " by " + str(pix) + " pixels."
    f._setNxpix(pix)
    f._setNypix(pix)
    ebin = int(10 * log10(maxEnergy / minEnergy))
    print "There are " + str(ebin) + " logarithmically uniform energy bins."
    f._setEnumbins(ebin)
    f.amonBinccube()
    print('Counts cube is complete.')

    print('Using XML model from whole dataset.\n Moving on to gtltcube.')

    print "Now working on ltcube file using gtltcube\n"

    my_apps.expCube['evfile'] = Name + '_gtmktime' + str(
        lc_bin_num) + '_lc.fits'
    my_apps.expCube['scfile'] = SCFile
    my_apps.expCube['outfile'] = Name + '_ltcube' + str(
        lc_bin_num) + '_lc.fits'
    my_apps.expCube['dcostheta'] = 0.025
    my_apps.expCube['binsz'] = 1
    my_apps.expCube['phibins'] = 0
    my_apps.expCube['zmax'] = zmax
    my_apps.expCube['chatter'] = 0
    my_apps.expCube.run()

    print "\nltcube complete.\nMoving to compute exposure map for whole sky with gtexpcube2.\n"

    f._setInfile(Name + '_ltcube' + str(lc_bin_num) + '_lc.fits')
    cubePix = int((2 * radius + 20) / binsz)
    f._setNxpix(cubePix)
    f._setNypix(cubePix)
    f._setBinsz(binsz)
    f._setCoordsys('CEL')
    f._setRa(RA)
    f._setDec(DEC)
    f._setAxisrot(0)
    f._setProj('AIT')
    f._setEmin(minEnergy)
    f._setEmax(maxEnergy)
    f._setEnumbins(ebin)
    f._setOutfile(Name + '_expcube' + str(lc_bin_num) + '_lc.fits')
    f._setIrfs('P8R2_SOURCE_V6')
    f.amonExpcube2()

    print "Finnished making exposure map.\n"

    print "Now begining computation of model counts map with gtsrcmaps.\n"

    f._setScfile(SCFile)
    f._setExpcube(Name + '_ltcube' + str(lc_bin_num) + '_lc.fits')
    f._setCmap(Name + '_ccube' + str(lc_bin_num) + '_lc.fits')
    f._setBexpmap(Name + '_expcube' + str(lc_bin_num) + '_lc.fits')
    f._setOutfile(Name + '_srcmaps' + str(lc_bin_num) + '_lc.fits')
    f._setIrfs('CALDB')
    f._setSrcmdl(Name + '_output_model.xml')
    f.amonSrcmaps()

    print "Finished srcmaps. Now moving to conduct a BINNED likelihood analysis."

    obs = BinnedObs(binnedExpMap=Name + '_expcube' + str(lc_bin_num) +
                    '_lc.fits',
                    expCube=Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
                    srcMaps=Name + '_srcmaps' + str(lc_bin_num) + '_lc.fits',
                    irfs='P8R2_SOURCE_V6')
    analysis = BinnedAnalysis(obs,
                              srcModel=Name + '_output_model.xml',
                              optimizer='NEWMINUIT')
    likeObj = pyLike.NewMinuit(analysis.logLike)
    analysis.tol = NMtol
    analysis.fit(verbosity=0, covar=True, optObject=likeObj)
    fit = likeObj.getRetCode()
    print "Likelihood has converged whith Code " + str(likeObj.getRetCode())
    Flux = analysis.flux(Name, emin=minEnergy, emax=maxEnergy)
    Ferr = analysis.fluxError(Name, emin=minEnergy, emax=maxEnergy)
    MeVtoErg = 1.602e-6
    ef = analysis.energyFlux(Name, minEnergy, maxEnergy) * MeVtoErg
    ef_err = analysis.energyFluxError(Name, minEnergy, maxEnergy) * MeVtoErg
    UL = False
    TSUM = TSTART + TSTOP
    TMID = TSUM / 2
    limit = Flux
    if analysis.Ts(Name) < TSul:
        UL = True
        limit, results = IUL.calc_int(analysis,
                                      Name,
                                      cl=0.90,
                                      emin=minEnergy,
                                      emax=maxEnergy)

    #Run gtselect to make smaller data fits file to compute the exposure, set to 3 degrees around source of interest
    f._setRad(3)
    f._setInfile(Evfile)
    f._setOutfile(Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits')
    print "Creating file " + Name + "_gtselect_exposure.fits"
    f.amonSelect()

    #Run gtmaketime on this small region
    f._setEvfile(Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits')
    f._setOutfile(Name + '_gtmktime' + str(lc_bin_num) + '_exposure.fits')
    print('Working on file ' + str(f.getOutfile()))
    f.amonTime()

    my_apps.evtbin['algorithm'] = 'LC'
    my_apps.evtbin['evfile'] = f.getOutfile()
    my_apps.evtbin['outfile'] = Name + '_LC' + str(
        lc_bin_num) + '_exposure.fits'
    my_apps.evtbin['scfile'] = f.getScfile()
    my_apps.evtbin['tbinalg'] = 'LIN'
    my_apps.evtbin['tstart'] = f.getTmin()
    my_apps.evtbin['tstop'] = f.getTmax()
    my_apps.evtbin['dtime'] = TSTOP - TSTART
    my_apps.evtbin.run()

    yes = subprocess.call([
        'gtexposure', Name + '_LC' + str(lc_bin_num) + '_exposure.fits',
        f.getScfile(), 'P8R2_SOURCE_V6', Name + '_output_model.xml', Name
    ])
    if yes == 0:
        print "Exposure map has been created"
    else:
        print "Subprocessing failed. Unable to create exposure map with gtexposure."

    print "Time bin complete."

    hdulist = pyfits.open(Name + '_LC' + str(lc_bin_num) + '_exposure.fits')
    tbdata = hdulist[1].data
    z = tbdata['EXPOSURE']
    exp = z[0]

    ################################################################
    #           This portion prints to the text file               #
    ################################################################

    f = open("lc_output.txt", "a")
    f.write(
        str(Flux) + ',' + str(Ferr) + ',' + str(ef) + ',' + str(ef_err) + ',' +
        str(limit) + ',' + str(analysis.Ts(Name)) + ',' + str(UL) + ',' +
        str(TMID) + ',' + str(exp) + '\n')
    f.close()
    print "Likelihood analysis on this band is complete."
    if runMRM == True:
        runMakeResMap(Name, RA, DEC, radius, binsz, minEnergy, maxEnergy,
                      lc_bin_num)
    else:
        pass

    yes = subprocess.call([
        'rm', Name + '_gtselect' + str(lc_bin_num) + '_lc.fits',
        Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits',
        Name + '_cmap' + str(lc_bin_num) + '_lc.fits',
        Name + '_ccube' + str(lc_bin_num) + '_lc.fits',
        Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
        Name + '_expcube' + str(lc_bin_num) + '_lc.fits',
        Name + '_LC' + str(lc_bin_num) + '_exposure.fits',
        Name + '_srcmaps' + str(lc_bin_num) + '_lc.fits',
        Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits',
        Name + '_gtmktime' + str(lc_bin_num) + '_exposure.fits'
    ])
    if yes == 0:
        print 'Files for bin have been deleted'
    else:
        print "Subprocessing failed. Unable to delete files for bin."
Example #13
def runFermiTools(Name, RA, DEC, minEnergy, maxEnergy, SCFile, radius, binsz,
                  TSTART, TSTOP, Evfile, bins, zmax, evclass, evtype, TSul,
                  NMtol, lc_bin_num, runMRM):

    print "Working on bin " + str(lc_bin_num) + " for the light curve."

    f = FermiObject()
    """

        Following steps execute Fermi Tool gtselect

    """

    print('\nWorking on file.')
    print('Cutting file to fit desired parameters . . .\n')
    f._setEvclass(evclass)
    f._setEvtype(evtype)
    f._setRa(RA)
    f._setDec(DEC)
    f._setRad(radius)
    f._setEmin(minEnergy)
    f._setEmax(maxEnergy)
    f._setZmax(zmax)
    f._setTmin(TSTART)
    f._setTmax(TSTOP)
    f._setInfile(Evfile)
    f._setOutfile(Name + '_gtselect' + str(lc_bin_num) + '_lc.fits')
    f.amonSelect()
    print(
        'File cuts have been made. Now making cuts for GTI using spacecraft file.'
    )
    """

        Following steps execute Fermi Tool gtmktime

    """

    f._setScfile(SCFile)
    f._setRoicut('no')
    f._setEvfile(Name + '_gtselect' + str(lc_bin_num) + '_lc.fits')
    f._setOutfile(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits')
    ###############################################
    #         Filter expression                   #
    Filter = '(DATA_QUAL>0)&&(LAT_CONFIG==1)'
    ###############################################
    f._setFilter(Filter)
    print('Working on file ' + str(f.getOutfile()) + '. . .')
    f.amonTime()
    print('File cuts have been made.')
    print('Using XML model from whole dataset.\n Moving on to gtltcube.')

    print "Now working on ltcube file using gtltcube\n"
    my_apps.expCube['evfile'] = Name + '_gtmktime' + str(
        lc_bin_num) + '_lc.fits'
    my_apps.expCube['scfile'] = SCFile
    my_apps.expCube['outfile'] = Name + '_ltcube' + str(
        lc_bin_num) + '_lc.fits'
    my_apps.expCube['dcostheta'] = 0.025
    my_apps.expCube['binsz'] = 1
    my_apps.expCube['phibins'] = 0
    my_apps.expCube['zmax'] = zmax
    my_apps.expCube['chatter'] = 0
    my_apps.expCube.run()

    print "\nltcube complete.\nMoving to compute exposure map with gtexpmap.\n"
    my_apps.expMap['evfile'] = Name + '_gtmktime' + str(
        lc_bin_num) + '_lc.fits'
    my_apps.expMap['scfile'] = SCFile
    my_apps.expMap['expcube'] = Name + '_ltcube' + str(lc_bin_num) + '_lc.fits'
    my_apps.expMap['outfile'] = Name + '_expMap' + str(lc_bin_num) + '_lc.fits'
    my_apps.expMap['irfs'] = 'CALDB'
    my_apps.expMap['srcrad'] = radius + 10
    my_apps.expMap['nlong'] = 4 * (radius + 10)
    my_apps.expMap['nlat'] = 4 * (radius + 10)
    ebin = int(10 * log10(maxEnergy / minEnergy))
    print "There are " + str(ebin) + " energy bans."
    my_apps.expMap['nenergies'] = ebin
    my_apps.expMap.run()
    print "Finnished making exposure map.\n"

    print "Calcualting the diffuse response for photons in this bin."
    my_apps.diffResps['evfile'] = Name + '_gtmktime' + str(
        lc_bin_num) + '_lc.fits'
    my_apps.diffResps['scfile'] = SCFile
    my_apps.diffResps['srcmdl'] = Name + '_output_model.xml'
    my_apps.diffResps['irfs'] = 'CALDB'
    my_apps.diffResps.run()

    print "Finished calculating diffuse response. Now moving to conduct a UNBINNED likelihood analysis."

    obs = UnbinnedObs(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits',
                      SCFile,
                      expMap=Name + '_expMap' + str(lc_bin_num) + '_lc.fits',
                      expCube=Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
                      irfs='P8R2_SOURCE_V6')
    analysis = UnbinnedAnalysis(obs,
                                Name + '_output_model.xml',
                                optimizer='NewMinuit')
    likeObj = pyLike.NewMinuit(analysis.logLike)
    analysis.tol = NMtol
    LIKE = analysis.fit(verbosity=0, covar=True, optObject=likeObj)
    fit = likeObj.getRetCode()
    print "Likelihood has converged whith Code " + str(likeObj.getRetCode())
    Flux = analysis.flux(Name, emin=minEnergy, emax=maxEnergy)
    Ferr = analysis.fluxError(Name, emin=minEnergy, emax=maxEnergy)
    MeVtoErg = 1.602e-6
    ef = analysis.energyFlux(Name, minEnergy, maxEnergy) * MeVtoErg
    ef_err = analysis.energyFluxError(Name, minEnergy, maxEnergy) * MeVtoErg
    UL = False
    TSUM = TSTART + TSTOP
    TMID = TSUM / 2
    limit = Flux
    if analysis.Ts(Name) < TSul:
        UL = True
        limit, results = IUL.calc_int(analysis,
                                      Name,
                                      cl=0.90,
                                      emin=minEnergy,
                                      emax=maxEnergy)

    #Do second likelihood with constant flux to calculate the TS variability
    obsC = UnbinnedObs(Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits',
                       SCFile,
                       expMap=Name + '_expMap' + str(lc_bin_num) + '_lc.fits',
                       expCube=Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
                       irfs='P8R2_SOURCE_V6')
    analysisC = UnbinnedAnalysis(obsC,
                                 Name + '_var_model.xml',
                                 optimizer='NewMinuit')
    likeObjC = pyLike.NewMinuit(analysisC.logLike)
    analysisC.tol = NMtol
    LIKEC = analysisC.fit(verbosity=0, covar=True, optObject=likeObjC)

    #Run gtselect to make smaller data fits file to compute the exposure, set to 3 degrees around source of interest
    f._setRad(3)
    f._setInfile(Evfile)
    f._setOutfile(Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits')
    print "Creating file " + Name + "_gtselect_exposure.fits"
    f.amonSelect()

    #Run gtmaketime on this small region
    f._setEvfile(Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits')
    f._setOutfile(Name + '_gtmktime' + str(lc_bin_num) + '_exposure.fits')
    print('Working on file ' + str(f.getOutfile()))
    f.amonTime()

    my_apps.evtbin['algorithm'] = 'LC'
    my_apps.evtbin['evfile'] = f.getOutfile()
    my_apps.evtbin['outfile'] = Name + '_LC' + str(
        lc_bin_num) + '_exposure.fits'
    my_apps.evtbin['scfile'] = f.getScfile()
    my_apps.evtbin['tbinalg'] = 'LIN'
    my_apps.evtbin['tstart'] = f.getTmin()
    my_apps.evtbin['tstop'] = f.getTmax()
    my_apps.evtbin['dtime'] = TSTOP - TSTART
    my_apps.evtbin.run()

    yes = subprocess.call([
        'gtexposure', Name + '_LC' + str(lc_bin_num) + '_exposure.fits',
        f.getScfile(), 'P8R2_SOURCE_V6', Name + '_output_model.xml', Name
    ])
    if yes == 0:
        print "Exposure map has been created"
    else:
        print "Subprocessing failed. Unable to create exposure map with gtexposure."

    print "Time bin complete."

    hdulist = pyfits.open(Name + '_LC' + str(lc_bin_num) + '_exposure.fits')
    tbdata = hdulist[1].data
    z = tbdata['EXPOSURE']
    exp = z[0]

    ################################################################
    #           This portion prints to the text file               #
    ################################################################

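    # Columns: flux, flux error, energy flux, energy flux error, limit, TS, UL flag,
    # bin midpoint, exposure, -log(like), -log(like) for the constant-flux model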
    f = open("lc_output.txt", "a")
    f.write(
        str(Flux) + ',' + str(Ferr) + ',' + str(ef) + ',' + str(ef_err) + ',' +
        str(limit) + ',' + str(analysis.Ts(Name)) + ',' + str(UL) + ',' +
        str(TMID) + ',' + str(exp) + ',' + str(LIKE) + ',' + str(LIKEC) + '\n')
    f.close()
    print "Likelihood analysis on this band is complete."

    yes = subprocess.call([
        'rm', Name + '_gtselect' + str(lc_bin_num) + '_lc.fits',
        Name + '_gtmktime' + str(lc_bin_num) + '_lc.fits',
        Name + '_cmap' + str(lc_bin_num) + '_lc.fits',
        Name + '_ccube' + str(lc_bin_num) + '_lc.fits',
        Name + '_ltcube' + str(lc_bin_num) + '_lc.fits',
        Name + '_expMap' + str(lc_bin_num) + '_lc.fits',
        Name + '_LC' + str(lc_bin_num) + '_exposure.fits',
        Name + '_srcmaps' + str(lc_bin_num) + '_lc.fits',
        Name + '_gtselect' + str(lc_bin_num) + '_exposure.fits',
        Name + '_gtmktime' + str(lc_bin_num) + '_exposure.fits'
    ])
    if yes == 0:
        print 'Files for bin have been deleted'
    else:
        print "Subprocessing failed. Unable to delete files for bin."
Example #14
0
    def _compute(self):
        if self.verbosity: print 'Calculating gtlike upper limit'

        like = self.like
        name = self.name

        saved_state = SuperState(like)
        source = like.logLike.getSource(name)

        try:
            import IntegralUpperLimit

            # First, freeze the spectrum of our source
            # (everything except its normalization)
            gtlike_allow_fit_only_prefactor(like, name)

            # Spectral fit whole ROI

            if self.verbosity:
                print 'Before the pre-upper-limit fit:'
                print summary(like)

            """ N.B. spectral fit in this function instead
                of in upper limits code since my
                paranoid_gtlike_fit function is more robust. """
            paranoid_gtlike_fit(like, verbosity=self.verbosity)

            if self.verbosity:
                print 'After the pre-upper-limit fit:'
                print summary(like)

            # Freeze everything but our source of interest
            for i in range(len(like.model.params)):
                like.model[i].setFree(False)
                like.syncSrcParams(like[i].srcName)

            if self.rescale_parameters_before_limit:
                source=like.logLike.getSource(name)
                spectrum=source.spectrum()
                model = build_pointlike_model(spectrum)
                if self.verbosity:
                    print 'Rescaling %s model parameters before limits' % model.name
                    print ' * Initial mappers:',model.mappers
                norm_name = model.param_names[0]
                default_norm_limits = model.default_limits[norm_name]
                lower,upper = model.get_limits(norm_name)
                norm_min = min(lower, default_norm_limits.lower)
                norm_max = max(upper, default_norm_limits.upper)

                model.set_limits(norm_name, norm_min, norm_max, scale=1)
                if self.verbosity:
                    print ' * New mappers:',model.mappers

                spectrum = build_gtlike_spectrum(model)
                like.setSpectrum(name,spectrum)
                like.syncSrcParams(name)

            # Note, I think freeze_all is redundant, but flag it just 
            # to be paranoid
            flux_ul, results = IntegralUpperLimit.calc_int(like, name, 
                                                           freeze_all=True,
                                                           skip_global_opt=True,
                                                           cl=self.cl,
                                                           emin=self.emin, 
                                                           emax=self.emax, 
                                                           verbosity=self.verbosity,
                                                           **self.upper_limit_kwargs)

            if self.verbosity:
                print 'After computing Upper limit:'
                print summary(like)

            source=like.logLike.getSource(name)
            spectrum=source.spectrum()
            prefactor=spectrum.normPar()
            pref_ul = results['ul_value']*prefactor.getScale()
            prefactor.setTrueValue(pref_ul)
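            # The normalization has been pushed to its upper-limit value so the fluxes
            # reported below correspond to the limiting spectrum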

            self.results = flux_dict(like, name, 
                                     emin=self.emin,emax=self.emax,
                                     flux_units=self.flux_units, 
                                     energy_units=self.energy_units,
                                     errors=False,
                                     include_prefactor=self.include_prefactor,
                                     prefactor_energy=self.prefactor_energy)

            self.results['spectrum'] = spectrum_to_dict(spectrum)
            self.results['confidence'] = self.cl

            if self.xml_name is not None:
                # refree parameters before saving the XML
                saved_state.restore_free()

                # build pointlike model from object
                source=like.logLike.getSource(name)
                spectrum=source.spectrum()
                model = build_pointlike_model(spectrum)

                # set default oomp limits
                model.set_default_limits(oomp_limits=True)

                # place back into xml
                spectrum = build_gtlike_spectrum(model)
                like.setSpectrum(name,spectrum)
                like.syncSrcParams(name)

                like.writeXml(self.xml_name)


        except Exception, ex:
            print 'ERROR gtlike upper limit: ', ex
            traceback.print_exc(file=sys.stdout)
            self.results = None
Example #15
0
def run(clean=False):
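    # Exercise the unbinned-likelihood chain: select two half-day event sets, build and sum
    # their livetime cubes, make an exposure map, compute diffuse responses, run gtlike,
    # then test the UpperLimits and IntegralUpperLimit interfaces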
    gtselect['tmin'] = 0. + start_time
    gtselect['tmax'] = 86400/2 + start_time
    gtselect['infile'] = 'test_events.fits'
    gtselect['outfile'] = 'filtered1.fits'
    gtselect['ra'] = 90
    gtselect['dec'] = 20
    gtselect['rad'] = 20
    gtselect.run()

    gtvcut.run(infile=gtselect['outfile'], table='EVENTS')
    
    gtltcube['evfile'] = 'filtered1.fits'
    gtltcube['scfile'] = 'orbSim_scData_0000.fits'
    gtltcube['outfile'] = 'expcube1.fits'
    gtltcube['dcostheta'] = 0.05
    gtltcube['binsz'] = 1
    gtltcube['phibins'] = 0
    gtltcube['chatter'] = 4
    gtltcube.run()

    gtselect['tmin'] = 86400/2 + start_time
    gtselect['tmax'] = 86400 + start_time
    gtselect['outfile'] = 'filtered2.fits'
    gtselect.run()

    gtvcut.run(infile=gtselect['outfile'], table='EVENTS')
   
    gtltcube['evfile'] = 'filtered2.fits'
    gtltcube['scfile'] = 'orbSim_scData_0000.fits'
    gtltcube['outfile'] = 'expcube2.fits'
    gtltcube['dcostheta'] = 0.05
    gtltcube['binsz'] = 1
    gtltcube['chatter'] = 4
    gtltcube.run()

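    # Sum the two half-day livetime cubes into a single cube covering the full day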
    gtltsum['infile1'] = 'expcube1.fits'
    gtltsum['infile2'] = 'expcube2.fits'
    gtltsum['outfile'] = 'expcube_1_day.fits'
    gtltsum['chatter'] = 4
    gtltsum.run()
   
    gtexpmap.copy(gtltcube)
    gtexpmap['evfile'] = 'filtered_events_0000.fits'
    gtexpmap['irfs'] = irfs
    gtexpmap['srcrad'] = 30
    gtexpmap['nlong'] = 120
    gtexpmap['nlat'] = 120
    gtexpmap['nenergies'] = 20
    gtexpmap['expcube'] = 'expcube_1_day.fits'
    gtexpmap['outfile'] = 'expMap.fits'
    gtexpmap.run(chatter=4)

    gtvcut.run(infile=gtexpmap['outfile'], table='PRIMARY')

    gtdiffrsp.copy(gtexpmap)
    gtdiffrsp['srcmdl'] = srcmdl
    gtdiffrsp['evfile'] = 'filtered_events_0000.fits'
    gtdiffrsp['evtype'] = 'INDEF'
    gtdiffrsp.run(chatter=4)

    gtvcut.run(infile=gtdiffrsp['evfile'], table='EVENTS')

    gtlike.copy(gtltcube)
    gtlike.copy(gtdiffrsp)
    gtlike['expmap'] = gtexpmap['outfile']
    gtlike['expcube'] = gtltsum['outfile']
    gtlike['statistic'] = 'UNBINNED'
    gtlike['optimizer'] = 'MINUIT'
    if sys.platform == 'darwin':
        gtlike['optimizer'] = 'NEWMINUIT'
    gtlike['ftol'] = 1e-4
    gtlike['refit'] = 'no'
    gtlike.run(chatter=3)

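    # Redo the fit through the pyLikelihood interface so the fitted model and
    # per-source TS values can be inspected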
    like = unbinnedAnalysis(mode='h', optimizer='NEWMINUIT')
    like.fit(verbosity=0, tol=gtlike['ftol'])
    print like.model
    print "Ts values:"
    for src in like.sourceNames():
        print src, like.Ts(src)

    print "Exercise UpperLimits.py"
    ul = UpperLimits(like)
#    for src in like.sourceNames():
    for src in like.sourceNames()[:1]:
        if like[src].src.getType() == 'Point':
            flux_ul = ul[src].compute(emin=100, emax=3e5)[0]
            print src, flux_ul

    print "Exercise IntegralUpperLimit.py"
#    for src in like.sourceNames():
    for src in like.sourceNames()[:1]:
        if like[src].src.getType() == 'Point':
            flux_ul = IntegralUpperLimit.calc_int(like, src)
            print src, flux_ul[0]

#            bayes_ul = ul[src].bayesianUL(emin=100, emax=3e5)[0]
#            print src, bayes_ul
    
#    TsMap['srcmdl'] = 'Ts_srcModel.xml'
#    TsMap.run()
    if clean:
        cleanUp()