def gtlike_analysis(pipeline, roi, name, hypothesis, upper_limit):
    print 'Performing Gtlike crosscheck for %s' % hypothesis

    # Build the gtlike (pyLikelihood) analysis from the pointlike ROI,
    # caching the binned data products if the pipeline requests it.
    gtlike = Gtlike(roi, savedir='savedir' if pipeline.cachedata else None)
    like = gtlike.like

    print 'About to fit gtlike ROI'
    print summary(like, maxdist=10)

    paranoid_gtlike_fit(like, verbosity=4)

    print 'Done fitting gtlike ROI'
    print summary(like, maxdist=10)

    like.writeXml("%s/srcmodel_gtlike_%s_%s.xml" % (pipeline.dirdict['data'], hypothesis, name))

    r = source_dict(like, name)

    upper_limit_kwargs = dict()

    if upper_limit:
        pul = GtlikePowerLawUpperLimit(like, name, cl=.95, verbosity=4)
        r['powerlaw_upper_limit'] = pul.todict()

    # Helper: compute an SED with the given binning, then save the plot and YAML.
    def sed(kind, **kwargs):
        print 'Making %s SED' % kind
        s = GtlikeSED(like, name, always_upper_limit=True,
                      verbosity=4,
                      upper_limit_kwargs=upper_limit_kwargs,
                      **kwargs)
        s.plot('%s/sed_gtlike_%s_%s.png' % (pipeline.dirdict['seds'], kind, name))
        s.save('%s/sed_gtlike_%s_%s.yaml' % (pipeline.dirdict['seds'], kind, name))

    # SEDs with roughly 1, 2, and 4 bins per decade between 100 MeV and ~316 GeV.
    sed('1bpd_%s' % hypothesis, bin_edges=[10**2, 10**3, 10**4, 10**5.5])
    sed('2bpd_%s' % hypothesis, bin_edges=np.logspace(2, 5.5, 8))
    if not pipeline.fast:
        sed('4bpd_%s' % hypothesis, bin_edges=np.logspace(2, 5.5, 15))

    return r
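# Illustrative sketch (not part of the original pipeline code): one way the
# function above might be driven for the standard hypotheses, with the merged
# results dumped to YAML.  The helper name `run_gtlike_hypotheses`, the list of
# hypotheses, and the output filename are assumptions.
def run_gtlike_hypotheses(pipeline, roi, name):
    import yaml
    results = dict()
    for hypothesis in ['at_pulsar', 'point']:
        # Only compute the power-law upper limit for the at_pulsar hypothesis (assumption).
        results[hypothesis] = gtlike_analysis(pipeline, roi, name,
                                              hypothesis=hypothesis,
                                              upper_limit=(hypothesis == 'at_pulsar'))
    yaml.dump(results,
              open('%s/results_gtlike_%s.yaml' % (pipeline.dirdict['data'], name), 'w'))
    return results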
def gtlike_analysis(roi, name, hypothesis, max_free,
                    seddir, datadir, plotdir,
                    upper_limit=False,
                    cutoff=False,
                    cutoff_model=None,
                    do_bandfitter=False,
                    do_sed=False,
                    ):
    print 'Performing Gtlike crosscheck for %s' % hypothesis

    # Temporarily freeze sources far from the target so the gtlike model
    # matches what was free in the pointlike fit.
    frozen = freeze_far_away(roi, roi.get_source(name).skydir, max_free)
    gtlike = Gtlike(roi, extended_dir_name=datadir)
    unfreeze_far_away(roi, frozen)

    global like
    like = gtlike.like

    like.tol = 1e-1  # I found that the default tol '1e-3' would get the fitter stuck in infinite loops
    import pyLikelihood as pyLike
    like.setFitTolType(pyLike.ABSOLUTE)

    emin, emax = get_full_energy_range(like)

    print 'About to fit gtlike ROI'
    print summary(like, maxdist=10)

    paranoid_gtlike_fit(like, verbosity=4)

    print 'Done fitting gtlike ROI'
    print summary(like, maxdist=10)

    spectrum_name = like.logLike.getSource(name).spectrum().genericName()
    like.writeXml("%s/srcmodel_gtlike_%s_%s_%s.xml" % (datadir, hypothesis, spectrum_name, name))

    r = source_dict(like, name)

    #upper_limit_kwargs=dict(delta_log_like_limits=10)
    upper_limit_kwargs = dict()

    if upper_limit:
        pul = GtlikePowerLawUpperLimit(like, name,
                                       emin=emin, emax=emax,
                                       cl=.95,
                                       upper_limit_kwargs=upper_limit_kwargs,
                                       verbosity=4,
                                       xml_name=join("%s/srcmodel_gtlike_%s_%s_%s.xml" % (datadir, hypothesis, 'PowerLaw_Upper_Limit', name)))
        r['powerlaw_upper_limit'] = pul.todict()

        cul = GtlikeCutoffUpperLimit(like, name,
                                     Index=1.7, Cutoff=3e3, b=1,
                                     cl=.95,
                                     upper_limit_kwargs=upper_limit_kwargs,
                                     verbosity=4,
                                     xml_name=join("%s/srcmodel_gtlike_%s_%s_%s.xml" % (datadir, hypothesis, 'PLSuperExpCutoff_Upper_Limit', name)))
        r['cutoff_upper_limit'] = cul.todict()

    if do_bandfitter:
        if all_energy(emin, emax):
            try:
                bf = GtlikeBandFitter(like, name,
                                      bin_edges=one_bin_per_dec(emin, emax),
                                      upper_limit_kwargs=upper_limit_kwargs,
                                      verbosity=4)
                bf.plot('%s/bandfits_gtlike_%s_%s.png' % (plotdir, hypothesis, name))
                r['bandfits'] = bf.todict()
            except Exception, ex:
                print 'ERROR computing bandfit:', ex
                traceback.print_exc(file=sys.stdout)
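# Illustrative sketch (assumption, not the actual paranoid_gtlike_fit used
# above): the kind of defensive fit wrapper this code relies on, which retries
# with more robust optimizers if the first fit raises.  The optimizer names and
# the fallback order are assumptions about typical pyLikelihood usage.
def _defensive_gtlike_fit(like, verbosity=0):
    try:
        # covar=True asks pyLikelihood to compute the covariance matrix.
        like.fit(covar=True, optimizer='MINUIT', verbosity=verbosity)
    except Exception:
        if verbosity:
            print 'MINUIT fit failed; retrying with DRMNFB + NEWMINUIT'
        # Pre-fit with DRMNFB (no covariance), then polish with NEWMINUIT,
        # a common recovery strategy when MINUIT fails to converge.
        like.fit(covar=False, optimizer='DRMNFB', verbosity=verbosity)
        like.fit(covar=True, optimizer='NEWMINUIT', verbosity=verbosity)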
def _calculate(self, *args, **kwargs):
    """ Convert all units into sympy arrays after the initial calculation. """

    like = self.like
    name = self.name

    init_energies = like.energies[[0, -1]]

    # Freeze all sources except the one to make an SED of.
    all_sources = like.sourceNames()

    if name not in all_sources:
        raise Exception("Cannot find source %s in list of sources" % name)

    # make copy of parameter values + free parameters
    saved_state = SuperState(like)

    if self.verbosity:
        print 'Freezing background sources'
    for other_name in get_background(like):
        if self.freeze_bg_diffuse:
            if self.verbosity:
                print ' * Freezing diffuse source %s' % other_name
            modify(like, other_name, free=False)
        else:
            if self.verbosity:
                print ' * Freezing spectral shape for diffuse source %s' % other_name
            modify(like, other_name, freeze_spectral_shape=True)
    for other_name in get_sources(like):
        if self.freeze_bg_sources:
            if self.verbosity:
                print ' * Freezing bg source %s' % other_name
            modify(like, other_name, free=False)
        else:
            if self.verbosity:
                print ' * Freezing spectral shape for bg source %s' % other_name
            modify(like, other_name, freeze_spectral_shape=True)

    self.raw_results = []
    for i, (lower, upper) in enumerate(zip(self.lower, self.upper)):

        # Restrict the fit to just inside the bin edges.
        like.setEnergyRange(float(lower) + 1, float(upper) - 1)

        e = np.sqrt(lower * upper)

        if self.verbosity:
            print 'Calculating SED from %.0dMeV to %.0dMeV' % (lower, upper)

        """ Note, the most robust method I have found for computing SEDs in gtlike is:
                (a) Create a generic spectral model with a fixed spectral index.
                (b) Set the 'Scale' to sqrt(emin*emax) so the prefactor is dN/dE
                    in the middle of the SED bin.
                (c) Set the limits to go from norm/fit_range to norm*fit_range
                    and set the scale to 'norm'.
        """
        old_flux = self.init_model.i_flux(emin=lower, emax=upper)
        model = PowerLaw(index=self.powerlaw_index, e0=e)
        model.set_flux(old_flux, emin=lower, emax=upper)
        norm = model['norm']
        model.set_limits('norm', norm / float(self.fit_range), norm * self.fit_range, scale=norm)
        model.set_limits('index', -5, 5)
        model.freeze('index')
        spectrum = build_gtlike_spectrum(model)

        like.setSpectrum(name, spectrum)
        like.syncSrcParams(name)

        if self.verbosity:
            print 'Before fitting SED from %.0dMeV to %.0dMeV' % (lower, upper)
            print summary(like)

        paranoid_gtlike_fit(like, verbosity=self.verbosity)

        if self.verbosity:
            print 'After fitting SED from %.0dMeV to %.0dMeV' % (lower, upper)
            print summary(like)

        d = dict()
        self.raw_results.append(d)

        d['energy'] = energy_dict(emin=lower, emax=upper, energy_units=self.energy_units)
        d['flux'] = flux_dict(like, name, emin=lower, emax=upper,
                              flux_units=self.flux_units, errors=True,
                              include_prefactor=True, prefactor_energy=e)
        d['prefactor'] = powerlaw_prefactor_dict(like, name,
                                                 errors=self.save_hesse_errors,
                                                 minos_errors=True,
                                                 flux_units=self.flux_units)
        d['TS'] = ts_dict(like, name, verbosity=self.verbosity)

        if self.verbosity:
            print 'Calculating SED upper limit from %.0dMeV to %.0dMeV' % (lower, upper)

        if self.always_upper_limit or d['TS']['reoptimize'] < self.min_ts:
            ul = GtlikePowerLawUpperLimit(like, name,
                                          cl=self.ul_confidence,
                                          emin=lower, emax=upper,
                                          flux_units=self.flux_units,
                                          energy_units=self.energy_units,
                                          upper_limit_kwargs=self.upper_limit_kwargs,
                                          include_prefactor=True,
                                          prefactor_energy=e,
                                          verbosity=self.verbosity,
                                          )
            d['upper_limit'] = ul.todict()

    # revert to old model
    like.setEnergyRange(*init_energies)
    saved_state.restore()

    self._condense_results()
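# Illustrative sketch (assumption): one way the per-bin dictionaries collected
# in self.raw_results above might be separated before plotting.  Only the
# 'TS'/'reoptimize' and 'upper_limit' keys are taken from the code above; the
# helper name and the threshold default are assumptions, and the real
# _condense_results is not shown here.
def _split_detections_and_limits(raw_results, min_ts=4):
    detections, limits = [], []
    for d in raw_results:
        # Bins with a low reoptimized TS are reported as upper limits when available.
        if d['TS']['reoptimize'] < min_ts and 'upper_limit' in d:
            limits.append(d)
        else:
            detections.append(d)
    return detections, limits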