def altdiff_followup(self, hypothesis, dist, halo, TS):
    name = self.name

    print dist, halo, TS

    # Note, Gulli's alt diffuse models only go to 100GeV!
    roi = self.reload_roi(hypothesis, fit_emax=1e5)

    roi.print_summary()
    print roi.get_source(name)

    for source in get_background(roi):
        roi.del_source(source)

    print 'Loading alternate diffuse models'
    diff = get_gulli_diffuse(dist=dist, halo=halo, TS=TS, version=2,
                             event_class='source', verbosity=True)

    print 'Adding alternate diffuse models to ROI'
    for source in diff:
        roi.add_source(source)

    print 'Freezing insignificant diffuse models'
    freeze_insignificant_diffuse(roi, allowed_fraction=0.03, verbosity=True)

    print 'Printing ROI'
    roi.print_summary()

    print 'Doing gtlike analysis'
    results = gtlike_analysis(roi, name=name,
                              max_free=self.max_free,
                              seddir=self.seddir, datadir=self.datadir, plotdir=self.plotdir,
                              hypothesis='%s_altdiff_dist_%s_halo_%s_TS_%s' % (hypothesis, dist, halo, TS),
                              upper_limit=False,
                              do_bandfitter=False, do_sed=True,
                              )

    savedict(results, 'results_%s_altdiff_dist_%s_halo_%s_TS_%s_%s.yaml' % (name, dist, halo, TS, hypothesis))
def get_most_significant_diffuse(roi, allowed_fraction, max_n_models=1, verbosity=False):
    from lande.fermi.likelihood.save import get_background

    significant_list = []
    for name in get_background(roi):
        oc = observed_counts(roi)
        mc = model_counts(roi, name)

        fraction = float(mc)/oc

        if verbosity:
            print ' .. Source %s predicts %0.2f%% of total counts' % (name, 100*fraction)

        if fraction > allowed_fraction:
            significant_list.append((name, fraction))
            if verbosity:
                print 'keeping it.'
        else:
            if verbosity:
                print 'discarding it.'

    # Sort by predicted counts fraction (largest first) and keep at most max_n_models names.
    sorted_list = sorted(significant_list, key=lambda frac: frac[1], reverse=True)
    cut_list = [i[0] for i in sorted_list[:max_n_models]]

    if verbosity:
        print significant_list
        print sorted_list
        print cut_list

    return cut_list
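# Minimal usage sketch (not part of the original module): assuming `roi` is the
# pointlike ROI object used throughout this code, with the Gulli diffuse models
# already added as background sources, this returns the names of at most two
# diffuse components that each predict more than 3% of the observed counts.
# The threshold and max_n_models values here are illustrative assumptions.
dominant_diffuse = get_most_significant_diffuse(roi, allowed_fraction=0.03,
                                                max_n_models=2, verbosity=True)
print 'Most significant diffuse components:', dominant_diffuse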
def get_insignificant_diffuse(roi, allowed_fraction, verbosity=False):
    insignificant_list = []
    for source in get_background(roi):
        if is_significant(roi, source, allowed_fraction, verbosity):
            if verbosity:
                print '... keep source.'
        else:
            if verbosity:
                print "... Don't keep source."
            insignificant_list.append(source)
    return insignificant_list
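# freeze_insignificant_diffuse (called in altdiff_followup above) is not shown in
# this section. A plausible sketch, assuming it simply freezes whatever
# get_insignificant_diffuse returns, using the same roi.modify(..., free=False)
# pattern as update_roi below; the actual implementation may differ.
def freeze_insignificant_diffuse(roi, allowed_fraction, verbosity=False):
    for source in get_insignificant_diffuse(roi, allowed_fraction, verbosity):
        if verbosity:
            print ' .. Freezing insignificant diffuse source %s' % source
        roi.modify(which=source, free=False)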
def _calculate(self, *args, **kwargs):
    """ Convert all units into sympy arrays after the initial calculation. """
    like = self.like
    name = self.name

    init_energies = like.energies[[0, -1]]

    # Freeze all sources except the one to make the SED of.
    all_sources = like.sourceNames()

    if name not in all_sources:
        raise Exception("Cannot find source %s in list of sources" % name)

    # make copy of parameter values + free parameters
    saved_state = SuperState(like)

    if self.verbosity:
        print 'Freezing background sources'
    for other_name in get_background(like):
        if self.freeze_bg_diffuse:
            if self.verbosity:
                print ' * Freezing diffuse source %s' % other_name
            modify(like, other_name, free=False)
        else:
            if self.verbosity:
                print ' * Freezing spectral shape for diffuse source %s' % other_name
            modify(like, other_name, freeze_spectral_shape=True)
    for other_name in get_sources(like):
        if self.freeze_bg_sources:
            if self.verbosity:
                print ' * Freezing bg source %s' % other_name
            modify(like, other_name, free=False)
        else:
            if self.verbosity:
                print ' * Freezing spectral shape for bg source %s' % other_name
            modify(like, other_name, freeze_spectral_shape=True)

    self.raw_results = []
    for i, (lower, upper) in enumerate(zip(self.lower, self.upper)):

        like.setEnergyRange(float(lower)+1, float(upper)-1)

        e = np.sqrt(lower*upper)

        if self.verbosity:
            print 'Calculating SED from %.0dMeV to %.0dMeV' % (lower, upper)

        """ Note, the most robust method I have found for computing SEDs
            in gtlike is:
                (a) Create a generic spectral model with a fixed spectral index.
                (b) Set the 'Scale' to sqrt(emin*emax) so the prefactor is dNdE
                    in the middle of the sed bin.
                (c) Set the limits to go from norm/fit_range to norm*fit_range
                    and set the scale to 'norm'
        """
        old_flux = self.init_model.i_flux(emin=lower, emax=upper)
        model = PowerLaw(index=self.powerlaw_index, e0=e)
        model.set_flux(old_flux, emin=lower, emax=upper)
        norm = model['norm']
        model.set_limits('norm', norm/float(self.fit_range), norm*self.fit_range, scale=norm)
        model.set_limits('index', -5, 5)
        model.freeze('index')
        spectrum = build_gtlike_spectrum(model)

        like.setSpectrum(name, spectrum)
        like.syncSrcParams(name)

        if self.verbosity:
            print 'Before fitting SED from %.0dMeV to %.0dMeV' % (lower, upper)
            print summary(like)

        paranoid_gtlike_fit(like, verbosity=self.verbosity)

        if self.verbosity:
            print 'After fitting SED from %.0dMeV to %.0dMeV' % (lower, upper)
            print summary(like)

        d = dict()
        self.raw_results.append(d)

        d['energy'] = energy_dict(emin=lower, emax=upper, energy_units=self.energy_units)
        d['flux'] = flux_dict(like, name, emin=lower, emax=upper,
                              flux_units=self.flux_units, errors=True,
                              include_prefactor=True, prefactor_energy=e)
        d['prefactor'] = powerlaw_prefactor_dict(like, name,
                                                 errors=self.save_hesse_errors,
                                                 minos_errors=True,
                                                 flux_units=self.flux_units)
        d['TS'] = ts_dict(like, name, verbosity=self.verbosity)

        if self.verbosity:
            print 'Calculating SED upper limit from %.0dMeV to %.0dMeV' % (lower, upper)

        if self.always_upper_limit or d['TS']['reoptimize'] < self.min_ts:
            ul = GtlikePowerLawUpperLimit(like, name,
                                          cl=self.ul_confidence,
                                          emin=lower, emax=upper,
                                          flux_units=self.flux_units,
                                          energy_units=self.energy_units,
                                          upper_limit_kwargs=self.upper_limit_kwargs,
                                          include_prefactor=True,
                                          prefactor_energy=e,
                                          verbosity=self.verbosity,
                                          )
            d['upper_limit'] = ul.todict()

    # revert to old model
    like.setEnergyRange(*init_energies)
    saved_state.restore()

    self._condense_results()
def update_roi(self):
    roi = self.roi

    if self.verbosity:
        print "Replacing current diffuse file with Gulli's diffuse files"

    if self.verbosity:
        print 'Deleting previous background sources:'
    for source in get_background(roi):
        if self.verbosity:
            print ' .. Deleting source %s' % source
        roi.del_source(source)

    if self.verbosity:
        print 'Getting Gulli Diffuse:'
    diffuse_sources = get_gulli_diffuse(verbosity=self.verbosity, **self.diffuse_kwargs)

    if self.verbosity:
        print 'Adding Gulli Diffuse files to ROI:'
    for source in diffuse_sources:
        if self.verbosity:
            print ' .. adding file %s to the ROI' % source.name
        roi.add_source(source)

    galactic_sources = [i for i in diffuse_sources if isinstance(i.dmodel[0], DiffuseFunction)]
    assert len(galactic_sources) == len(diffuse_sources) - 1, "Expected exactly one isotropic diffuse source"

    free_galactic_sources = [i for i in diffuse_sources if np.any(i.smodel.free)]

    if self.verbosity:
        print 'Testing significance of free Galactic sources (to see if they need to be merged):'

    insignificant = [i for i in free_galactic_sources
                     if not is_significant(roi, i.name, self.fraction, self.verbosity)]

    if len(insignificant) == 0:
        print 'No insignificant sources, returning!'
        return
    elif len(insignificant) == 1:
        print 'Only one insignificant source (%s). Freezing it and returning' % insignificant[0].name
        roi.modify(which=insignificant[0].name, free=False)
        return
    else:
        if self.verbosity:
            print 'Multiple insignificant sources in ROI. Merging them!'
            print 'Insignificant free Galactic diffuse sources are:'
            for i in insignificant:
                print ' .. %s' % i.name

    if self.verbosity:
        print 'Deleting (and then merging) the insignificant free Galactic sources'
    for source in insignificant:
        if self.verbosity:
            print ' .. Deleting (and then merging) %s' % source.name
        roi.del_source(source.name)

    merged = merge_diffuse(insignificant,
                           mergefile=self.mergefile,
                           verbosity=self.verbosity,
                           short_name=self.short_name,
                           compress=self.compress)

    if self.verbosity:
        print 'Adding merged Galactic diffuse source %s to ROI:' % (merged.name)
    roi.add_source(merged)

    if self.verbosity:
        print 'Testing if the merged Galactic diffuse source is significant'
    if is_significant(roi, merged.name, self.fraction, self.verbosity):
        if self.verbosity:
            print 'Keeping free the merged Galactic diffuse source!'
    else:
        if self.verbosity:
            print 'Freezing the insignificant merged Galactic diffuse source!'
        roi.modify(which=merged.name, free=False)