def get_default_sources(self):
    point_sources, diffuse_sources = [], []

    model = PowerLaw(index=self.powerlaw_index, e0=np.sqrt(self.emin*self.emax))
    model.set_flux(self.flux, emin=self.emin, emax=self.emax)

    ps = PointSource(name='source', model=model.copy(), skydir=self.roi_dir)
    point_sources.append(ps)

    if self.isotropic_bg:
        ds = get_sreekumar()
        diffuse_sources.append(ds)

    if self.nearby_source:
        ps = PointSource(name='nearby_source',
                         model=model.copy(),
                         skydir=SkyDir(self.roi_dir.ra(), self.roi_dir.dec()+3))
        point_sources.append(ps)

    return point_sources, diffuse_sources
def get_sreekumar(diff_factor=1, free=(True, False)):
    # use Sreekumar-like defaults
    if diff_factor == 1:
        name = 'Sreekumar Isotropic'
    else:
        name = 'Sreekumar Isotropic x%s' % diff_factor

    free = np.asarray(free).copy()
    model = PowerLaw(index=2.1, free=free)
    model.set_flux(1.5e-5*diff_factor, emin=100, emax=np.inf)

    return DiffuseSource(
        name=name,
        diffuse_model=IsotropicConstant(),
        scaling_model=model)
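# Illustrative usage sketch (not part of the original source): get_sreekumar()
# returns an isotropic DiffuseSource whose PowerLaw scaling model is normalized
# to an integral flux of 1.5e-5*diff_factor above 100 MeV.
iso_default = get_sreekumar()                # named 'Sreekumar Isotropic'
iso_scaled = get_sreekumar(diff_factor=2)    # named 'Sreekumar Isotropic x2', twice the flux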
def get_source(name, position, fit_emin, fit_emax, extended=False, sigma=None):
    """ Build a source. """
    model = PowerLaw(index=2, e0=np.sqrt(fit_emin*fit_emax))
    PWNRegion.limit_powerlaw(model)

    flux = PowerLaw(norm=1e-11, index=2, e0=1e3).i_flux(fit_emin, fit_emax)
    model.set_flux(flux, emin=fit_emin, emax=fit_emax)

    if extended and sigma != 0:
        if not isnum(sigma):
            raise Exception("sigma must be set.")
        return ExtendedSource(
            name=name,
            model=model,
            spatial_model=Gaussian(sigma=sigma, center=position))
    else:
        return PointSource(
            name=name,
            model=model,
            skydir=position)
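# Illustrative usage sketch (not part of the original source): extended=True together
# with a numeric sigma selects a Gaussian ExtendedSource, otherwise a PointSource is
# returned. The position below is an arbitrary example direction.
example_position = SkyDir(83.6331, 22.0145)
point_src = get_source('example_point', example_position, fit_emin=1e2, fit_emax=1e5)
gauss_src = get_source('example_gauss', example_position, fit_emin=1e2, fit_emax=1e5,
                       extended=True, sigma=0.3)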
def test_ff(self):
    """ Simulate from a filefunction object and test that the best
        fit flux is consistent with the simulated flux. """
    name = 'ff'

    model = PowerLaw(index=2)
    model.set_flux(1e-6)

    simdir = path.expand('$SIMDIR/%s' % name)
    if not os.path.exists(simdir):
        os.makedirs(simdir)

    filename = abspath(join(simdir, 'file_function.txt'))
    model.save_profile(filename, 10, 1e6)
    ff = FileFunction(file=filename)

    center = SkyDir(0, 0)
    ps = PointSource(name='source', skydir=center, model=ff)
    point_sources = [ps]
    diffuse_sources = None

    roi = PointlikeTest.get_roi(name, center, point_sources, diffuse_sources,
                                emin=1e2, emax=1e5, binsperdec=4)

    if PointlikeTest.VERBOSE:
        roi.print_summary()
        print roi

    roi.fit(use_gradient=PointlikeTest.USE_GRADIENT)

    if PointlikeTest.VERBOSE:
        roi.print_summary()
        print roi

    fit, error = ff.i_flux(1e2, 1e5, error=True)
    true = model.i_flux(1e2, 1e5, error=False)
    self.assertPull(fit, true, error, 'flux')
def test_ps1(self):
    if PointlikeTest.VERBOSE:
        print '\nAnalyze a simulated point source against the galactic + isotropic diffuse\n'

    center = SkyDir(0, 0)

    diffuse_sources = get_default_diffuse(
        diffdir='$GLAST_EXT/diffuseModels/v2r0p1/',
        gfile='ring_2year_P76_v0.fits',
        ifile='isotrop_2year_P76_source_v1.txt')

    model = PowerLaw(index=2)
    model.set_flux(1e-6)
    ps_mc = PointSource(name='source', skydir=center, model=model)
    ps_fit = ps_mc.copy()
    point_sources = [ps_fit]

    roi = PointlikeTest.get_roi('ps1', center, point_sources, diffuse_sources)

    global roi_pt
    roi_pt = roi  # helps with debugging

    if PointlikeTest.VERBOSE:
        print roi

    roi.fit(use_gradient=PointlikeTest.USE_GRADIENT)

    if PointlikeTest.VERBOSE:
        print roi

    roi.localize(update=True)
    roi.fit(use_gradient=PointlikeTest.USE_GRADIENT)

    if PointlikeTest.VERBOSE:
        roi.print_summary()
        print roi

    self.compare_model(ps_fit, ps_mc)
    self.compare_spatial_model(ps_fit, ps_mc, roi.lsigma)
def test_extended_source(self):
    PointlikeTest.p('USE_GRADIENT=%s' % PointlikeTest.USE_GRADIENT)

    if PointlikeTest.VERBOSE:
        PointlikeTest.p('Analyze a simulated extended source against an isotropic background (E>10GeV)')

    center = SkyDir(0, 0)

    # Sreekumar-like isotropic
    point_sources = []
    diffuse_sources = [
        get_diffuse_source('ConstantValue', None, 'PowerLaw', None, 'Isotropic Diffuse')
    ]

    model = PowerLaw(index=2)
    model.set_flux(1e-4)

    if PointlikeTest.VERBOSE:
        PointlikeTest.p('Simulating gaussian source with sigma=1 degrees')

    spatial_model = Gaussian(p=[1], center=center)
    es_mc = ExtendedSource(name='source', spatial_model=spatial_model, model=model)
    es_fit = es_mc.copy()
    diffuse_sources.append(es_fit)

    roi = PointlikeTest.get_roi('extended_test', center, point_sources, diffuse_sources, emin=1e4)

    global roi_ext
    roi_ext = roi  # helps with debugging

    if PointlikeTest.VERBOSE:
        print roi

    if PointlikeTest.VERBOSE:
        PointlikeTest.p('Setting initial spatial model to 0.3 degrees')

    roi.modify(which='source', spatial_model=Gaussian(0.3))

    if PointlikeTest.VERBOSE:
        print roi

    roi.fit(use_gradient=PointlikeTest.USE_GRADIENT)

    if PointlikeTest.VERBOSE:
        print roi

    roi.fit_extension(which='source', use_gradient=PointlikeTest.USE_GRADIENT)
    roi.localize(update=True)
    roi.fit(use_gradient=PointlikeTest.USE_GRADIENT)

    self.compare_model(es_fit, es_mc)
    self.compare_spatial_model(es_fit, es_mc, roi.lsigma)

    self.assertTrue(roi.TS(which='source') > 25, 'The source should be significant')
    self.assertTrue(roi.TS_ext(which='source') > 25, 'And significantly extended')

    es_mc.spatial_model.save_template('$SIMDIR/extended_template.fits')

    if PointlikeTest.VERBOSE:
        PointlikeTest.p('Now, switching from Gaussian source to template source.')

    roi.del_source(which='source')

    template_source = ExtendedSource(
        name='template_source',
        model=es_mc.model,
        spatial_model=SpatialMap(file='$SIMDIR/extended_template.fits'))

    roi.add_source(template_source)

    roi.fit(use_gradient=PointlikeTest.USE_GRADIENT)

    self.compare_model(template_source, es_mc)

    self.assertTrue(roi.TS(which='template_source') > 25,
                    'Make sure these functions work similarly with spatial_map')
def _calculate(self):
    like = self.like
    name = self.name

    if self.verbosity:
        print 'Testing cutoff in gtlike'

    saved_state = SuperState(like)

    emin, emax = get_full_energy_range(like)

    self.results = d = dict(
        energy=energy_dict(emin=emin, emax=emax, energy_units=self.energy_units))

    try:
        def get_flux():
            return like.flux(name, emin, emax)

        def spectrum():
            source = like.logLike.getSource(name)
            s = source.spectrum()
            return spectrum_to_dict(s, errors=True)

        old_flux = get_flux()

        if spectrum()['name'] != 'PowerLaw':
            powerlaw_model = PowerLaw(norm=1e-11, index=2, e0=np.sqrt(emin*emax))
            powerlaw_model.set_flux(old_flux, emin=emin, emax=emax)
            powerlaw_model.set_default_limits(oomp_limits=True)

            if self.verbosity:
                print 'powerlaw_model is', powerlaw_model

            powerlaw_spectrum = build_gtlike_spectrum(powerlaw_model)
            like.setSpectrum(name, powerlaw_spectrum)

        if self.verbosity:
            print 'About to fit powerlaw_spectrum'
            print summary(like)

        paranoid_gtlike_fit(like, verbosity=self.verbosity)

        if self.verbosity:
            print 'Done fitting powerlaw_spectrum'
            print summary(like)

        d['hypothesis_0'] = source_dict(like, name, emin=emin, emax=emax,
                                        flux_units=self.flux_units,
                                        energy_units=self.energy_units,
                                        verbosity=self.verbosity)

        if self.cutoff_model is None:
            self.cutoff_model = PLSuperExpCutoff(norm=1e-9, index=1, cutoff=1000, e0=1000, b=1)
            self.cutoff_model.set_free('b', False)
            self.cutoff_model.set_flux(old_flux, emin=emin, emax=emax)
            self.cutoff_model.set_default_limits(oomp_limits=True)

        if self.verbosity:
            print 'cutoff_model is', self.cutoff_model

        cutoff_spectrum = build_gtlike_spectrum(self.cutoff_model)
        like.setSpectrum(name, cutoff_spectrum)

        if self.verbosity:
            print 'About to fit cutoff_model'
            print summary(like)

        paranoid_gtlike_fit(like, verbosity=self.verbosity)

        ll = like.logLike.value()

        if ll < d['hypothesis_0']['logLikelihood']:
            # if fit is worse than PowerLaw fit, then
            # restart fit with parameters almost
            # equal to best fit powerlaw
            cutoff_plaw = PLSuperExpCutoff(b=1)
            cutoff_plaw.set_free('b', False)
            cutoff_plaw.setp_gtlike('norm', d['hypothesis_0']['spectrum']['Prefactor'])
            cutoff_plaw.setp_gtlike('index', d['hypothesis_0']['spectrum']['Index'])
            cutoff_plaw.setp_gtlike('e0', d['hypothesis_0']['spectrum']['Scale'])
            cutoff_plaw.setp_gtlike('cutoff', 1e6)
            cutoff_plaw.set_default_limits(oomp_limits=True)

            temp = build_gtlike_spectrum(cutoff_plaw)
            like.setSpectrum(name, temp)

            if self.verbosity:
                print 'Redoing fit with cutoff same as plaw'
                print summary(like)

            paranoid_gtlike_fit(like, verbosity=self.verbosity)

        if self.verbosity:
            print 'Done fitting cutoff_spectrum'
            print summary(like)

        d['hypothesis_1'] = source_dict(like, name, emin=emin, emax=emax,
                                        flux_units=self.flux_units,
                                        energy_units=self.energy_units,
                                        verbosity=self.verbosity)

        if self.cutoff_xml_name is not None:
            like.writeXml(self.cutoff_xml_name)

        d['TS_cutoff'] = d['hypothesis_1']['TS']['reoptimize'] - d['hypothesis_0']['TS']['reoptimize']

        if self.verbosity:
            print 'For cutoff test, TS_cutoff = ', d['TS_cutoff']

    except Exception, ex:
        print 'ERROR gtlike test cutoff: ', ex
        traceback.print_exc(file=sys.stdout)
        self.results = None
def _calculate(self):
    roi = self.roi
    name = self.name

    if self.verbosity:
        print 'Testing cutoff in pointlike'

    emin, emax = get_full_energy_range(roi)

    self.results = d = dict(
        energy=energy_dict(emin=emin, emax=emax, energy_units=self.energy_units))

    saved_state = PointlikeState(roi)

    old_flux = roi.get_model(name).i_flux(emin, emax)

    if not isinstance(roi.get_model(name), PowerLaw):
        powerlaw_model = PowerLaw(norm=1e-11, index=2, e0=np.sqrt(emin*emax))
        powerlaw_model.set_mapper('Index', PowerLaw.default_limits['Index'])
        powerlaw_model.set_flux(old_flux, emin=emin, emax=emax)

        if self.verbosity:
            print "powerlaw_model is ", powerlaw_model

        roi.modify(which=name, model=powerlaw_model, keep_old_flux=False)

    fit = lambda: roi.fit(**self.fit_kwargs)

    def ts():
        old_quiet = roi.quiet
        roi.quiet = True
        ts = roi.TS(name, quick=False)
        roi.quiet = old_quiet
        return ts

    spectrum = lambda: spectrum_to_dict(roi.get_model(name), errors=True)

    if self.verbosity:
        print 'About to fit powerlaw_model'
        roi.print_summary()

    fit()

    if self.verbosity:
        print 'Done fitting powerlaw_model'
        roi.print_summary()

    d['hypothesis_0'] = source_dict(roi, name, emin=emin, emax=emax,
                                    flux_units=self.flux_units,
                                    energy_units=self.energy_units,
                                    verbosity=self.verbosity)

    if self.cutoff_model is None:
        self.cutoff_model = PLSuperExpCutoff(norm=1e-9, index=1, cutoff=1000, e0=1000, b=1)

        # Note, don't limit the normalization parameter
        for p in ['Index', 'Cutoff', 'b']:
            self.cutoff_model.set_mapper(p, PLSuperExpCutoff.default_limits[p])

        self.cutoff_model.set_free('b', False)
        self.cutoff_model.set_flux(old_flux, emin=emin, emax=emax)

    if self.verbosity:
        print "cutoff_model is ", self.cutoff_model

    roi.modify(which=name, model=self.cutoff_model, keep_old_flux=False)

    if self.verbosity:
        print 'About to fit cutoff_model'
        roi.print_summary()

    fit()

    ll = -roi.logLikelihood(roi.parameters())

    if ll < d['hypothesis_0']['logLikelihood']:
        # if fit is worse than PowerLaw fit, then
        # restart fit with parameters almost
        # equal to best fit powerlaw
        self.cutoff_plaw = PLSuperExpCutoff(b=1)
        self.cutoff_plaw.set_free('b', False)
        self.cutoff_plaw.setp('norm', d['hypothesis_0']['spectrum']['Norm'])
        self.cutoff_plaw.setp('index', d['hypothesis_0']['spectrum']['Index'])
        self.cutoff_plaw.setp('e0', d['hypothesis_0']['spectrum']['e0'])
        self.cutoff_plaw.setp('cutoff', 1e6)

        roi.modify(which=name, model=self.cutoff_plaw, keep_old_flux=False)
        fit()

        if self.verbosity:
            print 'Redoing fit with cutoff same as plaw'
            print 'Before:'
            roi.print_summary()
            print

        fit()

    if self.verbosity:
        print 'Done fitting cutoff_model'
        roi.print_summary()

    d['hypothesis_1'] = source_dict(roi, name, emin=emin, emax=emax,
                                    flux_units=self.flux_units,
                                    energy_units=self.energy_units,
                                    verbosity=self.verbosity)

    d['TS_cutoff'] = d['hypothesis_1']['TS']['noquick'] - d['hypothesis_0']['TS']['noquick']

    saved_state.restore()
def _calculate(self): """ Compute the flux data points for each energy. """ like = self.like name = self.name # Freeze all sources except one to make sed of. all_sources = like.sourceNames() if name not in all_sources: raise Exception("Cannot find source %s in list of sources" % name) saved_state = SuperState(like) self.results = dict( name=name, bands=[], min_ts=self.min_ts, ) for i,(emin,emax,e_middle) in enumerate(zip(self.lower_energy,self.upper_energy,self.middle_energy)): if self.verbosity: print 'Calculating bandfits from %.0dMeV to %.0dMeV' % (emin,emax) like.setEnergyRange(float(emin)+1, float(emax)-1) # Scale the powerlaw to the input spectral model => helps with convergence old_flux = self.init_model.i_flux(emin=emin, emax=emax) model = PowerLaw(index=2, e0=e_middle) model.set_flux(old_flux, emin=emin, emax=emax) norm = model['norm'] model.set_limits('norm',norm/float(self.fit_range),norm*self.fit_range, scale=norm) model.set_limits('index',-5,5) spectrum = build_gtlike_spectrum(model) like.setSpectrum(name,spectrum) like.syncSrcParams(name) if self.verbosity: print 'Before bandfits fitting from %.0dMeV to %.0dMeV' % (emin,emax) print summary(like) paranoid_gtlike_fit(like, verbosity=self.verbosity) if self.verbosity: print 'After bandfits fitting from %.0dMeV to %.0dMeV' % (emin,emax) print summary(like) r = source_dict(like, name, emin=emin, emax=emax, flux_units=self.flux_units, energy_units=self.energy_units, verbosity=self.verbosity) if self.verbosity: print 'Calculating bandfits upper limit from %.0dMeV to %.0dMeV' % (emin,emax) g = GtlikePowerLawUpperLimit(like, name, powerlaw_index=self.upper_limit_index, cl=self.ul_confidence, emin=emin,emax=emax, flux_units=self.flux_units, energy_units=self.energy_units, upper_limit_kwargs=self.upper_limit_kwargs, include_prefactor=True, prefactor_energy=e_middle, verbosity=self.verbosity) r['upper_limit'] = g.todict() r['prefactor'] = powerlaw_prefactor_dict(like, name, errors=True, minos_errors=False, flux_units=self.flux_units) r['significant']=r['TS']['reoptimize']>self.min_ts self.results['bands'].append(r) # revert to old model like.setEnergyRange(*self.init_energes) saved_state.restore()
def _calculate(self, *args, **kwargs):
    """ Convert all units into sympy arrays after the initial calculation. """
    like = self.like
    name = self.name

    init_energes = like.energies[[0, -1]]

    # Freeze all sources except one to make sed of.
    all_sources = like.sourceNames()

    if name not in all_sources:
        raise Exception("Cannot find source %s in list of sources" % name)

    # make copy of parameter values + free parameters
    saved_state = SuperState(like)

    if self.verbosity:
        print 'Freezing background sources'

    for other_name in get_background(like):
        if self.freeze_bg_diffuse:
            if self.verbosity:
                print ' * Freezing diffuse source %s' % other_name
            modify(like, other_name, free=False)
        else:
            if self.verbosity:
                print ' * Freezing spectral shape for diffuse source %s' % other_name
            modify(like, other_name, freeze_spectral_shape=True)

    for other_name in get_sources(like):
        if self.freeze_bg_sources:
            if self.verbosity:
                print ' * Freezing bg source %s' % other_name
            modify(like, other_name, free=False)
        else:
            if self.verbosity:
                print ' * Freezing spectral shape for bg source %s' % other_name
            modify(like, other_name, freeze_spectral_shape=True)

    self.raw_results = []
    for i, (lower, upper) in enumerate(zip(self.lower, self.upper)):
        like.setEnergyRange(float(lower)+1, float(upper)-1)

        e = np.sqrt(lower*upper)

        if self.verbosity:
            print 'Calculating SED from %.0dMeV to %.0dMeV' % (lower, upper)

        """ Note, the most robust method I have found for computing SEDs in gtlike is:
                (a) Create a generic spectral model with a fixed spectral index.
                (b) Set the 'Scale' to sqrt(emin*emax) so the prefactor is dNdE in the
                    middle of the sed bin.
                (c) Set the limits to go from norm/fit_range to norm*fit_range and
                    set the scale to 'norm'.
        """
        old_flux = self.init_model.i_flux(emin=lower, emax=upper)
        model = PowerLaw(index=self.powerlaw_index, e0=e)
        model.set_flux(old_flux, emin=lower, emax=upper)

        norm = model['norm']
        model.set_limits('norm', norm/float(self.fit_range), norm*self.fit_range, scale=norm)
        model.set_limits('index', -5, 5)
        model.freeze('index')

        spectrum = build_gtlike_spectrum(model)
        like.setSpectrum(name, spectrum)
        like.syncSrcParams(name)

        if self.verbosity:
            print 'Before fitting SED from %.0dMeV to %.0dMeV' % (lower, upper)
            print summary(like)

        paranoid_gtlike_fit(like, verbosity=self.verbosity)

        if self.verbosity:
            print 'After fitting SED from %.0dMeV to %.0dMeV' % (lower, upper)
            print summary(like)

        d = dict()
        self.raw_results.append(d)

        d['energy'] = energy_dict(emin=lower, emax=upper, energy_units=self.energy_units)
        d['flux'] = flux_dict(like, name, emin=lower, emax=upper,
                              flux_units=self.flux_units, errors=True,
                              include_prefactor=True, prefactor_energy=e)
        d['prefactor'] = powerlaw_prefactor_dict(like, name,
                                                 errors=self.save_hesse_errors,
                                                 minos_errors=True,
                                                 flux_units=self.flux_units)
        d['TS'] = ts_dict(like, name, verbosity=self.verbosity)

        if self.verbosity:
            print 'Calculating SED upper limit from %.0dMeV to %.0dMeV' % (lower, upper)

        if self.always_upper_limit or d['TS']['reoptimize'] < self.min_ts:
            ul = GtlikePowerLawUpperLimit(like, name,
                                          cl=self.ul_confidence,
                                          emin=lower, emax=upper,
                                          flux_units=self.flux_units,
                                          energy_units=self.energy_units,
                                          upper_limit_kwargs=self.upper_limit_kwargs,
                                          include_prefactor=True,
                                          prefactor_energy=e,
                                          verbosity=self.verbosity)
            d['upper_limit'] = ul.todict()

    # revert to old model
    like.setEnergyRange(*init_energes)
    saved_state.restore()

    self._condense_results()
PixelData(ft1files=diffuse_ft1,
          binfile=diffuse_binfile,
          binsperdec=4,
          event_class=0)

results_dict = []

index_mc = 2
for flux_mc in [1e-9, 3e-6, 3e-9, 1e-6, 1e-8, 3e-7, 3e-8, 1e-7]:

    source_str = "%g_%g_%s" % (flux_mc, index_mc, istr)

    print "Flux_mc=%g, Index_mc=%g" % (flux_mc, index_mc)

    name_mc = "source_%s" % istr
    model_mc = PowerLaw(p=[1, index_mc])
    model_mc.set_flux(flux_mc, 100, N.inf)
    source_mc = PointSource(name=name_mc, skydir=skydir_mc, model=model_mc)

    source_ft1 = join(tempdir, "source_%s_ft1.fits" % source_str)
    source_binfile = join(tempdir, "source_%s_binned.fits" % source_str)
    all_binfile = join(tempdir, "all_%s_binned.fits" % source_str)

    mc = MonteCarlo(
        point_sources=source_mc,
        seed=i,
        irf=irf,
        ft1=source_ft1,
        ft2=ft2,
        roi_dir=skydir_mc,
class PowerLawApproximator(BaseFitter):

    defaults = BaseFitter.defaults + (
        ('npoints', 1000, 'number of points in fit'),
        ('e0', None, 'scale for power law'),
        ('energy_units', 'MeV', 'default units to plot energy (x axis) in'),
        ('flux_units', 'erg', 'default units to plot energy flux (y axis) in'),
    )

    @keyword_options.decorate(defaults)
    def __init__(self, input_model, emin, emax, **kwargs):
        """ Create an approximate power law spectrum. """
        raise Exception("This code doesn't work yet. I think you need the exposure to do the fit correctly.")
        self.input_model = input_model
        self.emin = emin
        self.emax = emax
        keyword_options.process(self, kwargs)

        self._calculate()

    def _calculate(self):
        self.results = dict()

        energies = np.logspace(np.log10(self.emin), np.log10(self.emax), self.npoints)

        if self.e0 is None:
            self.e0 = np.sqrt(self.emin*self.emax)

        self.results['input_model'] = spectrum_to_dict(self.input_model)
        self.results['dnde'] = dnde = self.input_model(energies)

        self.pl_model = PowerLaw(e0=self.e0)
        self.pl_model.set_flux(self.input_model.i_flux(emin=self.emin, emax=self.emax),
                               emin=self.emin, emax=self.emax)

        def residuals(args):
            norm, index = args
            self.pl_model['norm'] = norm
            self.pl_model['index'] = index
            dnde_pl = self.pl_model(energies)
            #return np.sum((np.log(dnde) - np.log(dnde_pl))**2)
            print (np.log10(dnde) - np.log10(dnde_pl))**2
            return np.sum((np.log(dnde) - np.log(dnde_pl))**2)
            #return np.sum((dnde - dnde_pl)**2)

        best_norm, best_index = fmin(residuals, [self.pl_model['norm'], self.pl_model['index']])
        self.pl_model['norm'] = best_norm
        self.pl_model['index'] = best_index
        self.results['pl_model'] = spectrum_to_dict(self.pl_model)

    def plot(self, filename=None, axes=None, fignum=None, figsize=(4, 4)):

        if axes is None:
            fig = P.figure(fignum, figsize)
            axes = SpectralAxes(fig=fig,
                                rect=(0.22, 0.15, 0.75, 0.8),
                                flux_units=self.flux_units,
                                energy_units=self.energy_units)
            fig.add_axes(axes)
            axes.set_xlim_units(self.emin*units.MeV, self.emax*units.MeV)

        sp = SpectrumPlotter(axes=axes)
        sp.plot(self.results['input_model'], label='input')
        sp.plot(self.results['pl_model'], label='powerlaw')

        if filename is not None:
            P.savefig(filename)


if __name__ == "__main__":
    import doctest
    doctest.testmod()
def integral(skydir):
    i = lambda m: m.integral(skydir, emin, emax)
    return i(gal.dmodel[0]) + i(iso.dmodel[0])

bg_ratio = integral(SkyDir(0, 0, SkyDir.GALACTIC))/integral(roi_dir)
flux = galcenter_flux*bg_ratio**-0.5

print 'index=%.1f, galcenter_flux=%.1e, bg_ratio=%.2f, l,b=%.2f,%.2f, flux=%.1e' % \
    (index, galcenter_flux, bg_ratio, roi_dir.l(), roi_dir.b(), flux)

name = 'source_index_%g' % index

tempdir = mkdtemp(prefix='/scratch/')

model_mc = PowerLaw(index=index)
model_mc.set_flux(flux, 1e2, 1e5)

ft1 = join(tempdir, 'ft1.fits')
binfile = join(tempdir, 'binned.fits')
ft2 = join(tempdir, 'ft2.fits')
ltcube = join(tempdir, 'ltcube.fits')

ds = DataSpecification(
    ft1files=ft1,
    ft2files=ft2,
    binfile=binfile,
    ltcube=ltcube)

sa = SpectralAnalysisMC(ds,
                        emin=emin,
                        emax=emax,
                        binsperdec=8,
index = args.index
phibins = args.phibins

if args.position == 'galcenter':
    roi_dir = SkyDir(0, 0, SkyDir.GALACTIC)
elif args.position == 'allsky':
    roi_dir = random_on_sphere()
elif args.position == 'bad':
    roi_dir = SkyDir(314.4346, -69.5670, SkyDir.GALACTIC)
elif args.position == 'pole':
    roi_dir = SkyDir(0, -90, SkyDir.GALACTIC)
elif args.position == 'w44':
    roi_dir = SkyDir(283.98999, 1.355)

model_mc = PowerLaw(index=index)
model_mc.set_flux(flux, emin=args.emin, emax=args.emax)

if args.spatial == 'point':
    ps = PointSource(name=name, model=model_mc, skydir=roi_dir)
    point_sources, diffuse_sources = [ps], None
    sources = [ps]
elif args.spatial == 'disk':
    spatial_model = Disk(sigma=0.25, center=roi_dir)
    es = ExtendedSource(name=name, model=model_mc, spatial_model=spatial_model)
    point_sources, diffuse_sources = [], [es]
    sources = [es]
elif args.spatial == 'w44':
    spatial_model = EllipticalRing(major_axis=.3,
                                   minor_axis=0.19,
                                   pos_angle=-33,
                                   fraction=0.75,
                                   center=roi_dir)
    es = ExtendedSource(name=name, model=model_mc, spatial_model=spatial_model)
    point_sources, diffuse_sources = [], [es]
    sources = [es]
def _compute(self):
    """ Wrap up calculating the flux upper limit for a powerlaw source.

        This function employs the pyLikelihood function IntegralUpperLimit
        to calculate a Bayesian upper limit.

        The primary benefit of this function is that it replaces the
        spectral model automatically with a PowerLaw spectral model and
        fixes the index to -2. It then picks a better scale for the
        powerlaw and gives the upper limit calculation a more reasonable
        starting value, which helps the convergence.
    """
    if self.verbosity:
        print 'Calculating gtlike power-law upper limit'

    like = self.like
    name = self.name

    saved_state = SuperState(like)

    e = np.sqrt(self.emin*self.emax)

    """ I had tons of trouble getting a robust fitting algorithm.
        The problem with computing upper limits is
            (a) Getting an initial fit of the region (with the spectral index fixed) to converge.
            (b) Getting the upper limit to integrate over a good range.

        This is what I found to be the most robust way to compute upper limits:
            (a) Create a generic powerlaw model with the spectral index fixed at the
                desired value (typically set to -2). Note, don't set e0, use the
                default. This ensures that the prefactor range really does convert
                to a physically reasonable range of parameters.
            (b) Give the spectral model the pointlike default spectral limits. This
                is important because it gives the source a big enough range such
                that the upper limit can find a proper integration range.
            (c) Set the flux of the current model to equal the flux of the input
                model. This starts the fitter at a reasonable value. Do this with
                the set_flux flag strict=False because, in case the initial fit
                totally failed to converge (flux -> 0), you don't want to put the
                starting value of the flux too far away from the true value.
            (d) Keep the lower and upper limit on the prefactor as the pointlike
                default limits, but set the scale of the source to be the new 'norm'
                found by preserving the flux. This ensures the fitter doesn't have
                too much trouble finding the true minimum.

        Using this procedure, you get reasonable parameter limits which allow the
        preliminary fit to converge and the upper limits code to integrate over a
        reasonable parameter range.
    """
    source = like.logLike.getSource(name)
    spectrum = source.spectrum()

    old_flux = like.flux(name, self.emin, self.emax)
    model = PowerLaw(index=self.powerlaw_index)
    model.set_flux(old_flux, emin=self.emin, emax=self.emax, strict=False)
    model.set_default_limits(oomp_limits=True)

    spectrum = build_gtlike_spectrum(model)
    like.setSpectrum(name, spectrum)
    like.syncSrcParams(name)

    results = super(GtlikePowerLawUpperLimit, self)._compute()

    saved_state.restore()
skydir_mc = SkyDir()

bg = get_sreekumar()

ft2 = dict2fgl['ft2']
ltcube = dict2fgl['ltcube']

results = []

for extension_mc in extensions:
    print 'Looping over extension_mc=%g' % extension_mc

    model_mc = PowerLaw(index=index_mc)
    model_mc.set_flux(flux_mc(extension_mc), emin, emax)

    r = dict(
        type=args.type,
        mc=dict(
            extension=extension_mc,
            gal=[skydir_mc.l(), skydir_mc.b()],
            cel=[skydir_mc.ra(), skydir_mc.dec()],
            model=spectrum_to_dict(model_mc),
            flux=pointlike_model_to_flux(model_mc, emin, emax),
        )
    )

    tempdir = mkdtemp()

    point = 'point'