import numpy as np

# Gtlike, sourcedict, powerlaw_upper_limit, test_cutoff, and SED are
# assumed to be provided by the surrounding pointlike/gtlike analysis
# modules used elsewhere in these scripts.

def gtlike_analysis(roi, name, emin, emax, hypothesis, upper_limit=False, cutoff=False):
    """ Perform a gtlike spectral fit of source `name` as a crosscheck of the pointlike analysis. """
    print 'Performing Gtlike crosscheck for %s' % hypothesis

    gtlike = Gtlike(roi)
    like = gtlike.like
    like.fit(covar=True)

    r = sourcedict(like, name)

    if upper_limit:
        r['upper_limit'] = powerlaw_upper_limit(like, name, emin=emin, emax=emax, cl=.95)
    if cutoff:
        r['test_cutoff'] = test_cutoff(like, name)

    # Build SEDs with 4 bins per decade and 1 bin per decade between 100 MeV and 100 GeV
    for kind, kwargs in [['4bpd', dict(bin_edges=np.logspace(2, 5, 13))],
                         ['1bpd', dict(bin_edges=np.logspace(2, 5, 4))]]:
        print 'Making %s SED' % kind
        sed = SED(like, name, **kwargs)
        sed.plot('sed_gtlike_%s_%s_%s.png' % (kind, hypothesis, name))
        sed.verbosity = True
        sed.save('sed_gtlike_%s_%s_%s.dat' % (kind, hypothesis, name))

    return r
def gtlike_analysis(roi, name, emin, emax, hypothesis, snrsize, upper_limit=False):
    """ Perform a spectral fit with gtlike to crosscheck the point-like analysis. """
    print '\n\nPerforming Gtlike analysis\n\n'

    gtlike = Gtlike(roi, binsz=1./8)
    like = gtlike.like
    like.fit(covar=True)

    results = sourcedict(like, name, emin=emin, emax=emax)

    if upper_limit:
        # *) Perform upper limits assuming spectral index 2
        #    N.B., for the E>10GeV analysis we are very much in the Poisson instead
        #    of Gaussian regime. The likelihood function will be VERY linear. As a result,
        #    delta_log_like_limits = 50 should be much more reasonable (not quite sure
        #    how to quantify this right now...)
        results['upper_limit'] = powerlaw_upper_limit(like, name,
                                                      delta_log_like_limits=50,
                                                      verbosity=2,
                                                      emin=emin, emax=emax)

    return results
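A minimal usage sketch for the routine above. The setup_roi() helper, the source name, and the energy range are illustrative assumptions, not part of the original analysis code; only the gtlike_analysis signature comes from the block above.

# Hypothetical driver, assuming a pointlike ROI has been built elsewhere.
name = 'MySNR'                 # placeholder source name
emin, emax = 1e4, 1e5          # 10 GeV - 100 GeV, matching the E>10GeV comment above
roi = setup_roi(name)          # assumed helper returning a pointlike ROI object

results = gtlike_analysis(roi, name, emin, emax,
                          hypothesis='point', snrsize=0.5,
                          upper_limit=True)
print results['upper_limit']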
# Fragment from inside the pointlike analysis routine. The flags
# (localize, fit_extension, extension_ul, upper_limit, cutoff) and the
# fit() helper are assumed to be defined earlier in that routine.

if localize:
    try:
        roi.localize(name, update=True)
    except Exception, ex:
        print 'ERROR localizing: ', ex
    fit()

if fit_extension:
    try:
        roi.fit_extension(name)
        roi.localize(name, update=True)
    except Exception, ex:
        print 'ERROR fitting extension: ', ex
    fit()

p = sourcedict(roi, name)

if extension_ul:
    print 'Unable to calculate extension upper limit'

if upper_limit:
    p['upper_limit'] = powerlaw_upper_limit(roi, name, emin=emin, emax=emax, cl=.95)
if cutoff:
    p['test_cutoff'] = test_cutoff(roi, name)

roi.plot_sed(which=name, filename='sed_pointlike_%s_%s.pdf' % (hypothesis, name), use_ergs=True)
roi.save('roi_%s_%s.dat' % (hypothesis, name))
#plot(roi, hypothesis)

return p
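The dictionary p assembled above is a plain Python dict; a hedged sketch of persisting it next to the saved ROI file, assuming YAML output (the format and filename pattern are assumptions, not taken from the original script):

import yaml

# Hypothetical: dump the pointlike fit results to disk. The filename
# mirrors the roi_%s_%s.dat convention above but is an assumption.
open('results_pointlike_%s_%s.yaml' % (hypothesis, name), 'w').write(yaml.dump(p))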
# Fragment: grid-based localization followed by the final spectral fit.
# snrsize, name, emin, emax, fit_extension, upper_limit, and the fit()
# helper are assumed to be defined earlier in the routine.

try:
    print 'First, localize with GridLocalize (helps with convergence)'
    size = max(snrsize, 0.5)
    grid = GridLocalize(roi, which=name, size=size, pixelsize=size/10)
    skydir = grid.best_position()
    print 'Using Grid Localize, best position is (l,b)=(%.3f,%.3f)' % (skydir.l(), skydir.b())
    roi.modify(which=name, skydir=skydir)
    roi.localize(which=name, update=True)
except Exception, ex:
    print 'ERROR localizing: ', ex

if fit_extension:
    fit()
    roi.fit_extension(which=name)

fit()

print 'Final Spectral Model for %s hypothesis:' % hypothesis
roi.print_summary(galactic=True)

results = sourcedict(roi, name, emin=emin, emax=emax)

if upper_limit:
    results['upper_limit'] = powerlaw_upper_limit(roi, name, emin=emin, emax=emax, verbosity=2)

roi.save('roi_%s.dat' % hypothesis)

return results
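Both pointlike fragments call a fit() helper that is not shown in this section; a minimal sketch of what such a helper might look like, assuming it simply refits the free spectral parameters and prints a summary (the real definition may differ):

def fit():
    # Hypothetical stand-in for the helper used above: refit the ROI's
    # free spectral parameters and print the result.
    roi.fit()
    roi.print_summary(galactic=True)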