def pointlike_sed_to_dict(bandflux, flux_units='erg', energy_units='MeV'):
    """ Convert a pointlike BandFlux object into a plain dictionary of
        SED points suitable for serialization.

        Energies are converted from MeV into *energy_units* and
        differential fluxes from 1/MeV into 1/*flux_units*.  Bands with
        a positive fitted flux are marked Significant and carry a value
        plus asymmetric errors; the rest carry only an upper limit.
    """
    to_energy = lambda e: units.convert(e, 'MeV', energy_units)
    to_dnde = lambda d: units.convert(d, '1/MeV', '1/%s' % flux_units)

    results = defaultdict(lambda: defaultdict(list))
    results['Energy']['Units'] = energy_units
    results['dNdE']['Units'] = 'ph/cm^2/s/%s' % flux_units
    results['Significant'] = []

    energy = results['Energy']
    dnde = results['dNdE']

    for band in bandflux.rec:
        energy['Lower'].append(to_energy(band.elow))
        energy['Upper'].append(to_energy(band.ehigh))
        # Geometric mean as the representative band energy.
        energy['Value'].append(to_energy(np.sqrt(band.elow * band.ehigh)))

        # Undo the scaling applied inside the bandflux recarray.
        scale = band.elow * band.ehigh * bandflux.scale_factor

        significant = band.flux > 0
        results['Significant'].append(significant)

        if significant:
            dnde['Value'].append(to_dnde(band.flux / scale))
            dnde['Average_Error'].append(to_dnde((band.uflux / scale - band.lflux / scale) / 2))
            dnde['Lower_Error'].append(to_dnde((band.flux - band.lflux) / scale))
            dnde['Upper_Error'].append(to_dnde((band.uflux - band.flux) / scale))
            dnde['Upper_Limit'].append(np.nan)
        else:
            dnde['Value'].append(np.nan)
            dnde['Average_Error'].append(np.nan)
            dnde['Lower_Error'].append(np.nan)
            dnde['Upper_Error'].append(np.nan)
            dnde['Upper_Limit'].append(to_dnde(band.uflux / scale))

    return tolist(results)
def _condense_results(self): # convert results to standard self.results dict
    """ Condense self.raw_results (one dictionary per energy band) into
        the standard self.results dictionary of parallel arrays.
    """
    # Column b of sub-dictionary a, gathered across all bands.
    get = lambda a,b: np.asarray([i[a][b] for i in self.raw_results])
    # Units are band-independent, so read them from the first band.
    get_units = lambda a,b: self.raw_results[0][a][b]
    # Like get(), but pads with NaN for bands that lack sub-dict a
    # entirely (presumably upper limits are only computed for
    # insignificant bands).  NOTE(review): dict.has_key is Python 2 only.
    get_limit = lambda a,b: np.asarray([i[a][b] if i.has_key(a) else np.nan for i in self.raw_results])
    self.results['Energy'] = dict(
        Lower=get('energy','emin'),
        Upper=get('energy','emax'),
        Value=get('energy','emiddle'),
        Units=get_units('energy','energy_units'))
    self.results['dNdE']=dict(
        Value=get('prefactor','prefactor'),
        # Symmetrized error built from the asymmetric errors.
        Average_Error=(get('prefactor','prefactor_lower_err')+get('prefactor','prefactor_upper_err'))/2,
        Lower_Error=get('prefactor','prefactor_lower_err'),
        Upper_Error=get('prefactor','prefactor_upper_err'),
        Upper_Limit=get_limit('upper_limit','prefactor'),
        Units=get_units('prefactor','prefactor_units'))
    self.results['Ph_Flux']=dict(
        Value=get('flux','flux'),
        Average_Error=get('flux','flux_err'),
        Upper_Limit=get_limit('upper_limit','flux'),
        Units=get_units('flux','flux_units'))
    self.results['En_Flux']=dict(
        Value=get('flux','eflux'),
        Average_Error=get('flux','eflux_err'),
        Upper_Limit=get_limit('upper_limit','eflux'),
        Units=get_units('flux','eflux_units'))
    # NOTE(review): both lines read the 'reoptimize' column of the 'TS'
    # sub-dict -- presumably the reoptimized TS; confirm 'Test_Statistic'
    # is meant to hold the same values used for the significance cut.
    self.results['Test_Statistic']=get('TS','reoptimize')
    self.results['Significant']=get('TS','reoptimize')>self.min_ts
    self.results = tolist(self.results)
    if self.save_hesse_errors:
        # NOTE(review): added after tolist(), so this entry stays a numpy
        # array while everything else was converted -- confirm intended.
        self.results['dNdE']['HESS_Average_Error'] = get('prefactor', 'prefactor_err')
def pointlike_model_to_flux(model, emin, emax, flux_units='erg', energy_units='MeV',
                            errors=True, include_prefactor=False, prefactor_energy=None):
    """ Integrate a pointlike spectral model between emin and emax (MeV)
        and package the photon flux, energy flux, and (optionally) the
        differential prefactor at *prefactor_energy* into a dictionary.

        Energy fluxes are converted into *flux_units*; energies into
        *energy_units*.  When errors is True, 1-sigma errors are included.
    """
    convert_flux = lambda e: units.convert(e, 'MeV', flux_units)
    convert_energy = lambda e: units.convert(e, 'MeV', energy_units)

    f = dict()
    if errors:
        f['flux'], f['flux_err'] = model.i_flux(emin=emin, emax=emax, error=True)
        # e_weight=1 integrates E*dN/dE, i.e. the energy flux.
        eflux, eflux_err = model.i_flux(emin=emin, emax=emax, e_weight=1, error=True)
        f['eflux'] = convert_flux(eflux)
        f['eflux_err'] = convert_flux(eflux_err)
    else:
        f['flux'] = model.i_flux(emin=emin, emax=emax, error=False)
        eflux = model.i_flux(emin=emin, emax=emax, e_weight=1, error=False)
        f['eflux'] = convert_flux(eflux)

    f['flux_units'] = 'ph/cm^2/s'
    f['eflux_units'] = '%s/cm^2/s' % flux_units
    f['energy_units'] = energy_units
    f['emin'] = convert_energy(emin)
    f['emax'] = convert_energy(emax)

    if include_prefactor:
        assert prefactor_energy is not None
        # dN/dE evaluated at the requested energy, converted to 1/flux_units.
        convert_dnde = lambda d: units.convert(d, '1/MeV', '1/%s' % flux_units)
        f['prefactor'] = convert_dnde(model(prefactor_energy))
        f['prefactor_units'] = 'ph/cm^2/s/%s' % flux_units
        f['prefactor_energy'] = convert_energy(prefactor_energy)
    return tolist(f)
def pointlike_flux_dict(roi, which, emin=None, emax=None, *args, **kwargs):
    """ Build the flux dictionary for source *which* in a pointlike ROI.

        When neither energy bound is supplied, the ROI's full fit range
        is used.  Extra arguments are forwarded to
        pointlike_model_to_flux.
    """
    if emin is None and emax is None:
        emin, emax = get_full_energy_range(roi)
    model = roi.get_model(which)
    flux = pointlike_model_to_flux(model, emin, emax, *args, **kwargs)
    return tolist(flux)
def gtlike_source_dict(like, name, emin=None, emax=None,
                       flux_units='erg', energy_units='MeV',
                       errors=True, minos_errors=False, covariance_matrix=True,
                       save_TS=True, add_diffuse_dict=True,
                       verbosity=True):
    """ Summarize a gtlike source as a nested dictionary: log-likelihood,
        energy range, spectrum, flux, and optionally TS and the diffuse
        background spectra.

        When neither energy bound is supplied, the full fit range of the
        Like object is used.
    """
    if emin is None and emax is None:
        emin, emax = get_full_energy_range(like)

    d = {}
    d['logLikelihood'] = logLikelihood(like)
    d['energy'] = energy_dict(emin=emin, emax=emax, energy_units=energy_units)
    d['spectrum'] = name_to_spectral_dict(like, name, errors=errors,
                                          minos_errors=minos_errors,
                                          covariance_matrix=covariance_matrix)
    if save_TS:
        d['TS'] = gtlike_ts_dict(like, name, verbosity=verbosity)
    d['flux'] = flux_dict(like, name, emin=emin, emax=emax,
                          flux_units=flux_units, energy_units=energy_units,
                          errors=errors)
    if add_diffuse_dict:
        d['diffuse'] = diffuse_dict(like)
    return tolist(d)
def pointlike_spectrum_to_dict(model, errors=False):
    """ Package of a spectral model into a handy python dictionary.

            >>> m=PowerLaw(norm=1, index=-.5)
            >>> d=spectrum_to_dict(m)
            >>> print d['Norm']
            1.0
            >>> print d['Index']
            -0.5
            >>> d=spectrum_to_dict(m, errors=False)
            >>> d.has_key('Index_err')
            False

        Note, the way to save a ComositeModel is a little different.

            >>> from uw.like.Models import SumModel,LogParabola
            >>> pl=PowerLaw()
            >>> lp=LogParabola()
            >>> c = SumModel(pl,lp)
            >>> s=spectrum_to_dict(c)
            >>> s.keys()
            ['spectrum', 'method', 'name']
            >>> print s['method']
            pointlike
            >>> len(s['spectrum'])
            2
            >>> s['spectrum'][0] == spectrum_to_dict(pl)
            True
            >>> s['spectrum'][1] == spectrum_to_dict(lp)
            True
    """
    d = dict(name=model.name, method='pointlike')

    if isinstance(model, CompositeModel):
        # A composite model stores a list of sub-spectra, recursively
        # serialized, instead of a flat parameter list.
        d['spectrum'] = [pointlike_spectrum_to_dict(m) for m in model.models]
        return tolist(d)

    for param in model.param_names:
        d[param] = model[param]
        if errors:
            d['%s_err' % param] = model.error(param)
    for extra in model.default_extra_params.keys():
        d[extra] = getattr(model, extra)
    if d['name'] == 'FileFunction':
        # FileFunction models additionally carry the path they were read from.
        d['file'] = model.file
    return tolist(d)
def diffuse_dict(like_or_roi):
    """ Save out all diffuse sources.

        Returns a dictionary mapping each background source name to its
        spectral dictionary (with errors included).
    """
    backgrounds = get_background(like_or_roi)
    spectra = dict((name, name_to_spectral_dict(like_or_roi, name, errors=True))
                   for name in backgrounds)
    return tolist(spectra)
def todict(self):
    """ Serialize the model-comparison results as a plain dictionary. """
    d = dict()
    d['sed_points'] = self.sed_points
    d['model0'] = spectrum_to_dict(self.model0)
    d['model1'] = spectrum_to_dict(self.comprehensive_model)
    d['TS_comp'] = self.TS_comp
    d['ll_1'] = self.ll_1
    d['ll_0'] = self.ll_0
    return tolist(d)
def todict(self):
    """ Package the model-comparison results into a dictionary. """
    summary = {
        'sed_points': self.sed_points,
        'model0': spectrum_to_dict(self.model0),
        'model1': spectrum_to_dict(self.comprehensive_model),
        'TS_comp': self.TS_comp,
        'll_1': self.ll_1,
        'll_0': self.ll_0,
    }
    return tolist(summary)
def find_offpeak(ft1,name,skydir, pwncat1phase, emax=100000):
    """ Find the off-peak phase window of a pulsar from an FT1 event file.

        Optimizes the energy threshold and extraction radius via the
        H test, runs Bayesian blocks on the resulting phases, stores the
        outcome in the module-level `results` dict, dumps it to
        results_<name>.yaml, and saves phaseogram plots as
        results_<name>.pdf/.png.

        pwncat1phase -- previously published phase range to overplot for
        comparison, or None.  NOTE(review): this is Python 2 code
        (print statements).
    """
    # First, find energy and radius that maximize H test.
    opt = OptimizePhases(ft1,skydir, emax=emax, verbose=True)
    print 'optimal energy=%s & radius=%s, h=%s' % (opt.optimal_emin,opt.optimal_radius,opt.optimal_h)
    # Get optimal phases
    phases = get_phases(ft1, skydir, opt.optimal_emin, emax, opt.optimal_radius)
    # compute bayesian blocks on the optimized list of phases
    off_peak_bb = OffPeakBB(phases)
    # Stored globally (as well as dumped to yaml) so callers can inspect
    # the last run -- TODO confirm anything actually reads the global.
    global results
    results=tolist(
        dict(
            name=name,
            pwncat1phase = pwncat1phase.tolist() if pwncat1phase is not None else None,
            off_peak_phase = off_peak_bb.off_peak.tolist(),
            blocks = off_peak_bb.blocks,
            optimal_emin = opt.optimal_emin,
            emax = emax,
            optimal_radius = opt.optimal_radius,
            ncpPrior=off_peak_bb.ncpPrior,
            actual_ncpPrior=off_peak_bb.actual_ncpPrior,
            )
        )
    yaml.dump(results,open('results_%s.yaml' % name,'w'))
    # Diagnostic plot: phaseogram with the Bayesian blocks and the
    # selected off-peak window overlaid.
    plot_phaseogram_blocks(ft1,
                           repeat_phase=False,
                           skydir = skydir,
                           emin = opt.optimal_emin,
                           emax = emax,
                           radius = opt.optimal_radius,
                           phase_range = off_peak_bb.off_peak,
                           blocks_kwargs=dict(color='green'),
                           phase_range_kwargs=dict(color='green', label='blocks'),
                           data_kwargs=dict(color='red'),
                           blocks = off_peak_bb.blocks)
    if pwncat1phase is not None:
        # Overlay the catalog phase range for visual comparison.
        PhaseRange(pwncat1phase).axvspan(label='pwncat1', alpha=0.25, color='blue')
    P.legend()
    P.title(name)
    P.savefig('results_%s.pdf' % name)
    P.savefig('results_%s.png' % name)
def pointlike_sed_to_dict(bandflux, flux_units='erg', energy_units='MeV'):
    """ Turn a pointlike BandFlux object into a serializable dictionary
        of SED points.

        Energies come out in *energy_units*, differential fluxes in
        1/*flux_units*.  A band with positive fitted flux is recorded as
        Significant with asymmetric errors; otherwise only an upper
        limit is recorded and the other entries are NaN.
    """
    results = defaultdict(lambda: defaultdict(list))

    def energy_conv(e):
        return units.convert(e, 'MeV', energy_units)

    def dnde_conv(d):
        return units.convert(d, '1/MeV', '1/%s' % flux_units)

    results['Energy']['Units'] = energy_units
    results['dNdE']['Units'] = 'ph/cm^2/s/%s' % flux_units
    results['Significant'] = []

    for r in bandflux.rec:
        for key, value in [('Lower', r.elow),
                           ('Upper', r.ehigh),
                           ('Value', np.sqrt(r.elow * r.ehigh))]:
            results['Energy'][key].append(energy_conv(value))

        # Undo scaling in the bandflux recarray
        fac = r.elow * r.ehigh * bandflux.scale_factor

        if r.flux > 0:
            results['Significant'].append(True)
            results['dNdE']['Value'].append(dnde_conv(r.flux / fac))
            results['dNdE']['Average_Error'].append(
                dnde_conv((r.uflux / fac - r.lflux / fac) / 2))
            results['dNdE']['Lower_Error'].append(dnde_conv((r.flux - r.lflux) / fac))
            results['dNdE']['Upper_Error'].append(dnde_conv((r.uflux - r.flux) / fac))
            results['dNdE']['Upper_Limit'].append(np.nan)
        else:
            results['Significant'].append(False)
            for key in ('Value', 'Average_Error', 'Lower_Error', 'Upper_Error'):
                results['dNdE'][key].append(np.nan)
            results['dNdE']['Upper_Limit'].append(dnde_conv(r.uflux / fac))

    return tolist(results)
def pointlike_source_dict(roi, name, emin=None, emax=None,
                          flux_units='erg', energy_units='MeV',
                          errors=True, covariance_matrix=True,
                          save_TS=True, add_diffuse_dict=True,
                          verbosity=True):
    """ Summarize a pointlike source as a nested dictionary:
        log-likelihood, energy range, flux, spectrum, position, spatial
        model, and optionally TS and the diffuse background spectra.

        When neither energy bound is supplied, the ROI's full fit range
        is used.
    """
    d = {}
    if emin is None and emax is None:
        emin, emax = get_full_energy_range(roi)

    # Silence the ROI during the TS computation, then restore verbosity.
    old_quiet = roi.quiet
    roi.quiet = True
    if save_TS:
        d['TS'] = pointlike_ts_dict(roi, name)
    roi.quiet = old_quiet

    d['logLikelihood'] = logLikelihood(roi)
    d['energy'] = energy_dict(emin=emin, emax=emax, energy_units=energy_units)
    d['flux'] = flux_dict(roi, name, emin=emin, emax=emax,
                          flux_units=flux_units, energy_units=energy_units,
                          errors=errors)
    d['spectrum'] = name_to_spectral_dict(roi, name, errors=errors,
                                          covariance_matrix=covariance_matrix)

    # Source position
    source = roi.get_source(name)
    d['position'] = skydirdict(source.skydir)

    if add_diffuse_dict:
        d['diffuse'] = diffuse_dict(roi)

    d['spatial_model'] = spatial_model_to_dict(source, roi, errors=errors)

    return tolist(d)
def _condense_results(self):
    """ Condense self.raw_results (one dictionary per energy band) into
        the standard self.results dictionary of parallel arrays.
    """
    # convert results to standard self.results dict
    # Gather column b of sub-dict a across all bands.
    get = lambda a, b: np.asarray([i[a][b] for i in self.raw_results])
    # Units are the same for every band; take them from the first.
    get_units = lambda a, b: self.raw_results[0][a][b]
    # As get(), but NaN-pads bands missing sub-dict a (presumably upper
    # limits exist only for insignificant bands).
    # NOTE(review): dict.has_key is Python 2 only.
    get_limit = lambda a, b: np.asarray(
        [i[a][b] if i.has_key(a) else np.nan for i in self.raw_results])
    self.results['Energy'] = dict(Lower=get('energy', 'emin'),
                                  Upper=get('energy', 'emax'),
                                  Value=get('energy', 'emiddle'),
                                  Units=get_units('energy', 'energy_units'))
    self.results['dNdE'] = dict(
        Value=get('prefactor', 'prefactor'),
        # Symmetrized error from the asymmetric lower/upper errors.
        Average_Error=(get('prefactor', 'prefactor_lower_err') +
                       get('prefactor', 'prefactor_upper_err')) / 2,
        Lower_Error=get('prefactor', 'prefactor_lower_err'),
        Upper_Error=get('prefactor', 'prefactor_upper_err'),
        Upper_Limit=get_limit('upper_limit', 'prefactor'),
        Units=get_units('prefactor', 'prefactor_units'))
    self.results['Ph_Flux'] = dict(Value=get('flux', 'flux'),
                                   Average_Error=get('flux', 'flux_err'),
                                   Upper_Limit=get_limit(
                                       'upper_limit', 'flux'),
                                   Units=get_units('flux', 'flux_units'))
    self.results['En_Flux'] = dict(Value=get('flux', 'eflux'),
                                   Average_Error=get('flux', 'eflux_err'),
                                   Upper_Limit=get_limit(
                                       'upper_limit', 'eflux'),
                                   Units=get_units('flux', 'eflux_units'))
    # NOTE(review): both lines read the 'reoptimize' column of 'TS' --
    # presumably the reoptimized TS; confirm 'Test_Statistic' is intended
    # to hold the same values used for the significance cut.
    self.results['Test_Statistic'] = get('TS', 'reoptimize')
    self.results['Significant'] = get('TS', 'reoptimize') > self.min_ts
    self.results = tolist(self.results)
    if self.save_hesse_errors:
        # NOTE(review): appended after tolist(), so this entry remains a
        # numpy array while the rest were converted -- confirm intended.
        self.results['dNdE']['HESS_Average_Error'] = get(
            'prefactor', 'prefactor_err')
def spatial_model_to_dict(source, roi, errors=True):
    """ Serialize the spatial description of a source.

        Extended sources contribute their spatial-model parameters (with
        errors when requested), r68/r99 containment radii, model name,
        and any cached localization.  Point sources contribute only the
        ROI's elliptical localization error.
    """
    out = dict()
    if isinstance(source, ExtendedSource):
        # Extended Source parameters
        spatial = source.spatial_model
        for param in spatial.param_names:
            out[param] = spatial[param]
            if errors:
                out[param + '_err'] = spatial.error(param)

        out['r68'] = spatial.r68()
        out['r99'] = spatial.r99()
        out['name'] = spatial.name

        if hasattr(source, 'localization'):
            out['ellipse'] = source.localization
    else:
        # add elliptical error, if they exist.
        # N.B. If no localization performed, this will return
        # an empty dictionary.
        # N.B. This method will do the wrong thing if you have recently
        # relocalized another source. This is rarely the case.
        out['ellipse'] = roi.get_ellipse()
    return tolist(out)
def todict(self):
    """ Return the extension-fit results as a plain dictionary. """
    return tolist(dict(sigma=self.extension_list,
                       TS_spectral=self.TS_spectral,
                       TS_bandfits=self.TS_bandfits))
def skydirdict(skydir):
    """ Pack a sky direction into galactic and equatorial [lon, lat] pairs. """
    coords = dict()
    coords['gal'] = [skydir.l(), skydir.b()]
    coords['equ'] = [skydir.ra(), skydir.dec()]
    return tolist(coords)
def todict(self):
    """ Pacakge up the results of the SED fit into a nice dictionary. """
    results = self.results
    return tolist(results)
f['eflux_err']=cef(like.energyFluxError(name,emin=emin,emax=emax)) except Exception, ex: print 'ERROR calculating flux error: ', ex traceback.print_exc(file=sys.stdout) f['flux_err']=-1 f['eflux_err']=-1 if include_prefactor: assert prefactor_energy is not None source = like.logLike.getSource(name) spectrum = source.spectrum() cp = lambda e: units.convert(e,'1/MeV','1/%s' % flux_units) f['prefactor'] = cp(SpectrumPlotter.get_dnde_mev(spectrum,prefactor_energy)) f['prefactor_units'] = 'ph/cm^2/s/%s' % flux_units f['prefactor_energy'] = ce(prefactor_energy) return tolist(f) def gtlike_powerlaw_prefactor_dict(like, name, flux_units='erg', errors=True, minos_errors=False): cp = lambda e: units.convert(e,'1/MeV','1/%s' % flux_units) source = like.logLike.getSource(name) spectrum = source.spectrum() assert spectrum.genericName() == 'PowerLaw' pref = spectrum.getParam('Prefactor') scale = spectrum.getParam('Scale') d=dict() d['prefactor'] = cp(pref.getTrueValue()) if errors: d['prefactor_err'] = cp(pref.error()*pref.getScale()) if minos_errors: