def __init__(self, roi, which, **kwargs): """ Compute an upper limit on the source extension, by the "PDG Method". """ keyword_options.process(self, kwargs) self.roi = roi self.which = which self.init_ts = roi.TS(which, quick=False) if self.init_ts < 4: # Bunt on extension upper limits for completely insignificant sources print 'Unable to compute extension upper limit for point-like source with too-small TS' self.extension_limit = None else: if not isinstance(self.spatial_model, type): raise Exception( "The spatial model bust be a type, like Gaussian, not an instance, like Gaussian()" ) # Note, since the input is the class, not the instance, the # position parmaeters have not yet been added on. n = self.spatial_model.param_names assert len(n) == 1 and n[0] == 'Sigma' self.saved_state = PointlikeState(roi) self.spatial_low_lim, self.spatial_hi_lim = self.spatial_model.default_limits[ 0] results = self._compute() self.saved_state.restore()
def _compute(self):
    """ Compute a pointlike flux upper limit for source self.name.

        On success, stores a flux dictionary (plus 'confidence' and
        'spectrum' entries) in self.results; on any failure, logs the
        exception and sets self.results to None (best-effort by design).
    """
    roi = self.roi
    name = self.name

    model = roi.get_model(name)

    # NOTE(review): this snapshot is never restored inside this method --
    # presumably FluxUpperLimit restores the spectra itself, making this
    # local dead code; confirm before removing.
    saved_state = PointlikeState(roi)

    try:
        ful = FluxUpperLimit(roi=roi, which=name,
                             confidence=self.cl,
                             simps_points=self.simps_points,
                             verbosity=self.verbosity)
        # The model pushed to the requested-confidence flux.
        model = ful.upper_limit_model

        self.results = pointlike_model_to_flux(model,
                                               emin=self.emin,
                                               emax=self.emax,
                                               flux_units=self.flux_units,
                                               energy_units=self.energy_units,
                                               errors=False,
                                               include_prefactor=self.include_prefactor,
                                               prefactor_energy=self.prefactor_energy, )
        self.results['confidence'] = self.cl
        self.results['spectrum'] = spectrum_to_dict(model)

    except Exception, ex:
        # Deliberate best-effort: report the failure and carry on with
        # self.results = None rather than aborting the whole analysis.
        print 'ERROR pointlike upper limit: ', ex
        traceback.print_exc(file=sys.stdout)
        self.results = None
def _compute(self): roi = self.roi which = self.which state = PointlikeState(roi) ll_0 = roi.logLikelihood(roi.parameters()) source = roi.get_source(which) if self.verbosity: print 'Computing upper limit for source %s with %s spectral model' % ( source.name, source.model.name) if not hasattr(source, 'model'): raise Exception( "upper_limit can only calculate upper limits of point and extended sources." ) model = source.model integral_min, integral_max = self.get_integration_range(model) if self.verbosity: print 'For source %s, setting integration range from' % model.name print ' * integration minimum = :', integral_min print ' * integration maximum = :', integral_max # Unbound flux temporarily to avoid parameter limits model.set_mapper(0, LinearMapper) def like(norm): model.setp(0, norm) return np.exp(ll_0 - roi.logLikelihood(roi.parameters())) npoints = int( math.ceil(self.simps_points * (np.log10(integral_max) - np.log10(integral_min)))) points = np.logspace(np.log10(integral_min), np.log10(integral_max), npoints * 2 + 1) y = np.array([like(x) * x for x in points]) trapz1 = integrate.cumtrapz(y[::2]) trapz2 = integrate.cumtrapz(y)[::2] cumsimps = (4 * trapz2 - trapz1) / 3. cumsimps /= cumsimps[-1] i1 = np.where(cumsimps < .95)[0][-1] i2 = np.where(cumsimps > .95)[0][0] x1, x2 = points[::2][i1], points[::2][i2] y1, y2 = cumsimps[i1], cumsimps[i2] #Linear interpolation should be good enough at this point limit = x1 + ((x2 - x1) / (y2 - y1)) * (self.confidence - y1) model.setp(0, limit) self.uflux = model.i_flux(**self.flux_kwargs) self.upper_limit_model = model.copy() state.restore(just_spectra=True)
def __init__(self, roi, which, model0, model1, **kwargs):
    """ Compare two candidate spectral models for source `which`.

        Private copies of both models are stored so the caller's objects
        are never modified; the ROI state is snapshotted around compute().
    """
    keyword_options.process(self, kwargs)
    self.roi = roi
    self.which = which
    # Work on copies so the caller's model objects stay untouched.
    self.model0, self.model1 = model0.copy(), model1.copy()

    snapshot = PointlikeState(roi)
    self.compute()
    snapshot.restore()
def _compute(self): if self.verbosity: print 'calculating pointlike cutoff upper limit' roi = self.roi name = self.name saved_state = PointlikeState(roi) cutoff_model = PLSuperExpCutoff(Index=self.Index, Cutoff=self.Cutoff, b=self.b) roi.modify(which=name, model=cutoff_model, keep_old_flux=True) super(PointlikeCutoffUpperLimit,self)._compute() saved_state.restore(just_spectra=True)
def fill(self):
    """ Scan the source TS as a function of the extension parameter sigma.

        Builds self.extension_list (num_points trial sigmas between
        lower_limit and upper_limit) and fills self.TS_spectral and
        self.TS_bandfits with the TS obtained after refitting at each
        sigma. The ROI state is snapshotted first and restored at the end.
    """
    roi = self.roi
    state = PointlikeState(roi)
    if not roi.quiet: print 'Calculating extension profile for %s' % self.source.name
    init_p = roi.get_parameters().copy()

    # Keep the TS function quiet
    # NOTE(review): roi.quiet is not explicitly reset afterwards --
    # presumably state.restore() recovers it; confirm.
    old_quiet = roi.quiet
    roi.quiet=True

    sigma = self.spatial_model['sigma']
    sigma_err = self.spatial_model.error('sigma')

    # Default scan ceiling: sigma + max(3*sigma_err, sigma), capped at 3
    # (presumably degrees -- TODO confirm units against SpatialModel).
    upper_limit = min(sigma + max(3*sigma_err,sigma),3) if self.upper_limit is None else self.upper_limit
    # make the bottom point ~ 0.1xfirst point
    lower_limit = float(upper_limit)/self.num_points/10.0 if self.lower_limit is None else self.lower_limit

    self.extension_list=np.linspace(lower_limit,upper_limit,self.num_points)
    self.TS_spectral=np.empty_like(self.extension_list)
    self.TS_bandfits=np.empty_like(self.extension_list)

    roi.setup_energy_bands()

    if not old_quiet: print '%20s %20s %20s' % ('sigma','TS_spectral','TS_bandfits')

    for i,sigma in enumerate(self.extension_list):
        roi.modify(which=self.which, sigma=sigma)

        # Fit twice -- once continuing from the previous iteration's
        # parameters and once from the initial parameters -- and keep
        # whichever ends at the better likelihood.
        roi.fit(**self.fit_kwargs)
        params=roi.parameters()
        ll_a=-1*roi.logLikelihood(roi.parameters())

        roi.update_counts(init_p)
        roi.fit(**self.fit_kwargs)
        ll_b=-1*roi.logLikelihood(roi.parameters())

        if ll_a > ll_b:
            roi.update_counts(params)

        self.TS_spectral[i]=roi.TS(**self.ts_kwargs)
        self.TS_bandfits[i]=roi.TS(bandfits=True,**self.ts_kwargs)

        if not old_quiet: print 'sigma=%.2f ts_spec=%.1f, ts_band=%.1f' % (sigma, self.TS_spectral[i],self.TS_bandfits[i])

    state.restore()
def _calculate(self):
    """ Refit the ROI with parameter self.param_name of source self.name
        constrained to the range [self.param_min, self.param_max].

        Stores 'fit_before' and 'fit_after' source summaries in
        self.results. If self.keep_best, the fitted state is kept (with
        the original mapper restored); otherwise the ROI is rolled back.
    """
    self.results = dict()
    self.init_state = PointlikeState(self.roi)

    model = self.roi.get_model(self.name)
    init_mapper = model.get_mapper(self.param_name)

    param_val = model[self.param_name]
    # The current value must already lie inside the requested range.
    assert param_val >= self.param_min and param_val <= self.param_max
    if isinstance(init_mapper, LimitMapper):
        # The requested range must sit inside any pre-existing limits.
        assert init_mapper.min <= self.param_min and init_mapper.max >= self.param_max

    # Impose the (possibly tighter) limits for the duration of the fit.
    model.set_mapper(
        self.param_name,
        LimitMapper(self.param_min, self.param_max, scale=param_val))
    self.roi.modify(which=self.name, model=model)

    if self.verbosity:
        print 'Before fit'
        self.roi.print_summary()

    self.results['fit_before'] = source_dict(
        self.roi, self.name,
        energy_units=self.energy_units,
        flux_units=self.flux_units)

    self.roi.fit(**self.fit_kwargs)

    if self.verbosity:
        print 'After fit'
        self.roi.print_summary()

    self.results['fit_after'] = source_dict(self.roi, self.name,
                                            energy_units=self.energy_units,
                                            flux_units=self.flux_units)

    if self.keep_best:
        # Keep the fitted parameters but put back the original mapper.
        model = self.roi.get_model(self.name)
        model.set_mapper(self.param_name, init_mapper)
    else:
        self.init_state.restore(just_spectra=True)
def __init__(self, roi, name, *args, **kwargs):
    """ Set up and run the variability test for source `name`.

        Prepares the save directory and time bins, then runs
        _test_variability with the ROI state snapshotted around it.
    """
    self.roi = roi
    self.name = name
    keyword_options.process(self, kwargs)

    # Gradient-free fitting is used for the per-bin pointlike fits.
    self.pointlike_fit_kwargs = {'use_gradient': False}

    self._setup_savedir()
    self._setup_time_bins()

    snapshot = PointlikeState(roi)
    self._test_variability()
    snapshot.restore()
def _compute(self): if self.verbosity: print 'Calculating pointlike upper limit' roi = self.roi name = self.name saved_state = PointlikeState(roi) """ Note keep old flux, because it is important to have the spectral model pushed into the upper_limit code reasonably close to the best fit flux. This is because initial likelihood (ll_0) is used to scale the likelihood so it has to be reasonably close to the best value. """ model = PowerLaw(index=self.powerlaw_index) roi.modify(which=name, model=model, keep_old_flux=True) super(PointlikePowerLawUpperLimit,self)._compute() saved_state.restore(just_spectra=True)
def __init__(self, roi, bin_edges, nrows=1, grid_kwargs=None, **kwargs):
    """ Build one map (self.object) per energy bin defined by bin_edges.

        Parameters:
            roi         -- the ROI to plot.
            bin_edges   -- energy bin edges; each must coincide with an
                           existing ROI bin edge (within 0.5).
            nrows       -- number of subplot rows; columns are derived.
            grid_kwargs -- overrides merged onto the default ImageGrid
                           options (None means no overrides).
            kwargs      -- passed through to self.object for each bin.
    """
    # Bug fix: None sentinel instead of a shared mutable default dict().
    if grid_kwargs is None:
        grid_kwargs = dict()

    default_grid_kwargs = dict(axes_pad=0.1, cbar_location="top",
                               cbar_mode="each", cbar_size="7%",
                               cbar_pad="2%")
    self.grid_kwargs = default_grid_kwargs.copy()
    self.grid_kwargs.update(grid_kwargs)

    self.roi = roi
    keyword_options.process(self, kwargs)
    self.nrows = nrows
    self.bin_edges = bin_edges

    self.nplots = len(self.bin_edges) - 1
    self.ncols = int(math.ceil(float(self.nplots) / self.nrows))

    # step 1, test consistentcy of each energy with binning in pointlike
    for e in bin_edges:
        if not np.any(np.abs(e - roi.bin_edges) < 0.5):
            raise Exception(
                "Energy %.1f inconsistent with ROI energy binning." % e)

    self.lower_energies = bin_edges[:-1]
    self.upper_energies = bin_edges[1:]

    state = PointlikeState(roi)

    kwargs['title'] = ''  # dont title the subplots

    self.maps = []
    for i, (lower, upper) in enumerate(
            zip(self.lower_energies, self.upper_energies)):
        # Temporarily narrow the fit range to this bin and build its map.
        roi.change_binning(fit_emin=lower, fit_emax=upper)
        self.maps.append(self.object(roi, **kwargs))

    state.restore()
def _calculate(self): roi = self.roi name = self.name param_name = self.param_name self.init_state = PointlikeState(roi) self.results = dict(name=name, param_name=param_name, param_vals=self.param_vals, grid=[]) if self.verbosity: print 'Performing grid over parameter %s for source %s' % ( name, param_name) best_state = None self.best_ll = -np.inf model = roi.get_model(which=name) old_free = model.get_free(param_name) for i, p in enumerate(self.param_vals): if self.verbosity: print 'looping for param %s=%s (%d/%d)' % ( param_name, p, i + 1, len(self.param_vals)) self.init_state.restore(just_spectra=True) model = roi.get_model(which=name) model[param_name] = p model.set_free(param_name, False) roi.modify(which=name, model=model, keep_old_flux=False) if self.verbosity: roi.print_summary() roi.fit(**self.fit_kwargs) if self.verbosity: roi.print_summary() d = source_dict(roi, name, energy_units=self.energy_units, flux_units=self.flux_units) self.results['grid'].append(d) ll = self.results['grid'][-1]['logLikelihood'] if ll > self.best_ll: self.best_state = PointlikeState(roi) self.best_ll = ll self.best_d = d self.results['best'] = self.best_d if self.keep_best: self.best_state.restore(just_spectra=True) model = roi.get_model(which=name) model.set_free(param_name, old_free) roi.modify(which=name, model=model, keep_old_flux=False) else: self.init_state.restore(just_spectra=True)
def _calculate(self):
    """ Test for a spectral cutoff by comparing a PowerLaw fit
        (hypothesis 0) against a PLSuperExpCutoff fit (hypothesis 1).

        Fills self.results with the energy range, both hypothesis
        summaries, and 'TS_cutoff' = TS(cutoff) - TS(powerlaw). The ROI
        state is snapshotted first and restored at the end.
    """
    roi = self.roi
    name = self.name

    if self.verbosity:
        print 'Testing cutoff in pointlike'
    emin, emax = get_full_energy_range(roi)

    self.results = d = dict(energy=energy_dict(
        emin=emin, emax=emax, energy_units=self.energy_units))

    saved_state = PointlikeState(roi)

    old_flux = roi.get_model(name).i_flux(emin, emax)

    # Hypothesis 0 needs a PowerLaw; substitute one (at the old flux)
    # only if the source does not already have one.
    if not isinstance(roi.get_model(name), PowerLaw):
        powerlaw_model = PowerLaw(norm=1e-11, index=2,
                                  e0=np.sqrt(emin * emax))
        powerlaw_model.set_mapper('Index',
                                  PowerLaw.default_limits['Index'])
        powerlaw_model.set_flux(old_flux, emin=emin, emax=emax)
        if self.verbosity:
            print "powerlaw_model is ", powerlaw_model
        roi.modify(which=name, model=powerlaw_model, keep_old_flux=False)

    fit = lambda: roi.fit(**self.fit_kwargs)

    def ts():
        # TS with the ROI temporarily silenced.
        old_quiet = roi.quiet
        roi.quiet = True
        ts = roi.TS(name, quick=False)
        roi.quiet = old_quiet
        return ts

    spectrum = lambda: spectrum_to_dict(roi.get_model(name), errors=True)

    if self.verbosity:
        print 'About to fit powerlaw_model'
        roi.print_summary()
    fit()
    if self.verbosity:
        print 'Done fitting powerlaw_model'
        roi.print_summary()

    d['hypothesis_0'] = source_dict(roi, name, emin=emin, emax=emax,
                                    flux_units=self.flux_units,
                                    energy_units=self.energy_units,
                                    verbosity=self.verbosity)

    # Use the caller-supplied cutoff model if given; otherwise build a
    # default PLSuperExpCutoff with b frozen at 1 and the old flux.
    if self.cutoff_model is not None:
        pass
    else:
        self.cutoff_model = PLSuperExpCutoff(norm=1e-9, index=1,
                                             cutoff=1000, e0=1000, b=1)
        # Note, don't limit the normalization parameter
        for p in ['Index', 'Cutoff', 'b']:
            self.cutoff_model.set_mapper(
                p, PLSuperExpCutoff.default_limits[p])
        self.cutoff_model.set_free('b', False)
        self.cutoff_model.set_flux(old_flux, emin=emin, emax=emax)

    if self.verbosity:
        print "cutoff_model is ", self.cutoff_model

    roi.modify(which=name, model=self.cutoff_model, keep_old_flux=False)

    if self.verbosity:
        print 'About to fit cutoff_model'
        roi.print_summary()
    fit()

    ll = -roi.logLikelihood(roi.parameters())
    if ll < d['hypothesis_0']['logLikelihood']:
        # if fit is worse than PowerLaw fit, then
        # restart fit with parameters almost
        # equal to best fit powerlaw
        self.cutoff_plaw = PLSuperExpCutoff(b=1)
        self.cutoff_plaw.set_free('b', False)
        self.cutoff_plaw.setp('norm',
                              d['hypothesis_0']['spectrum']['Norm'])
        self.cutoff_plaw.setp('index',
                              d['hypothesis_0']['spectrum']['Index'])
        self.cutoff_plaw.setp('e0', d['hypothesis_0']['spectrum']['e0'])
        # Huge cutoff makes the shape effectively a pure power law.
        self.cutoff_plaw.setp('cutoff', 1e6)

        roi.modify(which=name, model=self.cutoff_plaw,
                   keep_old_flux=False)
        fit()
        if self.verbosity:
            print 'Redoing fit with cutoff same as plaw'
            print 'Before:'
            roi.print_summary()
            print
        # NOTE(review): the second fit() here follows an earlier fit()
        # above; the collapsed source is ambiguous about whether it was
        # `print fit()` inside the verbosity block -- confirm against VCS.
        fit()

    if self.verbosity:
        print 'Done fitting cutoff_model'
        roi.print_summary()

    d['hypothesis_1'] = source_dict(roi, name, emin=emin, emax=emax,
                                    flux_units=self.flux_units,
                                    energy_units=self.energy_units,
                                    verbosity=self.verbosity)

    d['TS_cutoff'] = d['hypothesis_1']['TS']['noquick'] - d[
        'hypothesis_0']['TS']['noquick']

    saved_state.restore()