def run(self): """Run the algorithm to compute the differential sensitivity as explained in the document of the class. """ # Creation of the spectral shape norm = 1 * u.Unit('cm-2 s-1 TeV-1') index = self.slope ref = 1 * u.TeV model = PowerLaw(index=index, amplitude=norm, reference=ref) # Get the bins in reconstructed energy reco_energy = self.irf.bkg.energy # Start the computation bkg_counts = self.get_bkg(self.irf.bkg) if self.random < 1: excess_counts = self.get_excess(bkg_counts) else: ex = self.get_excess(np.random.poisson(bkg_counts)) for ii in range(self.random - 1): ex += self.get_excess(np.random.poisson(bkg_counts)) excess_counts = ex / float(self.random) phi_0 = self.get_1TeV_differential_flux(excess_counts, model, self.irf.aeff, self.irf.rmf) energy = reco_energy.log_center() dnde_model = model.evaluate(energy=energy, index=index, amplitude=1, reference=ref) diff_flux = (phi_0 * dnde_model * energy ** 2).to('erg / (cm2 s)') self.energy = reco_energy.log_center() self.diff_sens = diff_flux
def setup(self):
    """Simulate source and background on/off count spectra."""
    self.nbins = 30
    edges = np.logspace(-1, 1, self.nbins + 1) * u.TeV
    lo, hi = edges[:-1], edges[1:]

    self.source_model = PowerLaw(index=2,
                                 amplitude=1e5 / u.TeV,
                                 reference=0.1 * u.TeV)
    self.bkg_model = PowerLaw(index=3,
                              amplitude=1e4 / u.TeV,
                              reference=0.1 * u.TeV)
    self.alpha = 0.1

    rng = get_random_state(23)

    # Poisson realisation of the source prediction (drawn first so the
    # RNG sequence matches: source, background, off)
    mu_src = self.source_model.integral(lo, hi)
    self.src = CountsSpectrum(energy_lo=lo, energy_hi=hi,
                              data=rng.poisson(mu_src))
    # Currently it's necessary to specify a lifetime
    self.src.livetime = 1 * u.s

    # Background in the on region and scaled-up off-region counts
    mu_bkg = self.bkg_model.integral(lo, hi)
    self.bkg = CountsSpectrum(energy_lo=lo, energy_hi=hi,
                              data=rng.poisson(mu_bkg))
    self.off = CountsSpectrum(energy_lo=lo, energy_hi=hi,
                              data=rng.poisson(mu_bkg * 1.0 / self.alpha))
def test_absorption():
    """EBL absorption factor must equal the absorbed/intrinsic flux ratio."""
    # absorption values for given redshift
    redshift = 0.117
    absorption = Absorption.read_builtin("dominguez")

    # Spectral model corresponding to PKS 2155-304 (quiescent state)
    index = 3.53
    amplitude = 1.81 * 1e-12 * u.Unit("cm-2 s-1 TeV-1")
    reference = 1 * u.TeV
    pwl = PowerLaw(index=index, amplitude=amplitude, reference=reference)

    # EBL + PWL model
    absorbed = AbsorbedSpectralModel(
        spectral_model=pwl, absorption=absorption, parameter=redshift
    )

    # Evaluate both models at the reference energy and compare the ratio
    # with the absorption factor itself.
    kwargs = dict(index=index, amplitude=amplitude,
                  reference=reference, redshift=redshift)
    flux_absorbed = absorbed.evaluate(energy=reference, **kwargs)
    flux_intrinsic = pwl.evaluate(
        energy=reference, index=index, amplitude=amplitude, reference=reference
    )

    desired = absorption.evaluate(energy=reference, parameter=redshift)
    assert_quantity_allclose(flux_absorbed / flux_intrinsic, desired)
def setup(self):
    """Simulate a source + flat-background spectrum dataset."""
    self.nbins = 30
    edges = np.logspace(-1, 1, self.nbins + 1) * u.TeV
    lo, hi = edges[:-1], edges[1:]

    self.source_model = PowerLaw(index=2.1,
                                 amplitude=1e5 / u.TeV / u.s,
                                 reference=0.1 * u.TeV)
    self.livetime = 100 * u.s

    # Flat background: one count per second in every bin
    bkg_expected = (np.ones(self.nbins) / u.s) * self.livetime
    self.bkg = CountsSpectrum(energy_lo=lo, energy_hi=hi, data=bkg_expected)

    rng = get_random_state(23)

    # Total expectation = source prediction over the livetime + background
    self.npred = self.source_model.integral(lo, hi) * self.livetime
    self.npred += bkg_expected

    self.src = CountsSpectrum(energy_lo=lo, energy_hi=hi,
                              data=rng.poisson(self.npred))

    self.dataset = SpectrumDataset(
        model=self.source_model,
        counts=self.src,
        livetime=self.livetime,
        background=self.bkg,
    )
def run(self): """Run the algorithm to compute the differential sensitivity as explained in the document of the class. """ # Creation of the spectral shape norm = 1 * u.Unit('cm-2 s-1 TeV-1') index = self.slope ref = 1 * u.TeV model = PowerLaw(index=index, amplitude=norm, reference=ref) # Get the bins in reconstructed energy reco_energy = self.irf.bkg.energy # Start the computation bkg_counts = self.get_bkg(self.irf.bkg) if self.random < 1: excess_counts = self.get_excess(bkg_counts) else: ex = self.get_excess(np.random.poisson(bkg_counts)) for ii in range(self.random - 1): ex += self.get_excess(np.random.poisson(bkg_counts)) excess_counts = ex / float(self.random) phi_0 = self.get_1TeV_differential_flux(excess_counts, model, self.irf.aeff, self.irf.rmf) energy = reco_energy.log_center() dnde_model = model.evaluate(energy=energy, index=index, amplitude=1, reference=ref) diff_flux = (phi_0 * dnde_model * energy**2).to('erg / (cm2 s)') self.energy = reco_energy.log_center() self.diff_sens = diff_flux
def test_no_likelihood_contribution():
    """A fully-masked dataset must make the flux-point estimator raise."""
    dataset = simulate_spectrum_dataset(PowerLaw())
    dataset.model = PowerLaw()
    # Mask out every bin so nothing contributes to the likelihood
    dataset.mask_safe = np.zeros(dataset.data_shape, dtype=bool)

    estimator = FluxPointsEstimator([dataset], e_edges=[1, 10] * u.TeV)

    with pytest.raises(ValueError) as excinfo:
        estimator.run()

    assert "No dataset contributes" in str(excinfo.value)
class TestSimpleFit:
    """Test fit on counts spectra without any IRFs"""

    def setup(self):
        """Simulate source and background on/off count spectra."""
        self.nbins = 30
        edges = np.logspace(-1, 1, self.nbins + 1) * u.TeV
        lo, hi = edges[:-1], edges[1:]

        self.source_model = PowerLaw(index=2,
                                     amplitude=1e5 / u.TeV,
                                     reference=0.1 * u.TeV)
        self.bkg_model = PowerLaw(index=3,
                                  amplitude=1e4 / u.TeV,
                                  reference=0.1 * u.TeV)
        self.alpha = 0.1

        rng = get_random_state(23)

        # Poisson realisation of the source prediction (drawn first so the
        # RNG sequence matches: source, background, off)
        mu_src = self.source_model.integral(lo, hi)
        self.src = CountsSpectrum(energy_lo=lo, energy_hi=hi,
                                  data=rng.poisson(mu_src))
        # Currently it's necessary to specify a lifetime
        self.src.livetime = 1 * u.s

        # Background in the on region and scaled-up off-region counts
        mu_bkg = self.bkg_model.integral(lo, hi)
        self.bkg = CountsSpectrum(energy_lo=lo, energy_hi=hi,
                                  data=rng.poisson(mu_bkg))
        self.off = CountsSpectrum(energy_lo=lo, energy_hi=hi,
                                  data=rng.poisson(mu_bkg * 1.0 / self.alpha))

    def test_wstat(self):
        """WStat with on source and background spectrum"""
        # On counts = source + background realisations
        on_vector = self.src.copy()
        on_vector.data += self.bkg.data

        dataset = SpectrumDatasetOnOff(
            counts=on_vector,
            counts_off=self.off,
            acceptance=1,
            acceptance_off=1 / self.alpha,
        )
        dataset.model = self.source_model

        # Start the fit away from the true index
        self.source_model.parameters.index = 1.12
        result = Fit(dataset).run()

        pars = self.source_model.parameters
        assert_allclose(pars["index"].value, 1.997342, rtol=1e-3)
        assert_allclose(pars["amplitude"].value, 100245.187067, rtol=1e-3)
        assert_allclose(result.total_stat, 30.022316, rtol=1e-3)
def test_integrate_spectrum():
    """
    Test numerical integration against analytical solution.
    """
    energy_min = Quantity(1, "TeV")
    energy_max = Quantity(10, "TeV")
    pwl = PowerLaw(index=2.3)

    analytical = pwl.integral(emin=energy_min, emax=energy_max)
    numerical = integrate_spectrum(pwl, energy_min, energy_max)

    assert_quantity_allclose(numerical, analytical)
def make_image(self, exposures, spectrum=None):
    """Make a 2D PSF from a PSF kernel.

    Each energy slice of the kernel is weighted by the spectrum evaluated
    at the bin center, times the bin width and the exposure; the weighted
    slices are summed over energy and renormalised to unit integral.

    Parameters
    ----------
    exposures : `~numpy.ndarray`
        An array of exposures for the same true energies as the PSF kernel
    spectrum : `~gammapy.spectrum.models.SpectralModel`
        Spectral model to compute the weights.
        Default is power-law with spectral index of 2.

    Returns
    -------
    psf2D : `~gammapy.maps.Map`
        Weighted 2D psf
    """
    if spectrum is None:
        spectrum = PowerLaw(index=2.0)

    energy_axis = self.psf_kernel_map.geom.get_axis_by_name("energy")
    bin_widths = np.diff(energy_axis.edges)

    # Per-energy weights, normalised to sum to one
    weights = spectrum(energy_axis.center) * bin_widths * exposures
    weights /= weights.sum()

    weighted = self.psf_kernel_map.copy()
    # In-place scaling so the copy's data is modified slice by slice
    for image, idx in weighted.iter_by_image():
        image *= weights[idx].value

    psf2D = weighted.sum_over_axes()
    psf2D.data = psf2D.data / psf2D.data.sum()
    return PSFKernel(psf2D)
def test_fake(self):
    """Test the fake dataset"""
    dataset = SpectrumDatasetOnOff(
        counts=self.on_counts,
        counts_off=self.off_counts,
        model=PowerLaw(),
        aeff=self.aeff,
        livetime=self.livetime,
        edisp=self.edisp,
        acceptance=1,
        acceptance_off=10,
    )
    original = dataset.copy()

    # Flat background model: one count per reconstructed-energy bin
    edges = self.on_counts.energy.edges
    background_model = CountsSpectrum(
        edges[:-1], edges[1:], np.ones(self.on_counts.data.shape)
    )

    dataset.fake(background_model=background_model, random_state=314)

    # Shapes, binning and acceptances are preserved by ``fake``
    assert original.counts.data.shape == dataset.counts.data.shape
    assert original.counts_off.data.shape == dataset.counts_off.data.shape
    assert (original.counts.energy.center.mean()
            == dataset.counts.energy.center.mean())
    assert original.acceptance.mean() == dataset.acceptance.mean()
    assert original.acceptance_off.mean() == dataset.acceptance_off.mean()

    # Exact totals for this fixed random seed
    assert dataset.counts_off.data.sum() == 39
    assert dataset.counts.data.sum() == 5
def test_power_law():
    """Run the generic model checks on a plain power law."""
    pl = PowerLaw(
        index=2,
        amplitude=Q(1e-11, 'cm^-2 s^-1 TeV^-1'),
        reference=Q(1, 'TeV'),
    )
    test_model(pl)
def simulate_map_dataset(random_state=0):
    """Simulate a map dataset: Gaussian source, power-law spectrum, CTA IRFs."""
    irfs = load_cta_irfs(
        "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
    )

    pointing = SkyCoord("0 deg", "0 deg", frame="galactic")
    energy_axis = MapAxis.from_edges(
        edges=np.logspace(-1, 2, 15) * u.TeV, name="energy"
    )
    geom = WcsGeom.create(
        skydir=pointing,
        width=(4, 4),
        binsz=0.1,
        axes=[energy_axis],
        coordsys="GAL",
    )

    spatial = SkyGaussian("0 deg", "0 deg", "0.4 deg", frame="galactic")
    spectral = PowerLaw(amplitude="1e-11 cm-2 s-1 TeV-1")
    model = SkyModel(spatial_model=spatial, spectral_model=spectral,
                     name="source")

    return simulate_dataset(
        skymodel=model,
        geom=geom,
        pointing=pointing,
        irfs=irfs,
        random_state=random_state,
    )
def get_npred_map():
    """Predicted-counts map for a Gaussian source over uniform exposure."""
    position = SkyCoord(0.0, 0.0, frame="galactic", unit="deg")
    energy_axis = MapAxis.from_bounds(
        1, 100, nbin=30, unit="TeV", name="energy", interp="log"
    )

    exposure = Map.create(
        binsz=0.02,
        map_type="wcs",
        skydir=position,
        width="5 deg",
        axes=[energy_axis],
        coordsys="GAL",
        unit="cm2 s",
    )
    # Constant exposure everywhere
    exposure.data = 1e14 * np.ones(exposure.data.shape)

    skymodel = SkyModel(
        spatial_model=SkyGaussian("0 deg", "0 deg", sigma="0.2 deg"),
        spectral_model=PowerLaw(amplitude="1e-11 cm-2 s-1 TeV-1"),
    )

    return MapEvaluator(model=skymodel, exposure=exposure).compute_npred()
def get_model_gammapy(config):
    """Build a `CombinedModel3D` from the ``model`` section of ``config``."""
    cfg = config['model']

    if cfg['template'] == 'Shell2D':
        spatial_model = Shell2D(
            amplitude=1,
            x_0=cfg['ra'],
            y_0=cfg['dec'],
            r_in=cfg['rin'],
            width=cfg['width'],
            # Note: for now we need spatial models that are normalised
            # to integrate to 1 or results will be incorrect!!!
            normed=True,
        )
    elif cfg['template'] == 'Sphere2D':
        spatial_model = Sphere2D(
            amplitude=1,
            x_0=cfg['ra'],
            y_0=cfg['dec'],
            r_0=cfg['rad'],
            # Note: for now we need spatial models that are normalised
            # to integrate to 1 or results will be incorrect!!!
            normed=True,
        )
    elif cfg['template'] == 'Gauss2D':
        # Gauss2DPDF is normalised by construction; position and
        # amplitude are not free parameters here.
        spatial_model = Gauss2DPDF(sigma=cfg['sigma'])

    if cfg['spectrum'] == 'pl':
        spectral_model = PowerLaw(
            amplitude=cfg['prefactor'] * u.Unit('cm-2 s-1 TeV-1'),
            index=cfg['index'],
            reference=cfg['pivot_energy'] * u.Unit('TeV'),
        )
    elif cfg['spectrum'] == 'ecpl':
        spectral_model = ExponentialCutoffPowerLaw(
            amplitude=cfg['prefactor'] * u.Unit('cm-2 s-1 TeV-1'),
            index=cfg['index'],
            reference=cfg['pivot_energy'] * u.Unit('TeV'),
            lambda_=cfg['cutoff'] * u.Unit('TeV-1'),
        )
    elif cfg['spectrum'] == 'LogParabola':
        spectral_model = LogParabola(
            amplitude=cfg['prefactor'] * u.Unit('cm-2 s-1 TeV-1'),
            alpha=cfg['alphapar'],
            beta=cfg['beta'],
            reference=cfg['pivot_energy'] * u.Unit('TeV'),
        )

    return CombinedModel3D(
        spatial_model=spatial_model,
        spectral_model=spectral_model,
    )
def spectral_model(self):
    """Best fit spectral model (`~gammapy.spectrum.models.SpectralModel`)."""
    d = self.data
    spec_type = d["SpectrumType"].strip()

    # Parameters common to both supported spectral shapes
    pars = {
        "amplitude": d["Flux_Density"],
        "reference": d["Pivot_Energy"],
    }
    errs = {"amplitude": d["Unc_Flux_Density"]}

    if spec_type == "PowerLaw":
        pars["index"] = d["PowerLaw_Index"]
        errs["index"] = d["Unc_PowerLaw_Index"]
        model = PowerLaw(**pars)
    elif spec_type == "LogParabola":
        pars["alpha"] = d["Spectral_Index"]
        pars["beta"] = d["beta"]
        errs["alpha"] = d["Unc_Spectral_Index"]
        errs["beta"] = d["Unc_beta"]
        model = LogParabola(**pars)
    else:
        raise ValueError("Invalid spec_type: {!r}".format(spec_type))

    model.parameters.set_parameter_errors(errs)
    return model
def sky_model():
    """Gaussian source with a power-law spectrum."""
    return SkyModel(
        spatial_model=SkyGaussian(lon_0="0.2 deg", lat_0="0.1 deg",
                                  sigma="0.2 deg"),
        spectral_model=PowerLaw(index=3, amplitude="1e-11 cm-2 s-1 TeV-1",
                                reference="1 TeV"),
    )
def test_pwl_pivot_energy():
    """Check the decorrelation (pivot) energy of a power law.

    The pivot energy depends only on the parameter values and their
    covariance, not on the amplitude's unit, so the expected value is
    unchanged by the unit fix below.
    """
    # Fix: the amplitude unit was "cm-1" (a typo) — a differential flux
    # density must be per unit area, i.e. "cm-2".
    pwl = PowerLaw(amplitude="5.35510540e-11 TeV-1 cm-2 s-1")
    # Covariance matrix in parameter order (index, amplitude, reference)
    pwl.parameters.covariance = np.array(
        [[0.0318377 ** 2, 6.56889442e-14, 0], [6.56889442e-14, 0, 0], [0, 0, 0]]
    )
    assert_quantity_allclose(pwl.pivot_energy, 3.3540034240210987 * u.TeV)
def get_test_cases():
    """Model/IRF combinations with their expected predicted counts."""
    e_true = Quantity(np.logspace(-1, 2, 120), "TeV")
    e_reco = Quantity(np.logspace(-1, 2, 100), "TeV")

    cases = []

    # Bare models evaluated on the true-energy grid
    cases.append(
        dict(model=PowerLaw(amplitude="1e2 TeV-1"), e_true=e_true, npred=999)
    )
    cases.append(
        dict(
            model=PowerLaw2(amplitude="1", emin="0.1 TeV", emax="100 TeV"),
            e_true=e_true,
            npred=1,
        )
    )

    # With an effective area and a livetime
    cases.append(
        dict(
            model=PowerLaw(amplitude="1e-11 TeV-1 cm-2 s-1"),
            aeff=EffectiveAreaTable.from_parametrization(e_true),
            livetime="10 h",
            npred=1448.05960,
        )
    )
    cases.append(
        dict(
            model=PowerLaw(reference="1 GeV", amplitude="1e-11 GeV-1 cm-2 s-1"),
            aeff=EffectiveAreaTable.from_parametrization(e_true),
            livetime="30 h",
            npred=4.34417881,
        )
    )

    # With an additional Gaussian energy dispersion
    cases.append(
        dict(
            model=PowerLaw(amplitude="1e-11 TeV-1 cm-2 s-1"),
            aeff=EffectiveAreaTable.from_parametrization(e_true),
            edisp=EnergyDispersion.from_gauss(e_reco=e_reco, e_true=e_true,
                                              bias=0, sigma=0.2),
            livetime="10 h",
            npred=1437.450076,
        )
    )

    # Tabulated model on a coarse grid
    cases.append(
        dict(
            model=TableModel(
                energy=[0.1, 0.2, 0.3, 0.4] * u.TeV,
                values=[4.0, 3.0, 1.0, 0.1] * u.Unit("TeV-1"),
            ),
            e_true=[0.1, 0.2, 0.3, 0.4] * u.TeV,
            npred=0.554513062,
        )
    )

    return cases
def table_model():
    """Table model sampled from a power law on a log-spaced energy grid."""
    edges = energy_logspace(0.1 * u.TeV, 100 * u.TeV, 1000)
    # Geometric bin centers
    centers = np.sqrt(edges[:-1] * edges[1:])

    # Positional args: (index, amplitude, reference)
    pl = PowerLaw(
        2.3 * u.Unit(""),
        4 / u.cm ** 2 / u.s / u.TeV,
        1 * u.TeV,
    )

    return TableModel(centers, pl(centers), 1 * u.Unit(""))
def test_sky_model_init():
    """Invalid spectral/spatial model types must raise ValueError."""
    with pytest.raises(ValueError) as excinfo:
        gauss = SkyGaussian("0 deg", "0 deg", "0.1 deg")
        _ = SkyModel(spectral_model=1234, spatial_model=gauss)
    assert "Spectral model" in str(excinfo.value)

    with pytest.raises(ValueError) as excinfo:
        _ = SkyModel(spectral_model=PowerLaw(), spatial_model=1234)
    assert "Spatial model" in str(excinfo.value)
def test_pwl_index_2_error():
    """Error propagation for a power law with index fixed to 2."""
    pwl = PowerLaw(
        index=2 * u.Unit(""),
        amplitude=1e-12 * u.Unit("TeV-1 cm-2 s-1"),
        reference=1 * u.Unit("TeV"),
    )
    pwl.parameters.set_parameter_errors(
        {"amplitude": 0.1e-12 * u.Unit("TeV-1 cm-2 s-1")}
    )

    # At the reference energy the relative error equals the amplitude's
    val, val_err = pwl.evaluate_error(1 * u.TeV)
    assert_quantity_allclose(val, 1e-12 * u.Unit("TeV-1 cm-2 s-1"))
    assert_quantity_allclose(val_err, 0.1e-12 * u.Unit("TeV-1 cm-2 s-1"))

    flux, flux_err = pwl.integral_error(1 * u.TeV, 10 * u.TeV)
    assert_quantity_allclose(flux, 9e-13 * u.Unit("cm-2 s-1"))
    assert_quantity_allclose(flux_err, 9e-14 * u.Unit("cm-2 s-1"))

    eflux, eflux_err = pwl.energy_flux_error(1 * u.TeV, 10 * u.TeV)
    assert_quantity_allclose(eflux, 2.302585e-12 * u.Unit("TeV cm-2 s-1"))
    assert_quantity_allclose(eflux_err, 0.2302585e-12 * u.Unit("TeV cm-2 s-1"))
def _get_spec_model(self, data):
    """Build a spectral model from a catalog record ``data``.

    Parameters are wrapped in ``uncertainties.ufloat`` so statistical
    errors travel with the values.
    """
    from uncertainties import ufloat

    spec_type = data['spec_type']

    # TODO: what about systematic errors?
    index = ufloat(data['spec_index'], data['spec_index_err'])
    amplitude = ufloat(data['spec_norm'], data['spec_norm_err'])
    reference = data['spec_ref']

    if spec_type == 'pl':
        model = PowerLaw(index, amplitude, reference)
    elif spec_type == 'pl2':
        # NOTE(review): arguments are positional — presumably
        # (amplitude, index, emin, emax), with ``reference`` used as the
        # lower bound and 1E10 as an effectively infinite upper bound.
        # Confirm against the PowerLaw2 signature.
        model = PowerLaw2(amplitude, index, reference, 1E10)
    elif spec_type == 'ecpl':
        # Cutoff is given as an energy; the model takes its inverse
        lambda_ = 1. / ufloat(data['spec_ecut'], data['spec_ecut_err'])
        model = ExponentialCutoffPowerLaw(index, amplitude, reference, lambda_)
    else:
        # return generic model, as all parameters are NaN it will
        # evaluate to NaN
        model = PowerLaw(index, amplitude, reference)

    return model
def test_verify_npred(self):
    """Verify that npred is preserved by the stacking."""
    pwl = PowerLaw(
        index=2,
        amplitude=2e-11 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.TeV,
    )

    # Prediction of the stacked observation
    self.obs_stacker.stacked_obs.model = pwl
    npred_stacked = self.obs_stacker.stacked_obs.npred().data

    # Sum of the individual predictions, restricted to safe bins
    npred_summed = np.zeros_like(npred_stacked)
    for obs in self.obs_list:
        obs.model = pwl
        npred_summed[obs.mask_safe] += obs.npred().data[obs.mask_safe]

    assert_allclose(npred_stacked, npred_summed)
def test_plot_fit(self):
    """Plotting the fit must not raise."""
    dataset = SpectrumDatasetOnOff(
        counts=self.on_counts,
        counts_off=self.off_counts,
        model=PowerLaw(),
        aeff=self.aeff,
        livetime=self.livetime,
        edisp=self.edisp,
        acceptance=1,
        acceptance_off=10,
    )
    with mpl_plot_check():
        dataset.plot_fit()
def test_str(self):
    """The string representation must name the class and the fit statistic."""
    dataset = SpectrumDatasetOnOff(
        counts=self.on_counts,
        counts_off=self.off_counts,
        model=PowerLaw(),
        aeff=self.aeff,
        livetime=self.livetime,
        edisp=self.edisp,
        acceptance=1,
        acceptance_off=10,
    )
    text = str(dataset)
    assert "SpectrumDatasetOnOff" in text
    assert "wstat" in text
def calc_bk(self, lon0, lat0, sig, amp): """ returns the computed b_k and the diffuse model template. """ # Define sky model to fit the data ind = 2.0 spatial_model = SkyGaussian(lon_0=lon0, lat_0=lat0, sigma=sig) spectral_model = PowerLaw(index=ind, amplitude=amp, reference="1 TeV") model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model) # For simulations, we can have the same npred map b_k = [] Sk_list = [] for count, bkg, exp in zip(self.count_list, self.background_list, self.exposure_list): evaluator = MapEvaluator(model=model, exposure=exp) npred = evaluator.compute_npred() geom = exp.geom diffuse_map = WcsNDMap(geom, npred) #This is Sk Bk = bkg.data Sk = diffuse_map.data Nk = count.data not_has_exposure = ~(exp.data > 0) not_has_bkg = ~(Bk > 0) S_B = np.divide(Sk, Bk) S_B[not_has_exposure] = 0.0 S_B[not_has_bkg] = 0.0 #Sk is nan for large sep.. to be investigated. temp soln #if np.isnan(np.sum(S_B)): # S_B=np.zeros(S_B.shape) delta = np.power(np.sum(Nk) / np.sum(Bk), 2.0) - 4.0 * np.sum(S_B) / np.sum(Bk) #print(np.sum(Nk),np.sum(Bk),np.sum(Sk),np.sum(S_B), delta) #print("delta is %f for obs no %s",delta,k) #bk1=(np.sum(Nk)/np.sum(Bk) - np.sqrt(delta))/2.0 bk2 = (np.sum(Nk) / np.sum(Bk) + np.sqrt(delta)) / 2.0 b_k.append(bk2) Sk_list.append(diffuse_map) return Sk_list, b_k
def _get_spectral_model(self, idx):
    """Power-law spectral model for spectrum number ``idx``."""
    data = self.data
    prefix = "spec{}_".format(idx)

    pars = {
        "amplitude": data[prefix + "dnde"],
        "index": data[prefix + "index"],
        "reference": "7 TeV",
    }
    errs = {
        "amplitude": data[prefix + "dnde_err"],
        "index": data[prefix + "index_err"],
    }

    model = PowerLaw(**pars)
    model.parameters.set_parameter_errors(errs)
    return model
def get_spectrum_datasets():
    """Two simulated datasets tagged with consecutive 1-hour intervals."""
    model = PowerLaw()
    intervals = [
        ("2010-01-01T00:00:00", "2010-01-01T01:00:00"),
        ("2010-01-01T01:00:00", "2010-01-01T02:00:00"),
    ]

    datasets = []
    for seed, (t_start, t_stop) in enumerate(intervals):
        dataset = simulate_spectrum_dataset(model, random_state=seed)
        dataset.counts.meta = {
            "t_start": Time(t_start),
            "t_stop": Time(t_stop),
        }
        datasets.append(dataset)
    return datasets
def get_sky_model():
    """Gaussian source with a steep power-law spectrum."""
    return SkyModel(
        spatial_model=SkyGaussian2D(
            lon_0='0.2 deg',
            lat_0='0.1 deg',
            sigma='0.2 deg',
        ),
        spectral_model=PowerLaw(
            index=3,
            amplitude='1e-11 cm-2 s-1 TeV-1',
            reference='1 TeV',
        ),
    )
def simulate_spectrum_dataset(model, random_state=0):
    """Simulate an on/off spectrum dataset for ``model``.

    Parameters
    ----------
    model : spectral model
        Source model used to fake the on counts.
    random_state : int
        Seed forwarded to ``dataset.fake``.

    Returns
    -------
    dataset : `SpectrumDatasetOnOff`
        Dataset with faked counts on top of a fixed power-law background.
    """
    energy = np.logspace(-0.5, 1.5, 21) * u.TeV
    aeff = EffectiveAreaTable.from_parametrization(energy=energy)
    bkg_model = PowerLaw(index=2.5, amplitude="1e-12 cm-2 s-1 TeV-1")

    dataset = SpectrumDatasetOnOff(
        aeff=aeff,
        model=model,
        livetime=100 * u.h,
        acceptance=1,
        acceptance_off=5,
    )

    # Fix: don't shadow the builtin ``eval`` and don't reuse the name
    # ``bkg_model`` for both the model and its predicted counts.
    evaluator = SpectrumEvaluator(model=bkg_model, aeff=aeff, livetime=100 * u.h)
    bkg_counts = evaluator.compute_npred()

    dataset.fake(random_state=random_state, background_model=bkg_counts)
    return dataset
def setup(self):
    """Load two HESS Crab runs and define candidate spectral models."""
    path = "$GAMMAPY_DATA/joint-crab/spectra/hess/"
    self.obs_list = [
        SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23523.fits"),
        SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23592.fits"),
    ]

    self.pwl = PowerLaw(
        index=2,
        amplitude=1e-12 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.TeV,
    )
    self.ecpl = ExponentialCutoffPowerLaw(
        index=2,
        amplitude=1e-12 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1 * u.TeV,
        lambda_=0.1 / u.TeV,
    )