def test_stat_surface():
    """Scan the fit statistic on a 2D grid of x/y values (no reoptimization)."""
    ds = MyDataset()
    fit = Fit()
    fit.run([ds])

    scan_x = [1, 2, 3]
    scan_y = [2e2, 3e2, 4e2]
    surface = fit.stat_surface(
        datasets=[ds], x="x", y="y", x_values=scan_x, y_values=scan_y
    )

    assert_allclose(surface["x_scan"], scan_x, atol=1e-7)
    assert_allclose(surface["y_scan"], scan_y, atol=1e-7)

    expected_stat = [
        [1.0001e04, 1.0000e00, 1.0001e04],
        [1.0000e04, 0.0000e00, 1.0000e04],
        [1.0001e04, 1.0000e00, 1.0001e04],
    ]
    assert_allclose(list(surface["stat_scan"]), expected_stat, atol=1e-7)

    # No per-point fit results are stored when reoptimize is off.
    assert len(surface["fit_results"]) == 0

    # Check that original value state wasn't changed
    assert_allclose(ds.models.parameters["x"].value, 2)
    assert_allclose(ds.models.parameters["y"].value, 3e2)
def test_stat_surface_reoptimize():
    """Stat-surface scan with the free parameter z reoptimized at each grid point."""
    ds = MyDataset()
    fit = Fit()
    fit.run([ds])

    # Free parameter moved away from its best-fit value before the scan.
    ds.models.parameters["z"].value = 0

    scan_x = [1, 2, 3]
    scan_y = [2e2, 3e2, 4e2]
    surface = fit.stat_surface(
        datasets=[ds],
        x="x",
        y="y",
        x_values=scan_x,
        y_values=scan_y,
        reoptimize=True,
    )

    assert_allclose(surface["x_scan"], scan_x, atol=1e-7)
    assert_allclose(surface["y_scan"], scan_y, atol=1e-7)

    expected_stat = [
        [1.0001e04, 1.0000e00, 1.0001e04],
        [1.0000e04, 0.0000e00, 1.0000e04],
        [1.0001e04, 1.0000e00, 1.0001e04],
    ]
    assert_allclose(list(surface["stat_scan"]), expected_stat, atol=1e-7)

    # With reoptimize=True each grid point stores its fit result; the stored
    # total stat must match the corresponding stat_scan entry.
    assert_allclose(
        surface["fit_results"][0][0].total_stat,
        surface["stat_scan"][0][0],
        atol=1e-7,
    )
def test_ecpl_fit(self):
    """Fit the exponential-cutoff power law and check the fitted lambda_."""
    self.set_model(self.ecpl)
    Fit().run([self.datasets[0]])

    lambda_ = self.datasets.parameters["lambda_"].quantity
    assert lambda_.unit == "TeV-1"
    assert_allclose(lambda_.value, 0.145215, rtol=1e-2)
def test_no_edisp(self):
    """Fitting a dataset with no energy dispersion should still converge."""
    ds = self.datasets[0].copy()
    ds.edisp = None
    ds.models = self.pwl

    Fit().run(datasets=[ds])

    assert_allclose(self.pwl.spectral_model.index.value, 2.7961, atol=0.02)
def test_joint_fit(self):
    """Joint fit of the power law over all datasets."""
    self.set_model(self.pwl)
    Fit().run(self.datasets)

    index = self.datasets.parameters["index"].value
    assert_allclose(index, 2.7806, rtol=1e-3)

    amplitude = self.datasets.parameters["amplitude"].quantity
    assert amplitude.unit == "cm-2 s-1 TeV-1"
    assert_allclose(amplitude.value, 5.200e-11, rtol=1e-3)
def test_stat_profile_reoptimize():
    """Stat profile of x with the free parameter y refit at each scan value."""
    ds = MyDataset()
    fit = Fit([ds])
    fit.run()

    # Displace y from its best-fit value so reoptimization has work to do.
    ds.models.parameters["y"].value = 0

    profile = fit.stat_profile("x", nvalues=3, reoptimize=True)

    assert_allclose(profile["values"], [0, 2, 4], atol=1e-7)
    assert_allclose(profile["stat"], [4, 0, 4], atol=1e-7)
def test_likelihood_profile_reoptimize():
    """Likelihood profile of x with y refit at each scan value."""
    ds = MyDataset()
    fit = Fit(ds)
    fit.run()

    # Displace y from its best-fit value so reoptimization has work to do.
    ds.parameters["y"].value = 0

    profile = fit.likelihood_profile("x", nvalues=3, reoptimize=True)

    assert_allclose(profile["values"], [0, 2, 4], atol=1e-7)
    assert_allclose(profile["likelihood"], [4, 0, 4], atol=1e-7)
def test_compound(self):
    """Fit a compound model (power law scaled by a constant factor)."""
    self.set_model(self.pwl * 2)

    fit = Fit(self.obs_list[0])
    fit.run()

    params = fit.datasets.parameters
    assert_allclose(params["index"].value, 2.8166, rtol=1e-3)

    amplitude = params["amplitude"]
    assert amplitude.unit == "cm-2 s-1 TeV-1"
    assert_allclose(amplitude.value, 5.0714e-12, rtol=1e-3)
def test_likelihood_profile():
    """Likelihood profile of x without reoptimization."""
    ds = MyDataset()
    fit = Fit(ds)
    fit.run()

    profile = fit.likelihood_profile("x", nvalues=3)

    assert_allclose(profile["values"], [0, 2, 4], atol=1e-7)
    assert_allclose(profile["likelihood"], [4, 0, 4], atol=1e-7)

    # Check that original value state wasn't changed
    assert_allclose(ds.parameters["x"].value, 2)
def test_stat_profile():
    """Stat profile of x without reoptimization."""
    ds = MyDataset()
    fit = Fit([ds])
    fit.run()

    profile = fit.stat_profile("x", nvalues=3)

    assert_allclose(profile["values"], [0, 2, 4], atol=1e-7)
    assert_allclose(profile["stat"], [4, 0, 4], atol=1e-7)

    # Check that original value state wasn't changed
    assert_allclose(ds.models.parameters["x"].value, 2)
def test_stat_profile():
    """Stat profile of x via the datasets-argument API, no reoptimization."""
    ds = MyDataset()
    fit = Fit()
    fit.run([ds])

    profile = fit.stat_profile(datasets=[ds], parameter="x", nvalues=3)

    assert_allclose(profile["x_scan"], [0, 2, 4], atol=1e-7)
    assert_allclose(profile["stat_scan"], [4, 0, 4], atol=1e-7)

    # No per-point fit results are stored when reoptimize is off.
    assert len(profile["fit_results"]) == 0

    # Check that original value state wasn't changed
    assert_allclose(ds.models.parameters["x"].value, 2)
def test_stacked_fit(self):
    """Stack two datasets and fit a fresh power-law sky model to the stack."""
    stacked = self.datasets[0].copy()
    stacked.stack(self.datasets[1])
    stacked.models = SkyModel(PowerLawSpectralModel())

    Fit().run(datasets=[stacked])

    params = stacked.models.parameters
    assert_allclose(params["index"].value, 2.7767, rtol=1e-3)
    assert u.Unit(params["amplitude"].unit) == "cm-2 s-1 TeV-1"
    assert_allclose(params["amplitude"].value, 5.191e-11, rtol=1e-3)
def data_fitting(instrument, npoints):
    """Fit the Crab model to one instrument's data, or jointly to all.

    Writes the fitted parameter table to ``results/fit_{instrument}.rst``
    and the stat contours to ``results/contours_{instrument}.yaml``.

    Parameters
    ----------
    instrument : str
        Instrument name, or "joint" to fit all instruments together.
    npoints : int
        Number of points per contour passed to ``make_contours``.
    """
    log.info("Running fit ...")

    # First define model
    crab_model = define_model()

    if instrument != "joint":
        datasets = read_datasets_and_set_model(instrument, crab_model)
    else:
        log.info("Performing joint analysis")
        ds_list = []
        for inst in AVAILABLE_DATA[:-1]:
            # extend() instead of rebuilding the list each iteration
            # (the original `[*ds_list, *datasets]` recopied all elements).
            ds_list.extend(read_datasets_and_set_model(inst, crab_model))
        datasets = Datasets(ds_list)

    # Perform fit
    fit = Fit(datasets)
    result = fit.run(optimize_opts={"tol": 0.1, "strategy": 0})
    log.info(result.parameters.to_table())

    path = f"results/fit_{instrument}.rst"
    log.info(f"Writing {path}")
    result.parameters.to_table().write(path, format="ascii.rst", overwrite=True)

    contours = make_contours(fit, result, npoints)
    with open(f"results/contours_{instrument}.yaml", "w") as file:
        yaml.dump(contours, file)
def test_optimize_backend_and_covariance(backend):
    """Run the fit with each optimizer backend and check values, errors, correlations."""
    ds = MyDataset()

    # scipy needs an explicit method; other backends take their defaults.
    opts = {"method": "L-BFGS-B"} if backend == "scipy" else {}
    opts["backend"] = backend

    fit = Fit(optimize_opts=opts)
    result = fit.run([ds])
    assert result is not None

    params = ds.models.parameters
    assert_allclose(params["x"].value, 2, rtol=1e-3)
    assert_allclose(params["y"].value, 3e2, rtol=1e-3)
    assert_allclose(params["z"].value, 4e-2, rtol=1e-2)
    assert_allclose(params["x"].error, 1, rtol=1e-7)
    assert_allclose(params["y"].error, 1, rtol=1e-7)
    assert_allclose(params["z"].error, 1, rtol=1e-7)

    # Parameters of MyDataset are independent: off-diagonal correlation is zero.
    corr = ds.models.covariance.correlation
    assert_allclose(corr[0, 1], 0, atol=1e-7)
    assert_allclose(corr[0, 2], 0, atol=1e-7)
    assert_allclose(corr[1, 2], 0, atol=1e-7)
def test_map_fit_one_energy_bin(sky_model, geom_image):
    """Map fit on a single-energy-bin dataset with a frozen spectral index."""
    ds = get_map_dataset(sky_model, geom_image, geom_image, edisp=False)

    sky_model.spectral_model.index.value = 3.0
    sky_model.spectral_model.index.frozen = True
    ds.background_model.norm.value = 0.5
    ds.counts = ds.npred()

    # Move a bit away from the best-fit point, to make sure the optimiser runs
    sky_model.parameters["sigma"].value = 0.21
    ds.background_model.parameters["norm"].frozen = True

    fit = Fit([ds])
    result = fit.run()
    assert result.success

    assert_allclose(ds.npred().data.sum(), 4076.779039, rtol=1e-3)
    assert_allclose(result.total_stat, 5722.439112, rtol=1e-3)

    params = result.parameters
    assert_allclose(params["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(params.error("lon_0"), 0.00407, rtol=1e-2)
    assert_allclose(params["sigma"].value, 0.2, rtol=1e-2)
    assert_allclose(params.error("sigma"), 0.00237, rtol=1e-2)
    assert_allclose(params["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(params.error("amplitude"), 1.901406e-13, rtol=1e-2)
def test_stat_profile(dataset):
    """Stat profile of the amplitude, via sigma-based and explicit scan values."""
    fit = Fit()
    result = fit.run(datasets=dataset)["optimize_result"]

    model = dataset.models[0].spectral_model

    # Scan defined through number of values and sigma range.
    model.amplitude.scan_n_values = 3
    model.amplitude.scan_n_sigma = 1
    profile = fit.stat_profile(
        datasets=dataset,
        parameter="amplitude",
    )
    ts_diff = profile["stat_scan"] - result.total_stat
    assert_allclose(ts_diff, [174.358204, 0., 174.418515], rtol=1e-2, atol=1e-7)

    # Same scan expressed through explicit values (best fit +/- one error)
    # must reproduce the same profile.
    value = result.parameters["amplitude"].value
    err = result.parameters["amplitude"].error
    model.amplitude.scan_values = np.array([value - err, value, value + err])
    profile = fit.stat_profile(
        datasets=dataset,
        parameter="amplitude",
    )
    ts_diff = profile["stat_scan"] - result.total_stat
    assert_allclose(ts_diff, [174.358204, 0., 174.418515], rtol=1e-2, atol=1e-7)
def test_wstat(self):
    """WStat with on source and background spectrum"""
    on_vector = self.src.copy()
    on_vector.data += self.bkg.data

    acceptance = RegionNDMap.from_geom(self.src.geom, data=1)
    acceptance_off = RegionNDMap.from_geom(self.bkg.geom, data=1 / self.alpha)

    ds = SpectrumDatasetOnOff(
        counts=on_vector,
        counts_off=self.off,
        exposure=self.exposure,
        acceptance=acceptance,
        acceptance_off=acceptance_off,
    )
    ds.models = self.source_model

    # NOTE(review): attribute-style assignment on the Parameters collection —
    # confirm this actually sets the "index" parameter value as intended.
    self.source_model.parameters.index = 1.12

    Fit().run(datasets=[ds])

    params = self.source_model.parameters
    assert_allclose(params["index"].value, 1.997342, rtol=1e-3)
    assert_allclose(params["amplitude"].value, 100245.187067, rtol=1e-3)
    assert_allclose(Fit().run(datasets=[ds]).total_stat, 30.022316, rtol=1e-3)
def test_stat_profile_reoptimize():
    """Stat profile of x (datasets-argument API) with y refit at each scan value."""
    ds = MyDataset()
    fit = Fit()
    fit.run([ds])

    # Displace y from its best-fit value so reoptimization has work to do.
    ds.models.parameters["y"].value = 0

    profile = fit.stat_profile(
        datasets=[ds], parameter="x", nvalues=3, reoptimize=True
    )

    assert_allclose(profile["x_scan"], [0, 2, 4], atol=1e-7)
    assert_allclose(profile["stat_scan"], [4, 0, 4], atol=1e-7)

    # Each scan point stores its fit result; the stored total stat must match
    # the corresponding stat_scan entry.
    assert_allclose(
        profile["fit_results"][0].total_stat, profile["stat_scan"][0], atol=1e-7
    )
def fit_lc(datasets):
    """Fit a Gaussian/power-law/exp-decay sky model to all datasets and print the result."""
    spatial = GaussianSpatialModel(
        lon_0="0.2 deg", lat_0="0.1 deg", sigma="0.3 deg", frame="galactic"
    )
    spatial.parameters["lon_0"].frozen = False
    spatial.parameters["lat_0"].frozen = False
    spatial.parameters["sigma"].frozen = True

    spectral = PowerLawSpectralModel(
        index=3, amplitude="2e-11 cm-2 s-1 TeV-1", reference="1 TeV"
    )
    temporal = ExpDecayTemporalModel(t0="10 h", t_ref=gti_t0.mjd * u.d)

    model_fit = SkyModel(
        spatial_model=spatial,
        spectral_model=spectral,
        temporal_model=temporal,
        name="fit",
    )

    # Attach the shared source model plus a per-dataset FoV background model;
    # the background norm is kept fixed during the fit.
    for dataset in datasets:
        dataset.models = [model_fit, FoVBackgroundModel(dataset_name=dataset.name)]
        dataset.background_model.parameters["norm"].frozen = True

    result = Fit().run(datasets=datasets)
    print(result.success)
    print(result.parameters.to_table())
def make_background_fit(dataset):
    """Fit the FoV background model on the dataset counts data

    Parameters
    ----------
    dataset : `~gammapy.datasets.MapDataset`
        Input dataset.

    Returns
    -------
    dataset : `~gammapy.datasets.MapDataset`
        Map dataset with fitted background model
    """
    models = dataset.models

    # freeze all model components not related to background model;
    # restore_status re-enables them once the fit is done.
    # NOTE(review): the fit is assumed to run inside the restore_status
    # context (source indentation was lost) — confirm against upstream.
    with models.restore_status(restore_values=False):
        models.select(tag="sky-model").freeze()
        fit = Fit([dataset])
        fit_result = fit.run()
        if not fit_result.success:
            log.warning(
                f"FoVBackgroundMaker failed. Fit did not converge for {dataset.name}"
            )

    return dataset
def test_map_fit_one_energy_bin(sky_model, geom_image):
    """Map fit on a single-bin image geometry with a separate true-energy axis."""
    energy_axis = geom_image.get_axis_by_name("energy")
    geom_etrue = geom_image.to_image().to_cube([energy_axis.copy(name="energy_true")])
    ds = get_map_dataset(sky_model, geom_image, geom_etrue)

    sky_model.spectral_model.index.value = 3.0
    sky_model.spectral_model.index.frozen = True
    ds.background_model.norm.value = 0.5
    ds.counts = ds.npred()

    # Move a bit away from the best-fit point, to make sure the optimiser runs
    sky_model.parameters["sigma"].value = 0.21
    ds.background_model.parameters["norm"].frozen = True

    fit = Fit([ds])
    result = fit.run()
    assert result.success

    assert_allclose(ds.npred().data.sum(), 16538.124036, rtol=1e-3)
    assert_allclose(result.total_stat, -34844.125047, rtol=1e-3)

    params = result.parameters
    assert_allclose(params["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(params["lon_0"].error, 0.001689, rtol=1e-2)
    assert_allclose(params["sigma"].value, 0.2, rtol=1e-2)
    assert_allclose(params["sigma"].error, 0.00092, rtol=1e-2)
    assert_allclose(params["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(params["amplitude"].error, 8.127593e-14, rtol=1e-2)
def test_warning_no_covariance(backend, caplog):
    """Backends without covariance support must emit a warning log record."""
    ds = MyDataset()
    Fit(backend=backend).run([ds])

    last = caplog.records[-1]
    assert last.levelname == "WARNING"
    assert last.message == "No covariance estimate - not supported by this backend."
def test_map_fit(sky_model, geom, geom_etrue):
    """Joint map fit over two named datasets, plus mask_safe and out-of-image checks."""
    ds1 = get_map_dataset(sky_model, geom, geom_etrue, name="test-1")
    ds1.background_model.norm.value = 0.5
    ds1.counts = ds1.npred()

    ds2 = get_map_dataset(sky_model, geom, geom_etrue, name="test-2")
    ds2.counts = ds2.npred()

    # Perturb the starting point before fitting.
    sky_model.parameters["sigma"].frozen = True
    ds1.background_model.norm.value = 0.49
    ds2.background_model.norm.value = 0.99

    fit = Fit([ds1, ds2])
    result = fit.run()

    assert result.success
    assert "minuit" in repr(result)

    assert_allclose(ds1.npred().data.sum(), 7525.790688, rtol=1e-3)
    assert_allclose(result.total_stat, 21700.253246, rtol=1e-3)

    params = result.parameters
    assert_allclose(params["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(params["lon_0"].error, 0.002244, rtol=1e-2)
    assert_allclose(params["index"].value, 3, rtol=1e-2)
    assert_allclose(params["index"].error, 0.024277, rtol=1e-2)
    assert_allclose(params["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(params["amplitude"].error, 4.216154e-13, rtol=1e-2)

    # background norm 1
    assert_allclose(params[8].value, 0.5, rtol=1e-2)
    assert_allclose(params[8].error, 0.015811, rtol=1e-2)

    # background norm 2
    assert_allclose(params[11].value, 1, rtol=1e-2)
    assert_allclose(params[11].error, 0.02147, rtol=1e-2)

    # test mask_safe evaluation
    mask_safe = geom.energy_mask(emin=1 * u.TeV)
    ds1.mask_safe = Map.from_geom(geom, data=mask_safe)
    ds2.mask_safe = Map.from_geom(geom, data=mask_safe)
    assert_allclose(fit.datasets.stat_sum(), 14824.173099, rtol=1e-5)

    region = sky_model.spatial_model.to_region()
    with mpl_plot_check():
        ds1.plot_residuals(region=region)

    # test model evaluation outside image
    ds1.models[0].spatial_model.lon_0.value = 150
    ds1.npred()
    assert not ds1._evaluators[ds1.models[0]].contributes
def test_map_fit(sky_model, geom, geom_etrue):
    """Joint map fit mixing local/global evaluation modes and cash/cstat likelihoods."""
    ds1 = get_map_dataset(sky_model, geom, geom_etrue, evaluation_mode="local")
    ds1.background_model.norm.value = 0.5
    ds1.counts = ds1.npred()

    ds2 = get_map_dataset(
        sky_model, geom, geom_etrue, evaluation_mode="global", likelihood="cstat"
    )
    ds2.counts = ds2.npred()

    # Perturb the starting point before fitting.
    sky_model.parameters["sigma"].frozen = True
    ds1.background_model.norm.value = 0.49
    ds2.background_model.norm.value = 0.99

    fit = Fit([ds1, ds2])
    result = fit.run()

    assert result.success
    assert "minuit" in repr(result)

    assert_allclose(ds1.npred().data.sum(), 6220.529956, rtol=1e-3)
    assert_allclose(result.total_stat, 11802.750562, rtol=1e-3)

    params = result.parameters
    assert_allclose(params["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(params.error("lon_0"), 0.002651, rtol=1e-2)
    assert_allclose(params["index"].value, 3, rtol=1e-2)
    assert_allclose(params.error("index"), 0.021222, rtol=1e-2)
    assert_allclose(params["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(params.error("amplitude"), 3.117271e-13, rtol=1e-2)

    # background norm 1
    assert_allclose(params[8].value, 0.5, rtol=1e-2)
    assert_allclose(params.error(params[8]), 0.015759, rtol=1e-2)

    # background norm 2
    assert_allclose(params[11].value, 1, rtol=1e-2)
    assert_allclose(params.error(params[11]), 0.02147, rtol=1e-2)

    # test mask_safe evaluation
    mask_safe = geom.energy_mask(emin=1 * u.TeV)
    ds1.mask_safe = mask_safe
    ds2.mask_safe = mask_safe
    assert_allclose(fit.datasets.likelihood(), 6425.389198)

    # test model evaluation outside image
    with pytest.raises(ValueError):
        ds1.model.skymodels[0].spatial_model.lon_0.value = 150
        ds1.npred()

    with mpl_plot_check():
        ds1.plot_residuals()
def test_map_fit(sky_model, geom, geom_etrue):
    """Joint map fit mixing local and global evaluation modes."""
    ds1 = get_map_dataset(sky_model, geom, geom_etrue, evaluation_mode="local")
    ds1.background_model.norm.value = 0.5
    ds1.counts = ds1.npred()

    ds2 = get_map_dataset(sky_model, geom, geom_etrue, evaluation_mode="global")
    ds2.counts = ds2.npred()

    # Perturb the starting point before fitting.
    sky_model.parameters["sigma"].frozen = True
    ds1.background_model.norm.value = 0.49
    ds2.background_model.norm.value = 0.99

    fit = Fit([ds1, ds2])
    result = fit.run()

    assert result.success
    assert "minuit" in repr(result)

    assert_allclose(ds1.npred().data.sum(), 6220.529956, rtol=1e-3)
    assert_allclose(result.total_stat, 27725.577785, rtol=1e-3)

    params = result.parameters
    assert_allclose(params["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(params.error("lon_0"), 0.002651, rtol=1e-2)
    assert_allclose(params["index"].value, 3, rtol=1e-2)
    assert_allclose(params.error("index"), 0.021222, rtol=1e-2)
    assert_allclose(params["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(params.error("amplitude"), 3.117271e-13, rtol=1e-2)

    # background norm 1
    assert_allclose(params[8].value, 0.5, rtol=1e-2)
    assert_allclose(params.error(params[8]), 0.015759, rtol=1e-2)

    # background norm 2
    assert_allclose(params[11].value, 1, rtol=1e-2)
    assert_allclose(params.error(params[11]), 0.02147, rtol=1e-2)

    # test mask_safe evaluation
    mask_safe = geom.energy_mask(emin=1 * u.TeV)
    ds1.mask_safe = Map.from_geom(geom, data=mask_safe)
    ds2.mask_safe = Map.from_geom(geom, data=mask_safe)
    assert_allclose(fit.datasets.stat_sum(), 15254.470527)

    # test model evaluation outside image
    ds1.model[0].spatial_model.lon_0.value = 150
    ds1.npred()
    assert not ds1._evaluators[0].contributes

    with mpl_plot_check():
        ds1.plot_residuals()
def test_no_edisp(self):
    """Fitting a dataset with no energy dispersion still recovers the index."""
    ds = self.datasets[0].copy()
    ds.edisp = None
    ds.models = self.pwl

    result = Fit().run(datasets=[ds])

    assert_allclose(result.parameters["index"].value, 2.7961, atol=0.02)
def _fit_dataset(
    dataset_loop,
    nuisance,
    run,
    ch,
    mass,
    stat_profile_opts=None,
    optimize_opts=None,
    covariance_opts=None,
):
    """Fit loop dataset model to fake realization and calculate parameter
    value for upper limit.

    Parameters
    ----------
    dataset_loop : dataset
        Dataset whose model parameters ("sv", "jfactor") are fitted.
    nuisance : bool
        If True and the dataset carries nuisance information, treat the
        J-factor as a bounded nuisance parameter and reoptimize it while
        profiling.
    run, ch, mass
        Identifiers used for logging only.
    stat_profile_opts : dict, optional
        Options forwarded to ``fit.stat_profile``; the profiled parameter
        is always forced to "sv".
    optimize_opts, covariance_opts : dict, optional
        Options forwarded to ``fit.run``.

    Returns
    -------
    tuple
        ``(j_best, sv_best, likemin, statprofile)``; ``j_best`` is None
        when the J-factor is kept frozen.
    """
    log.info("----")
    log.info(f"Run: {run}")
    log.info(f"Channel: {ch}")
    log.info(f"Mass: {mass}")

    j_best = None

    # Copy (or create) the options dict: the original crashed with a
    # TypeError when the default None was item-assigned, and mutated a
    # caller-supplied dict otherwise.
    stat_profile_opts = dict(stat_profile_opts) if stat_profile_opts else {}
    stat_profile_opts["parameter"] = "sv"

    dataset_loop.models.parameters["sv"].frozen = False

    if nuisance and dataset_loop.nuisance:
        # Bound the J-factor around its prior within +/- width * sigma_j.
        prior = dataset_loop.nuisance["j"].value
        halfrange = (
            dataset_loop.nuisance["width"] * dataset_loop.nuisance["sigmaj"].value
        )
        dataset_loop.models.parameters["jfactor"].frozen = False
        dataset_loop.models.parameters["jfactor"].min = prior - halfrange
        dataset_loop.models.parameters["jfactor"].max = prior + halfrange

        fit = Fit([dataset_loop])
        fit_result = fit.run("minuit", optimize_opts, covariance_opts)
        sv_best = fit_result.parameters["sv"].value
        j_best = fit_result.parameters["jfactor"].value
        likemin = dataset_loop.stat_sum()
        # Reoptimize so the nuisance J-factor is refitted at each scan point.
        statprofile = fit.stat_profile(reoptimize=True, **stat_profile_opts)
    else:
        dataset_loop.models.parameters["jfactor"].frozen = True
        fit = Fit([dataset_loop])
        fit_result = fit.run("minuit", optimize_opts, covariance_opts)
        sv_best = fit_result.parameters["sv"].value
        likemin = dataset_loop.stat_sum()
        statprofile = fit.stat_profile(**stat_profile_opts)

    return j_best, sv_best, likemin, statprofile
def test_stats(self):
    """Sum of per-bin stats inside mask_safe must equal the fit's total stat."""
    ds = self.datasets[0].copy()
    ds.models = self.pwl

    result = Fit().run(datasets=[ds])

    per_bin = ds.stat_array()
    assert_allclose(np.sum(per_bin[ds.mask_safe]), result.total_stat)
def test_stats(self):
    """Sum of per-bin likelihoods inside mask_safe must equal the fit's total stat."""
    ds = self.obs_list[0]
    ds.model = self.pwl

    fit = Fit([ds])
    result = fit.run()

    per_bin = ds.likelihood_per_bin()
    assert_allclose(np.sum(per_bin[ds.mask_safe]), result.total_stat)
def test_stacked_fit(self):
    """Stack two observations and fit the power law to the stacked dataset."""
    stacked = self.obs_list[0].copy()
    stacked.stack(self.obs_list[1])
    stacked.model = self.pwl

    fit = Fit([stacked])
    result = fit.run()

    params = result.parameters
    assert_allclose(params["index"].value, 2.7767, rtol=1e-3)
    assert u.Unit(params["amplitude"].unit) == "cm-2 s-1 TeV-1"
    assert_allclose(params["amplitude"].value, 5.191e-11, rtol=1e-3)