def test_validation_checks():
    """Check that Analysis steps fail gracefully when run out of order
    or with an invalid configuration."""
    config = AnalysisConfig()
    analysis = Analysis(config)
    # An unknown datastore path must raise when observations are fetched.
    analysis.settings["observations"]["datastore"] = "other"
    with pytest.raises(FileNotFoundError):
        analysis.get_observations()

    config = AnalysisConfig.from_template("1d")
    analysis = Analysis(config)
    # Each step returns False when its prerequisites have not been run yet.
    assert analysis.get_flux_points() is False
    assert analysis.run_fit() is False
    assert analysis.set_model() is False
    assert analysis.get_datasets() is False

    analysis.get_observations()
    # An unsupported dataset type makes the reduction step fail.
    analysis.settings["datasets"]["dataset-type"] = "not assigned"
    assert analysis.get_datasets() is False

    analysis.settings["datasets"]["dataset-type"] = "SpectrumDatasetOnOff"
    analysis.get_observations()
    analysis.get_datasets()
    model_str = Path(MODEL_FILE).read_text()
    analysis.set_model(model=model_str)
    # isinstance() already returns a bool; comparing "is True" was redundant.
    assert isinstance(analysis.model, SkyModels)
    # Calling set_model without any argument is rejected.
    assert analysis.set_model() is False

    analysis.run_fit()
    # Flux point estimation fails when its settings section is missing.
    del analysis.settings["flux-points"]
    assert analysis.get_flux_points() is False
def test_analysis_1d():
    """Run the full 1D chain (reduction, fit, flux points, light curve)
    from an inline YAML config and validate the results numerically."""
    # Raw YAML overriding the example config: 2 Crab runs from the HESS DL3
    # DR1 data, split into 3 time intervals for the light curve.
    cfg = """
    observations:
        datastore: $GAMMAPY_DATA/hess-dl3-dr1
        obs_ids: [23523, 23526]
        obs_time: {
            start: [J2004.92654346, J2004.92658453, J2004.92663655],
            stop: [J2004.92658453, J2004.92663655, J2004.92670773]
        }
    datasets:
        type: 1d
        background:
            method: reflected
        geom:
            axes:
                energy_true: {min: 0.01 TeV, max: 300 TeV, nbins: 109}
        on_region: {frame: icrs, lon: 83.633 deg, lat: 22.014 deg, radius: 0.11 deg}
        safe_mask:
            methods: [aeff-default, edisp-bias]
            parameters: {bias_percent: 10.0}
        containment_correction: false
    flux_points:
        energy: {min: 1 TeV, max: 50 TeV, nbins: 4}
    light_curve:
        energy_edges: {min: 1 TeV, max: 50 TeV, nbins: 1}
        time_intervals: {
            start: [J2004.92654346, J2004.92658453, J2004.92663655],
            stop: [J2004.92658453, J2004.92663655, J2004.92670773]
        }
    """
    config = get_example_config("1d")
    analysis = Analysis(config)
    analysis.update_config(cfg)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(MODEL_FILE_1D)
    analysis.run_fit()
    analysis.get_flux_points()
    analysis.get_light_curve()

    # Two runs split over three time intervals -> three datasets.
    assert len(analysis.datasets) == 3

    # Flux points: 4 energy bins, differential flux in cm-2 s-1 TeV-1.
    table = analysis.flux_points.data.to_table(sed_type="dnde")
    assert len(table) == 4
    dnde = table["dnde"].quantity
    assert dnde.unit == "cm-2 s-1 TeV-1"
    assert_allclose(dnde[0].value, 8.116854e-12, rtol=1e-2)
    assert_allclose(dnde[2].value, 3.444475e-14, rtol=1e-2)

    # Light curve: one flux value per time interval.
    axis = analysis.light_curve.geom.axes["time"]
    assert axis.nbin == 3
    assert_allclose(axis.time_min.mjd, [53343.92, 53343.935, 53343.954])

    flux = analysis.light_curve.flux.data[:, :, 0, 0]
    assert_allclose(flux, [[1.688954e-11], [2.347870e-11], [1.604152e-11]], rtol=1e-4)
def main(config_path, models_path, output, reference):
    """Run a full 1D analysis from a config file and write the best-fit
    model, flux points, excess plot and light curve to ``output``.

    Parameters
    ----------
    config_path : str
        Path to the analysis YAML configuration.
    models_path : str
        Path to the models YAML definition.
    output : str
        Base output path; the suffix is replaced per data product.
    reference : str or None
        Name of a reference Crab spectral model to overlay on the SED.
    """
    config = AnalysisConfig.read(config_path)
    analysis = Analysis(config)
    log.info(config)

    analysis.get_observations()
    # BUG FIX: the datasets must be produced before they can be inspected.
    # The original logged analysis.datasets[0].counts *before* calling
    # get_datasets(), at which point no datasets exist yet.
    analysis.get_datasets()
    log.info(analysis)
    log.info(dir(analysis))
    log.info(analysis.datasets)
    log.info(analysis.datasets[0].counts)
    analysis.read_models(models_path)

    # stacked fit and flux estimation
    analysis.run_fit()
    analysis.get_flux_points()

    # Plot flux points
    ax_sed, ax_residuals = analysis.flux_points.plot_fit()
    if reference:
        plot_kwargs = {
            "energy_range": [
                analysis.config.flux_points.energy.min,
                analysis.config.flux_points.energy.max,
            ],
            "energy_power": 2,
            # NOTE(review): "erg-1 cm-2 s-1" looks odd for an E^2*dnde SED
            # (usually "erg cm-2 s-1") — confirm against the plotting API.
            "flux_unit": "erg-1 cm-2 s-1",
        }
        create_crab_spectral_model(reference).plot(
            **plot_kwargs, ax=ax_sed, label="Crab reference"
        )
        ax_sed.legend()

    ax_sed.set_ylim(1e-12, 1e-9)
    base_out = Path(output)
    ax_sed.get_figure().savefig(base_out.with_suffix(".pdf").as_posix())
    plt.clf()

    analysis.models.write(base_out.with_suffix(".yaml").as_posix(), overwrite=True)
    analysis.flux_points.write(
        base_out.with_suffix(".fits").as_posix(), overwrite=True
    )

    ax_excess = analysis.datasets["stacked"].plot_excess()
    ax_excess.get_figure().savefig(base_out.with_suffix(".excess.pdf").as_posix())
    plt.clf()

    # Re-run the reduction without stacking to estimate a light curve.
    config.datasets.stack = False
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(models_path)

    lc_maker_low = LightCurveEstimator(
        energy_edges=[0.2, 5] * u.TeV,
        source=config.flux_points.source,
        reoptimize=False,
    )
    lc_low = lc_maker_low.run(analysis.datasets)
    ax_lc = lc_low.plot(marker="o", label="1D")
    ax_lc.get_figure().savefig(base_out.with_suffix(".lc.pdf").as_posix())
    plt.clf()
def test_usage_errors():
    """Every analysis step must raise RuntimeError when its prerequisite
    step has not been executed yet."""
    config = get_example_config("1d")
    analysis = Analysis(config)
    premature_calls = (
        analysis.get_datasets,
        lambda: analysis.read_models(MODEL_FILE),
        analysis.run_fit,
        analysis.get_flux_points,
    )
    for call in premature_calls:
        with pytest.raises(RuntimeError):
            call()
def run_3d(name):
    """Run 3D analysis for one source and write the fit results, flux
    points and diagnostic plots to ``results/{name}``."""
    logging.info(f"run3d: {name}")
    mode = "3d"
    config_file = f"config{mode}.yaml"
    # Plain string: the original used an f-string with no placeholders.
    target_config_file = "targets.yaml"
    model_file = f"model{mode}_{name}.yaml"
    outdir = f"results/{name}"
    config = target_config3d(config_file, target_config_file, name)

    analysis = Analysis(config)
    analysis.get_observations()
    conf = config.settings["observations"]["filters"][0]
    nb, lon, lat, rad = (
        len(analysis.observations.ids),
        conf["lon"],
        conf["lat"],
        conf["radius"],
    )
    logging.info(f"{nb} observations found in {rad} around {lon}, {lat} ")

    analysis.get_datasets()
    analysis.set_model(filename=model_file)
    logging.info(analysis.model)
    analysis.run_fit()
    logging.info(analysis.fit_result.parameters.to_table())
    analysis.fit_result.parameters.to_table().write(
        f"{outdir}/{name}_{mode}_bestfit.dat", format="ascii", overwrite=True
    )
    analysis.get_flux_points(source=name)
    analysis.flux_points.write(f"{outdir}/{name}_{mode}_fluxpoints.fits")

    # Counts map. The original plotted this twice (once as a "test" before
    # the fit, once after), overwriting the same file — a single plot is kept.
    plt.figure(figsize=(5, 5))
    analysis.datasets["stacked"].counts.sum_over_axes().plot(add_cbar=True)
    plt.savefig(f"{outdir}/{name}_{mode}_counts.png", bbox_inches="tight")

    # Residual significance map.
    plt.figure(figsize=(5, 5))
    analysis.datasets["stacked"].plot_residuals(
        method="diff/sqrt(model)", vmin=-0.5, vmax=0.5
    )
    plt.savefig(f"{outdir}/{name}_{mode}_residuals.png", bbox_inches="tight")

    # SED with residuals panel.
    plt.figure(figsize=(8, 5))
    ax_sed, ax_residuals = analysis.flux_points.peek()
    plt.savefig(f"{outdir}/{name}_{mode}_fluxpoints.png", bbox_inches="tight")
def test_analysis_1d(config_analysis_data):
    """Run the 1D template analysis end to end and validate flux points."""
    config = AnalysisConfig.from_template("1d")
    analysis = Analysis(config)
    analysis.config.update_settings(config_analysis_data)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.set_model(filename=MODEL_FILE)
    analysis.run_fit()
    analysis.get_flux_points()

    assert len(analysis.datasets) == 2

    flux_table = analysis.flux_points.data.table
    assert len(flux_table) == 4
    dnde = flux_table["dnde"].quantity
    assert dnde.unit == "cm-2 s-1 TeV-1"
    assert_allclose(dnde[0].value, 8.03604e-12, rtol=1e-2)
    assert_allclose(dnde[-1].value, 4.780021e-21, rtol=1e-2)
def analysis_3d_data_reduction(target):
    """Run data reduction and a stacked 3D fit for ``target``; write the
    best-fit parameters (RST) and flux points (ECSV) to ``{target}/``.

    Parameters
    ----------
    target : str
        Target key, used to look up options in ``targets.yaml`` and as
        the output directory name.

    Returns
    -------
    analysis : Analysis
        The analysis object, so the caller can write datasets to disk
        once that is supported.
    """
    log.info(f"analysis_3d_data_reduction: {target}")
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked the handle from a bare open()).
    with open("targets.yaml") as fh:
        opts = yaml.safe_load(fh)[target]
    txt = Path("config_template.yaml").read_text()
    txt = txt.format_map(opts)
    config = AnalysisConfig.from_yaml(txt)

    analysis = Analysis(config)
    analysis.get_observations()
    log.info("Running data reduction")
    analysis.get_datasets()

    # TODO: write datasets and separate fitting to next function
    # Not implemented in Gammapy yet, coming very soon.
    log.info("Running fit ...")
    analysis.read_models(f"{target}/model_3d.yaml")
    # Use the module logger consistently; the original mixed log.* with
    # calls on the root logging module.
    log.info(analysis.models)
    analysis.run_fit()
    log.info(analysis.fit_result.parameters.to_table())
    path = f"{target}/{target}_3d_bestfit.rst"
    log.info(f"Writing {path}")
    analysis.fit_result.parameters.to_table().write(
        path, format="ascii.rst", overwrite=True
    )

    analysis.get_flux_points(source=target)
    path = f"{target}/{target}_3d_fluxpoints.ecsv"
    log.info(f"Writing {path}")
    keys = ["e_ref", "e_min", "e_max", "dnde", "dnde_errp", "dnde_errn", "is_ul"]
    analysis.flux_points.data.table_formatted[keys].write(
        path, format="ascii.ecsv", overwrite=True
    )

    return analysis  # will write to disk when possible
def test_usage_errors():
    """Calling analysis steps out of order, or with missing/invalid
    arguments, must raise the documented exception."""
    config = get_example_config("1d")
    analysis = Analysis(config)
    expected_failures = [
        (RuntimeError, analysis.get_datasets),
        (RuntimeError, analysis.read_datasets),
        (RuntimeError, analysis.write_datasets),
        (TypeError, analysis.read_models),
        (RuntimeError, analysis.write_models),
        (RuntimeError, analysis.run_fit),
        (RuntimeError, analysis.get_flux_points),
    ]
    for exc_type, call in expected_failures:
        with pytest.raises(exc_type):
            call()
    # Assigning an invalid dataset type is rejected by config validation.
    with pytest.raises(ValidationError):
        analysis.config.datasets.type = "None"
def test_analysis_3d():
    """Run the full 3D chain with a free background tilt and validate
    the fitted parameters and flux points."""
    config = get_example_config("3d")
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(MODEL_FILE)
    analysis.datasets["stacked"].background_model.spectral_model.tilt.frozen = False
    analysis.run_fit()
    analysis.get_flux_points()

    assert len(analysis.datasets) == 1

    pars = analysis.fit_result.parameters
    assert len(pars) == 8
    assert pars["amplitude"].unit == "cm-2 s-1 TeV-1"
    assert_allclose(pars["index"].value, 3.097613, rtol=1e-2)
    assert_allclose(pars["tilt"].value, -0.207792, rtol=1e-2)

    flux_table = analysis.flux_points.data.table
    assert len(flux_table) == 2
    dnde = flux_table["dnde"].quantity
    assert_allclose(dnde[0].value, 1.376879e-11, rtol=1e-2)
    assert_allclose(dnde[-1].value, 2.691466e-13, rtol=1e-2)
def test_analysis_3d():
    """Run the 3D template analysis end to end and validate the fit."""
    config = AnalysisConfig.from_template("3d")
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.set_model(filename=MODEL_FILE)
    analysis.datasets["stacked"].background_model.tilt.frozen = False
    analysis.run_fit()
    analysis.get_flux_points()

    assert len(analysis.datasets) == 1

    pars = analysis.fit_result.parameters
    assert len(pars) == 8
    assert pars[3].unit == "cm-2 s-1 TeV-1"
    assert_allclose(pars["index"].value, 2.76607, rtol=1e-1)
    assert_allclose(pars["tilt"].value, -0.143204, rtol=1e-1)

    flux_table = analysis.flux_points.data.table
    assert len(flux_table) == 2
    dnde = flux_table["dnde"].quantity
    assert_allclose(dnde[0].value, 1.182768e-11, rtol=1e-1)
    assert_allclose(dnde[-1].value, 4.051367e-13, rtol=1e-1)
def test_analysis_1d():
    """Run the full 1D chain (reduction, fit, flux points) from an
    inline YAML config and validate the resulting flux points."""
    # Raw YAML overriding the example config: 2 Crab runs from the
    # HESS DL3 DR1 data, reflected-region background, 4 flux point bins.
    cfg = """
    observations:
        datastore: $GAMMAPY_DATA/hess-dl3-dr1
        obs_ids: [23523, 23526]
    datasets:
        type: 1d
        background:
            method: reflected
        geom:
            axes:
                energy_true: {min: 0.01 TeV, max: 300 TeV, nbins: 109}
        on_region: {frame: icrs, lon: 83.633 deg, lat: 22.014 deg, radius: 0.11 deg}
        safe_mask:
            methods: [aeff-default, edisp-bias]
            parameters: {bias_percent: 10.0}
        containment_correction: false
    flux_points:
        energy: {min: 1 TeV, max: 50 TeV, nbins: 4}
    """
    config = get_example_config("1d")
    analysis = Analysis(config)
    analysis.update_config(cfg)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(MODEL_FILE_1D)
    analysis.run_fit()
    analysis.get_flux_points()

    # One dataset per observation run.
    assert len(analysis.datasets) == 2

    # Flux points: 4 energy bins, differential flux in cm-2 s-1 TeV-1.
    assert len(analysis.flux_points.data.table) == 4
    dnde = analysis.flux_points.data.table["dnde"].quantity
    assert dnde.unit == "cm-2 s-1 TeV-1"
    assert_allclose(dnde[0].value, 8.116854e-12, rtol=1e-2)
    assert_allclose(dnde[2].value, 3.547128e-14, rtol=1e-2)
# This is how we can write the model back to file again: # In[ ]: filename = path / "model-best-fit.yaml" analysis.models.write(filename, overwrite=True) # In[ ]: get_ipython().system('cat analysis_1/model-best-fit.yaml') # ### Flux points # In[ ]: analysis.get_flux_points(source="crab") # In[ ]: plt.figure(figsize=(8, 5)) ax_sed, ax_residuals = analysis.flux_points.peek() # The flux points can be exported to a fits table following the format defined [here](https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/flux_points/index.html) # In[ ]: filename = path / "flux-points.fits" analysis.flux_points.write(filename, overwrite=True) # ## What's next #
def run_analysis(method, target_dict, debug, skip_flux_points):
    """If the method is "1d", runs joint spectral analysis for the selected
    target. If instead it is "3d", runs stacked 3D analysis.

    Parameters
    ----------
    method : str
        Analysis method, ``"1d"`` or ``"3d"``.
    target_dict : dict
        Target properties used to fill the config/model templates; must
        contain at least ``tag``, ``debug_run`` and ``spatial_model``.
    debug : bool
        If True, restrict to a single run and one flux-point bin.
    skip_flux_points : bool
        If True, skip the flux points estimation step.
    """
    tag = target_dict["tag"]
    log.info(f"Running {method} analysis, {tag}")
    path_res = Path(tag + "/results/")

    log.info("Reading config")
    txt = Path(f"config_{method}.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)
    if debug:
        # Reduced setup to speed up test runs.
        config.observations.obs_ids = [target_dict["debug_run"]]
        config.flux_points.energy.nbins = 1
        if method == "3d":
            config.datasets.geom.axes.energy_true.nbins = 10
    analysis = Analysis(config)

    log.info("Running observations selection")
    analysis.get_observations()

    log.info("Running data reduction")
    analysis.get_datasets()

    log.info("Setting the model")
    txt = Path("model_config.yaml").read_text()
    txt = txt.format_map(target_dict)
    analysis.set_models(txt)
    if method == "3d":
        analysis.datasets[0].background_model.spectral_model.norm.frozen = False
        analysis.datasets[0].background_model.spectral_model.tilt.frozen = False
        # Impose min and max values to ensure position does not diverge
        delta = 1.5
        lon = analysis.models[0].spatial_model.lon_0.value
        lat = analysis.models[0].spatial_model.lat_0.value
        analysis.models[0].spatial_model.lat_0.min = lat - delta
        analysis.models[0].spatial_model.lat_0.max = lat + delta
        analysis.models[0].spatial_model.lon_0.min = lon - delta
        analysis.models[0].spatial_model.lon_0.max = lon + delta
        if target_dict["spatial_model"] == "DiskSpatialModel":
            analysis.models[0].spatial_model.e.frozen = False
            analysis.models[0].spatial_model.phi.frozen = False
            analysis.models[0].spatial_model.r_0.value = 0.3

    log.info("Running fit ...")
    analysis.run_fit(optimize_opts={"print_level": 3})

    # TODO: This is a workaround. Set covariance automatically.
    # (The original comment was split across lines, leaving its second
    # half outside comment syntax — a SyntaxError.)
    log.info(f"Writing {path_res}")
    write_fit_summary(
        analysis.models[0].parameters,
        str(path_res / f"result-{method}.yaml"),
    )

    if not skip_flux_points:
        log.info("Running flux points estimation")
        # Freeze all parameters except the background norm
        if method == "3d":
            dataset = analysis.datasets[0]
            for parameter in dataset.models.parameters:
                if parameter is not dataset.background_model.spectral_model.norm:
                    parameter.frozen = True
        analysis.get_flux_points()
        flux_points = analysis.flux_points.data
        # Flag points with a low test statistic as upper limits.
        flux_points.table["is_ul"] = flux_points.table["ts"] < 4
        keys = [
            "e_ref", "e_min", "e_max",
            "dnde", "dnde_errp", "dnde_errn",
            "is_ul", "dnde_ul",
        ]
        log.info(f"Writing {path_res}")
        flux_points.table_formatted[keys].write(
            path_res / f"flux-points-{method}.ecsv", format="ascii.ecsv"
        )
def run_analysis(method, target_dict, debug):
    """If the method is "1d", runs joint spectral analysis for the selected
    target. If instead it is "3d", runs stacked 3D analysis.

    Parameters
    ----------
    method : str
        Analysis method, ``"1d"`` or ``"3d"``.
    target_dict : dict
        Target properties used to fill the config/model templates; must
        contain at least ``tag``, ``debug_run`` and ``spatial_model``.
    debug : bool
        If True, restrict to a single run and one flux-point bin.
    """
    tag = target_dict["tag"]
    log.info(f"Running {method} analysis, {tag}")
    path_res = Path(tag + "/results/")

    log.info("Reading config")
    txt = Path(f"config_{method}.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)
    if debug:
        # Reduced setup to speed up test runs.
        config.observations.obs_ids = [target_dict["debug_run"]]
        config.flux_points.energy.nbins = 1
        if method == "3d":
            config.datasets.geom.axes.energy_true.nbins = 10
    analysis = Analysis(config)

    log.info("Running observations selection")
    analysis.get_observations()

    log.info("Running data reduction")
    analysis.get_datasets()

    # TODO: This is a workaround. We should somehow apply the safe mask
    # (run by run) from the HLI
    from gammapy.cube import SafeMaskMaker

    maker_safe_mask = SafeMaskMaker(methods=["edisp-bias", "bkg-peak"], bias_percent=10)
    datasets = []
    for dataset in analysis.datasets:
        dataset = maker_safe_mask.run(dataset)
        datasets.append(dataset)
    analysis.datasets = datasets

    log.info("Setting the model")
    txt = Path("model_config.yaml").read_text()
    txt = txt.format_map(target_dict)
    log.info(txt)
    analysis.set_models(txt)
    if method == "3d" and target_dict["spatial_model"] == "DiskSpatialModel":
        analysis.models[0].spatial_model.e.frozen = False
        analysis.models[0].spatial_model.phi.frozen = False
        analysis.models[0].spatial_model.r_0.value = 0.3

    log.info("Running fit ...")
    analysis.run_fit()

    # TODO: This is a workaround. Set covariance automatically.
    # (The original comment was split across lines, leaving its second
    # half outside comment syntax — a SyntaxError.)
    results = analysis.fit_result
    names = ["spectral_model", "spatial_model"]
    for name in names:
        if name == "spatial_model" and method == "1d":
            continue
        model = getattr(analysis.models[0], name)
        model.parameters.covariance = results.parameters.get_subcovariance(
            model.parameters.names
        )

    log.info(f"Writing {path_res}")
    write_fit_summary(
        analysis.models[0].parameters,
        str(path_res / f"results-summary-fit-{method}.yaml"),
    )

    log.info("Running flux points estimation")
    # TODO: For the 3D analysis, re-optimize the background norm in each energy
    # bin. For now, this is not possible from the HLI.
    analysis.get_flux_points(source=tag)
    flux_points = analysis.flux_points.data
    # Flag points with a low test statistic as upper limits.
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = [
        "e_ref", "e_min", "e_max",
        "dnde", "dnde_errp", "dnde_errn",
        "is_ul", "dnde_ul",
    ]
    log.info(f"Writing {path_res}")
    flux_points.table_formatted[keys].write(
        path_res / f"flux-points-{method}.ecsv", format="ascii.ecsv"
    )