Example #1
def test_set_models():
    config = get_example_config("1d")
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()
    models_str = Path(MODEL_FILE).read_text()
    analysis.set_models(models=models_str)
    assert isinstance(analysis.models, Models)
    with pytest.raises(TypeError):
        analysis.set_models(0)
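
# A sketch of the equivalent call passing a parsed Models object instead of
# the YAML string; this mirrors the usage in Example #2 and is an
# illustration, not part of the test above:

from gammapy.modeling.models import Models

models = Models.read(MODEL_FILE)  # parse the model file into a Models object
analysis.set_models(models)       # set_models accepts the object directly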
Example #2
def run_analysis(estimate):
    """Run analysis from observation selection to model fitting."""
    config = AnalysisConfig.read(f"{estimate}/config.yaml")
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()

    models = Models.read(f"{estimate}/models.yaml")
    analysis.set_models(models)
    analysis.run_fit()
    return analysis
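
# A hypothetical invocation of the helper above; "crab" is an assumed
# directory name standing in for any directory that holds the expected
# config.yaml and models.yaml:

analysis = run_analysis("crab")
print(analysis.fit_result)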
Example #3
def test_set_models():
    config = get_example_config("3d")
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()
    models_str = Path(MODEL_FILE).read_text()
    analysis.set_models(models=models_str)
    assert isinstance(analysis.models, DatasetModels)
    assert len(analysis.models) == 2
    assert analysis.models.names == ['source', 'stacked-bkg']
    with pytest.raises(TypeError):
        analysis.set_models(0)

    new_source = analysis.models["source"].copy(name="source2")
    analysis.set_models(models=[new_source], extend=False)
    assert len(analysis.models) == 2
    assert analysis.models.names == ['source2', 'stacked-bkg']
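
# For contrast with extend=False above, a hedged sketch of the default
# behavior: set_models also takes extend=True (the default), in which case
# the new component should be appended rather than replace an existing one.
# The resulting length and names are assumptions, not part of the test:

third_source = analysis.models["source2"].copy(name="source3")
analysis.set_models(models=[third_source])  # extend=True by default
assert len(analysis.models) == 3  # assumed: source2, source3, stacked-bkg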
Example #4
      value: 1.0e-12
      unit: cm-2 s-1 TeV-1
    - name: index
      value: 2.0
      unit: ''
    - name: reference
      value: 1.0
      unit: TeV
      frozen: true
"""

# Now we set the model on the analysis object:

# In[ ]:

analysis.set_models(model_config)

# Finally we run the fit:

# In[ ]:

analysis.run_fit()

# In[ ]:

print(analysis.fit_result)

# This is how we can write the model back to file again:

# In[ ]:
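
# The body of this last cell is cut off above. A minimal sketch of what it
# might contain, assuming Models.write as the counterpart of the Models.read
# used elsewhere in these examples (the original cell is not shown):

analysis.models.write("model-best-fit.yaml", overwrite=True)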
Example #5
def run_analysis(method, target_dict, debug, skip_flux_points):
    """If the method is "1d", runs joint spectral analysis for the selected target. If
    instead it is "3d", runs stacked 3D analysis."""
    tag = target_dict["tag"]
    log.info(f"Running {method} analysis, {tag}")
    path_res = Path(tag + "/results/")

    log.info("Reading config")
    txt = Path(f"config_{method}.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)
    if debug:
        config.observations.obs_ids = [target_dict["debug_run"]]
        config.flux_points.energy.nbins = 1
        if method == "3d":
            config.datasets.geom.axes.energy_true.nbins = 10
    analysis = Analysis(config)

    log.info("Running observations selection")
    analysis.get_observations()

    log.info(f"Running data reduction")
    analysis.get_datasets()

    log.info(f"Setting the model")
    txt = Path("model_config.yaml").read_text()
    txt = txt.format_map(target_dict)
    analysis.set_models(txt)
    if method == "3d":
        analysis.datasets[0].background_model.spectral_model.norm.frozen = False
        analysis.datasets[0].background_model.spectral_model.tilt.frozen = False

        # Impose min and max values to ensure position does not diverge
        delta = 1.5
        lon = analysis.models[0].spatial_model.lon_0.value
        lat = analysis.models[0].spatial_model.lat_0.value
        analysis.models[0].spatial_model.lat_0.min = lat - delta
        analysis.models[0].spatial_model.lat_0.max = lat + delta
        analysis.models[0].spatial_model.lon_0.min = lon - delta
        analysis.models[0].spatial_model.lon_0.max = lon + delta

        if target_dict["spatial_model"] == "DiskSpatialModel":
            analysis.models[0].spatial_model.e.frozen = False
            analysis.models[0].spatial_model.phi.frozen = False
            analysis.models[0].spatial_model.r_0.value = 0.3
    log.info(f"Running fit ...")
    analysis.run_fit(optimize_opts={"print_level": 3})

    # TODO: This is a workaround. Set covariance automatically
    log.info(f"Writing {path_res}")
    write_fit_summary(analysis.models[0].parameters,
                      str(path_res / f"result-{method}.yaml"))

    if not skip_flux_points:
        log.info(f"Running flux points estimation")
        # Freeze all parameters except the backround norm
        if method == "3d":
            dataset = analysis.datasets[0]
            for parameter in dataset.models.parameters:
                if parameter is not dataset.background_model.spectral_model.norm:
                    parameter.frozen = True
        analysis.get_flux_points()
        flux_points = analysis.flux_points.data
        flux_points.table["is_ul"] = flux_points.table["ts"] < 4
        keys = [
            "e_ref",
            "e_min",
            "e_max",
            "dnde",
            "dnde_errp",
            "dnde_errn",
            "is_ul",
            "dnde_ul",
        ]
        log.info(f"Writing {path_res}")
        flux_points.table_formatted[keys].write(path_res /
                                                f"flux-points-{method}.ecsv",
                                                format="ascii.ecsv")
Example #6
def run_analysis(method, target_dict, debug):
    """If the method is "1d", runs joint spectral analysis for the selected target. If
    instead it is "3d", runs stacked 3D analysis."""
    tag = target_dict["tag"]
    log.info(f"Running {method} analysis, {tag}")
    path_res = Path(tag + "/results/")

    log.info("Reading config")
    txt = Path(f"config_{method}.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)
    if debug:
        config.observations.obs_ids = [target_dict["debug_run"]]
        config.flux_points.energy.nbins = 1
        if method == "3d":
            config.datasets.geom.axes.energy_true.nbins = 10
    analysis = Analysis(config)

    log.info("Running observations selection")
    analysis.get_observations()

    log.info(f"Running data reduction")
    analysis.get_datasets()

    # TODO: This is a workaround. We should somehow apply the safe mask (run by run) from the HLI
    from gammapy.cube import SafeMaskMaker
    datasets = []
    maker_safe_mask = SafeMaskMaker(methods=["edisp-bias", "bkg-peak"],
                                    bias_percent=10)
    for dataset in analysis.datasets:
        dataset = maker_safe_mask.run(dataset)
        datasets.append(dataset)
    analysis.datasets = datasets

    log.info(f"Setting the model")
    txt = Path("model_config.yaml").read_text()
    txt = txt.format_map(target_dict)
    log.info(txt)
    analysis.set_models(txt)
    if method == "3d" and target_dict["spatial_model"] == "DiskSpatialModel":
        analysis.models[0].spatial_model.e.frozen = False
        analysis.models[0].spatial_model.phi.frozen = False
        analysis.models[0].spatial_model.r_0.value = 0.3

    log.info(f"Running fit ...")
    analysis.run_fit()

    # TODO: This is a workaround. Set covariance automatically
    results = analysis.fit_result
    names = ["spectral_model", "spatial_model"]
    for name in names:
        if name == "spatial_model" and method == "1d":
            continue
        model = getattr(analysis.models[0], name)
        model.parameters.covariance = results.parameters.get_subcovariance(
            model.parameters.names)

    log.info(f"Writing {path_res}")
    write_fit_summary(analysis.models[0].parameters,
                      str(path_res / f"results-summary-fit-{method}.yaml"))

    log.info(f"Running flux points estimation")
    # TODO:  For the 3D analysis, re-optimize the background norm in each energy
    #  bin. For now, this is not possible from the HLI.
    analysis.get_flux_points(source=tag)
    flux_points = analysis.flux_points.data
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = [
        "e_ref",
        "e_min",
        "e_max",
        "dnde",
        "dnde_errp",
        "dnde_errn",
        "is_ul",
        "dnde_ul",
    ]
    log.info(f"Writing {path_res}")
    flux_points.table_formatted[keys].write(path_res /
                                            f"flux-points-{method}.ecsv",
                                            format="ascii.ecsv")
Example #7
)

sky_model = SkyModel(spatial_model=spatial_model,
                     spectral_model=spectral_model,
                     name="crab")
# Now we freeze the parameters that we don't want the light curve estimator to change
sky_model.parameters["index"].frozen = True
sky_model.parameters["lon_0"].frozen = True
sky_model.parameters["lat_0"].frozen = True

# We assign the model to be fitted to each dataset

# In[ ]:

models = SkyModels([sky_model])
analysis_3d.set_models(models)

# ## Light Curve estimation: by observation
#
# We can now create the light curve estimator.
#
# We pass it the list of datasets and the name of the model component for which we want to build the light curve.
# We can optionally ask for parameter reoptimization during the fit; most of the time this is used to fit the background normalization in each time bin.
#
# If we don't set any time interval, the `~gammapy.time.LightCurveEstimator` determines the flux of each dataset and places it at the corresponding time in the light curve.
# Here one dataset corresponds to one observing run.

# In[ ]:

lc_maker_3d = LightCurveEstimator(analysis_3d.datasets,
                                  source="crab",
Example #8
def run_analyses(targets):
    log.info("Run small source extension check.")

    info = {}

    targets = list(AVAILABLE_TARGETS) if targets == "all-targets" else [targets]

    for target in targets:
        t = time.time()

        config = AnalysisConfig.read(f"configs/config_{target}.yaml")
        analysis = Analysis(config)
        analysis.get_observations()
        info["data_preparation"] = time.time() - t

        t = time.time()

        analysis.get_datasets()
        info["data_reduction"] = time.time() - t

        models = Models.read(f"models/model_{target}.yaml")

        point_models = Models(define_model_pointlike(models[0]))
        analysis.set_models(point_models)

        t = time.time()
        analysis.run_fit()

        info["point_model_fitting"] = time.time() - t
        log.info(f"\n{point_models.to_parameters_table()}")

        log.info("Fitting extended gaussian source.")

        analysis.datasets.models = []
        analysis.set_models(models)
        t = time.time()

        analysis.run_fit()

        info["gauss_model_fitting"] = time.time() - t

        log.info(analysis.fit_result)

        log.info(f"\n{models.to_parameters_table()}")

        log.info("Extract size error, UL and stat profile.")

        t = time.time()
        analysis.models[0].spatial_model.lon_0.frozen = True
        analysis.models[0].spatial_model.lat_0.frozen = True
        analysis.models[0].spectral_model.index.frozen = True

        size_est = ExtensionEstimator(
            source=models[0].name,
            energy_edges=[0.2, 10.0] * u.TeV,
            selection_optional=["errn-errp", "ul", "scan"],
            size_min="0.08 deg",
            size_max="0.12 deg",
            size_n_values=20,
            reoptimize=True)
        res = size_est.run(analysis.datasets)

        info["estimator"] = time.time() - t
        t = time.time()

        log.info(res)
        plot_profile(res[0], target)

        Path(f"bench_{target}.yaml").write_text(
            yaml.dump(info, sort_keys=False, indent=4))
        analysis.models.to_parameters_table().write(
            f"results/{target}_results.ecsv", overwrite=True)