Code example #1
def target_config3d(config_file, target_config_file, tag):
    """Create analyis configuration for out source."""
    targets_config_ = yaml.safe_load(open(target_config_file))
    targets_config = {}
    for conf in targets_config_:  # define tag as key
        targets_config[conf["tag"]] = conf

    config = AnalysisConfig.from_yaml(config_file)
    config_dict = config.settings

    config_dict["observations"]["filters"][0]["frame"] = targets_config[tag][
        "frame"]
    config_dict["observations"]["filters"][0]["lon"] = targets_config[tag][
        "lon"]
    config_dict["observations"]["filters"][0]["lat"] = targets_config[tag][
        "lat"]
    config_dict["observations"]["filters"][0]["radius"] = targets_config[tag][
        "radius"]
    config_dict["observations"]["filters"][0]["border"] = targets_config[tag][
        "radius"]

    config_dict["datasets"]["geom"]["skydir"] = [
        float(targets_config[tag]["lon"].strip(" deg")),
        float(targets_config[tag]["lat"].strip(" deg")),
    ]
    config_dict["datasets"]["geom"]["axes"][0]["lo_bnd"] = targets_config[tag][
        "emin"]
    config_dict["datasets"]["geom"]["axes"][0]["hi_bnd"] = targets_config[tag][
        "emax"]
    config_dict["datasets"]["geom"]["axes"][0]["nbin"] = targets_config[tag][
        "nbin"]
    config_dict["datasets"]["geom"]["axes"][0]["nbin"] = targets_config[tag][
        "nbin"]

    config_dict["flux-points"]["fp_binning"]["lo_bnd"] = targets_config[tag][
        "emin"]
    config_dict["flux-points"]["fp_binning"]["hi_bnd"] = targets_config[tag][
        "emax"]
    config_dict["flux-points"]["fp_binning"]["nbin"] = targets_config[tag][
        "nbin"]

    config_dict["flux-points"]["fp_binning"]["lo_bnd"] = targets_config[tag][
        "emin"]
    config_dict["flux-points"]["fp_binning"]["hi_bnd"] = targets_config[tag][
        "emax"]
    config_dict["flux-points"]["fp_binning"]["nbin"] = targets_config[tag][
        "nbin"]

    config_dict["fit"]["fit_range"]["min"] = str(
        targets_config[tag]["emin"]) + " TeV"
    config_dict["fit"]["fit_range"]["max"] = str(
        targets_config[tag]["emax"]) + " TeV"

    config.update_settings(config=config_dict)

    return config
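
A minimal usage sketch (the file names and the "crab" tag are assumptions, not part of the snippet):

config = target_config3d("config3d.yaml", "targets.yaml", tag="crab")
analysis = Analysis(config)
analysis.get_observations()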
Code example #2
def setup_analysis(target):
    log.info(f"analysis_3d_data_reduction: {target}")

    opts = yaml.safe_load(open("targets.yaml"))[target]
    txt = Path("config_template.yaml").read_text()
    log.info(opts)
    txt = txt.format_map(opts)

    config = AnalysisConfig.from_yaml(txt)
    config.flux_points.source = target
    config.datasets.safe_mask.parameters = {"offset_max": 5 * u.deg}

    return Analysis(config)
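
The returned Analysis object can then drive the same reduction steps used elsewhere on this page; a hypothetical call (the "crab" target name is an assumption):

analysis = setup_analysis("crab")
analysis.get_observations()
analysis.get_datasets()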
Code example #3
def analysis_3d_data_reduction(target):
    log.info(f"analysis_3d_data_reduction: {target}")

    opts = yaml.safe_load(open("targets.yaml"))[target]

    txt = Path("config_template.yaml").read_text()
    txt = txt.format_map(opts)
    config = AnalysisConfig.from_yaml(txt)

    analysis = Analysis(config)
    analysis.get_observations()
    log.info("Running data reduction")
    analysis.get_datasets()

    # TODO: write datasets and separate fitting to next function
    # Not implemented in Gammapy yet, coming very soon.
    log.info("Running fit ...")
    analysis.read_models(f"{target}/model_3d.yaml")
    log.info(analysis.models)
    analysis.run_fit()
    log.info(analysis.fit_result.parameters.to_table())
    path = f"{target}/{target}_3d_bestfit.rst"
    log.info(f"Writing {path}")
    analysis.fit_result.parameters.to_table().write(path,
                                                    format="ascii.rst",
                                                    overwrite=True)

    analysis.get_flux_points(source=target)
    path = f"{target}/{target}_3d_fluxpoints.ecsv"
    log.info(f"Writing {path}")
    keys = [
        "e_ref", "e_min", "e_max", "dnde", "dnde_errp", "dnde_errn", "is_ul"
    ]
    analysis.flux_points.data.table_formatted[keys].write(path,
                                                          format="ascii.ecsv",
                                                          overwrite=True)

    return analysis  # will write to disk when possible
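
All of these snippets assume a module-level logger; a typical setup (the exact configuration is an assumption) is:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)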
Code example #4
def test_analysis_1d_stacked_no_fit_range():
    cfg = """
    observations:
        datastore: $GAMMAPY_DATA/hess-dl3-dr1
        obs_cone: {frame: icrs, lon: 83.633 deg, lat: 22.014 deg, radius: 5 deg}
        obs_ids: [23592, 23559]

    datasets:
        type: 1d
        stack: false
        geom:
            axes:
                energy: {min: 0.01 TeV, max: 100 TeV, nbins: 73}
                energy_true: {min: 0.03 TeV, max: 100 TeV, nbins: 50}
        on_region: {frame: icrs, lon: 83.633 deg, lat: 22.014 deg, radius: 0.1 deg}
        containment_correction: true
        background:
            method: reflected
    """
    config = AnalysisConfig.from_yaml(cfg)
    analysis = Analysis(config)
    analysis.update_config(cfg)
    analysis.config.datasets.stack = True
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(MODEL_FILE_1D)
    analysis.run_fit()
    with pytest.raises(ValueError):
        analysis.get_excess_map()

    assert len(analysis.datasets) == 1
    assert_allclose(analysis.datasets["stacked"].counts.data.sum(), 184)
    pars = analysis.models.parameters
    assert_allclose(analysis.datasets[0].mask_fit.data, True)

    assert_allclose(pars["index"].value, 2.76913, rtol=1e-2)
    assert_allclose(pars["amplitude"].value, 5.479729e-11, rtol=1e-2)
Code example #5
File: make.py  Project: adonath/gammapy-benchmarks
def run_analysis(method, target_dict, debug, skip_flux_points):
    """If the method is "1d", runs joint spectral analysis for the selected target. If
    instead it is "3d", runs stacked 3D analysis."""
    tag = target_dict["tag"]
    log.info(f"Running {method} analysis, {tag}")
    path_res = Path(tag + "/results/")

    log.info("Reading config")
    txt = Path(f"config_{method}.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)
    if debug:
        config.observations.obs_ids = [target_dict["debug_run"]]
        config.flux_points.energy.nbins = 1
        if method == "3d":
            config.datasets.geom.axes.energy_true.nbins = 10
    analysis = Analysis(config)

    log.info("Running observations selection")
    analysis.get_observations()

    log.info(f"Running data reduction")
    analysis.get_datasets()

    log.info(f"Setting the model")
    txt = Path("model_config.yaml").read_text()
    txt = txt.format_map(target_dict)
    analysis.set_models(txt)
    if method == "3d":
        bkg_spectral = analysis.datasets[0].background_model.spectral_model
        bkg_spectral.norm.frozen = False
        bkg_spectral.tilt.frozen = False

        # Impose min and max values to ensure position does not diverge
        delta = 1.5
        lon = analysis.models[0].spatial_model.lon_0.value
        lat = analysis.models[0].spatial_model.lat_0.value
        analysis.models[0].spatial_model.lat_0.min = lat - delta
        analysis.models[0].spatial_model.lat_0.max = lat + delta
        analysis.models[0].spatial_model.lon_0.min = lon - delta
        analysis.models[0].spatial_model.lon_0.max = lon + delta

        if target_dict["spatial_model"] == "DiskSpatialModel":
            analysis.models[0].spatial_model.e.frozen = False
            analysis.models[0].spatial_model.phi.frozen = False
            analysis.models[0].spatial_model.r_0.value = 0.3
    log.info(f"Running fit ...")
    analysis.run_fit(optimize_opts={"print_level": 3})

    # TODO: This is a workaround. Set covariance automatically
    log.info(f"Writing {path_res}")
    write_fit_summary(analysis.models[0].parameters,
                      str(path_res / f"result-{method}.yaml"))

    if not skip_flux_points:
        log.info(f"Running flux points estimation")
        # Freeze all parameters except the background norm
        if method == "3d":
            dataset = analysis.datasets[0]
            for parameter in dataset.models.parameters:
                if parameter is not dataset.background_model.spectral_model.norm:
                    parameter.frozen = True
        analysis.get_flux_points()
        flux_points = analysis.flux_points.data
        flux_points.table["is_ul"] = flux_points.table["ts"] < 4
        keys = [
            "e_ref",
            "e_min",
            "e_max",
            "dnde",
            "dnde_errp",
            "dnde_errn",
            "is_ul",
            "dnde_ul",
        ]
        log.info(f"Writing {path_res}")
        flux_points.table_formatted[keys].write(path_res /
                                                f"flux-points-{method}.ecsv",
                                                format="ascii.ecsv")
Code example #6
def run_analysis(method, target_dict, debug):
    """If the method is "1d", runs joint spectral analysis for the selected target. If
    instead it is "3d", runs stacked 3D analysis."""
    tag = target_dict["tag"]
    log.info(f"Running {method} analysis, {tag}")
    path_res = Path(tag + "/results/")

    log.info("Reading config")
    txt = Path(f"config_{method}.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)
    if debug:
        config.observations.obs_ids = [target_dict["debug_run"]]
        config.flux_points.energy.nbins = 1
        if method == "3d":
            config.datasets.geom.axes.energy_true.nbins = 10
    analysis = Analysis(config)

    log.info("Running observations selection")
    analysis.get_observations()

    log.info(f"Running data reduction")
    analysis.get_datasets()

    # TODO: This is a workaround. We should somehow apply the safe mask (run by run) from the HLI
    from gammapy.cube import SafeMaskMaker
    datasets = []
    maker_safe_mask = SafeMaskMaker(methods=["edisp-bias", "bkg-peak"],
                                    bias_percent=10)
    for dataset in analysis.datasets:
        dataset = maker_safe_mask.run(dataset)
        datasets.append(dataset)
    analysis.datasets = datasets

    log.info(f"Setting the model")
    txt = Path("model_config.yaml").read_text()
    txt = txt.format_map(target_dict)
    log.info(txt)
    analysis.set_models(txt)
    if method == "3d" and target_dict["spatial_model"] == "DiskSpatialModel":
        analysis.models[0].spatial_model.e.frozen = False
        analysis.models[0].spatial_model.phi.frozen = False
        analysis.models[0].spatial_model.r_0.value = 0.3

    log.info(f"Running fit ...")
    analysis.run_fit()

    # TODO: This is a workaround. Set covariance automatically
    results = analysis.fit_result
    names = ["spectral_model", "spatial_model"]
    for name in names:
        if name == "spatial_model" and method == "1d":
            continue
        model = getattr(analysis.models[0], name)
        model.parameters.covariance = results.parameters.get_subcovariance(
            model.parameters.names)

    log.info(f"Writing {path_res}")
    write_fit_summary(analysis.models[0].parameters,
                      str(path_res / f"results-summary-fit-{method}.yaml"))

    log.info(f"Running flux points estimation")
    # TODO:  For the 3D analysis, re-optimize the background norm in each energy
    #  bin. For now, this is not possible from the HLI.
    analysis.get_flux_points(source=tag)
    flux_points = analysis.flux_points.data
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = [
        "e_ref",
        "e_min",
        "e_max",
        "dnde",
        "dnde_errp",
        "dnde_errn",
        "is_ul",
        "dnde_ul",
    ]
    log.info(f"Writing {path_res}")
    flux_points.table_formatted[keys].write(path_res /
                                            f"flux-points-{method}.ecsv",
                                            format="ascii.ecsv")
Code example #7
def run_analysis_3d(target_dict, fluxp_edges, debug):
    """Run stacked 3D analysis for the selected target.

    Notice that, for the sake of time saving, we run a stacked analysis, as opposed
     to the joint analysis that is performed in the reference paper.
    """
    tag = target_dict["tag"]
    log.info(f"running 3d analysis, {tag}")

    path_res = Path(tag + "/results/")

    txt = Path("config_template.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)

    log.info(f"Running observations selection")
    analysis = Analysis(config)
    analysis.get_observations()

    log.info(f"Running data reduction")
    analysis.get_datasets()

    # TODO: Improve safe mask handling in Analysis. The mask should be applied run-by-run
    maker_safe_mask = SafeMaskMaker(methods=["edisp-bias", "bkg-peak"])
    stacked = maker_safe_mask.run(analysis.datasets[0])

    log.info(f"Running fit ...")
    ra = target_dict["ra"]
    dec = target_dict["dec"]
    e_decorr = target_dict["e_decorr"]
    spectral_model = Model.create("PowerLawSpectralModel", reference=e_decorr)
    spatial_model = Model.create(target_dict["spatial_model"],
                                 lon_0=ra,
                                 lat_0=dec)
    if target_dict["spatial_model"] == "DiskSpatialModel":
        spatial_model.e.frozen = False
    sky_model = SkyModel(spatial_model=spatial_model,
                         spectral_model=spectral_model,
                         name=tag)

    stacked.models = sky_model
    stacked.background_model.norm.frozen = False
    fit = Fit([stacked])
    result = fit.run()

    parameters = stacked.models.parameters
    model_npars = len(sky_model.parameters.names)
    parameters.covariance = result.parameters.covariance[0:model_npars,
                                                         0:model_npars]
    log.info(f"Writing {path_res}")
    write_fit_summary(parameters,
                      str(path_res / "results-summary-fit-3d.yaml"))

    log.info("Running flux points estimation")
    # TODO: This is a workaround to re-optimize the bkg. Remove it once it's added to the Analysis class
    for par in stacked.parameters:
        if par is not stacked.background_model.norm:
            par.frozen = True

    reoptimize = not debug
    fpe = FluxPointsEstimator(datasets=[stacked],
                              e_edges=fluxp_edges,
                              source=tag,
                              reoptimize=reoptimize)

    flux_points = fpe.run()
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = [
        "e_ref",
        "e_min",
        "e_max",
        "dnde",
        "dnde_errp",
        "dnde_errn",
        "is_ul",
        "dnde_ul",
    ]
    log.info(f"Writing {path_res}")
    flux_points.table_formatted[keys].write(path_res / "flux-points-3d.ecsv",
                                            format="ascii.ecsv")
Code example #8
def test_docs_file():
    config = AnalysisConfig.from_yaml(DOC_FILE)
    assert config.validate() is None
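
DOC_FILE is a test fixture pointing at the example configuration shipped with the docs; its value (shown here) is an assumption:

DOC_FILE = "docs/analysis-config.yaml"  # hypothetical path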