Example 1
def test_validation_checks():
    config = AnalysisConfig()
    analysis = Analysis(config)
    analysis.settings["observations"]["datastore"] = "other"
    with pytest.raises(FileNotFoundError):
        analysis.get_observations()

    config = AnalysisConfig.from_template("1d")
    analysis = Analysis(config)
    assert analysis.get_flux_points() is False
    assert analysis.run_fit() is False
    assert analysis.set_model() is False
    assert analysis.get_datasets() is False

    analysis.get_observations()
    analysis.settings["datasets"]["dataset-type"] = "not assigned"
    assert analysis.get_datasets() is False

    analysis.settings["datasets"]["dataset-type"] = "SpectrumDatasetOnOff"
    analysis.get_observations()
    analysis.get_datasets()
    model_str = Path(MODEL_FILE).read_text()
    analysis.set_model(model=model_str)
    assert isinstance(analysis.model, SkyModels)
    assert analysis.set_model() is False

    analysis.run_fit()
    del analysis.settings["flux-points"]
    assert analysis.get_flux_points() is False
Example 2
def test_analysis_1d_stacked():
    cfg = """
    datasets:
        geom:
            axes:
                energy_true: {min: 0.03 TeV, max: 100 TeV, nbins: 50}
        background:
            method: reflected
    """

    config = get_example_config("1d")
    analysis = Analysis(config)
    analysis.update_config(cfg)
    analysis.config.datasets.stack = True
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(MODEL_FILE_1D)
    analysis.run_fit()

    assert len(analysis.datasets) == 1
    assert_allclose(analysis.datasets["stacked"].counts.data.sum(), 184)
    pars = analysis.fit_result.parameters

    assert_allclose(pars["index"].value, 2.76913, rtol=1e-2)
    assert_allclose(pars["amplitude"].value, 5.496388e-11, rtol=1e-2)
Example 3
def test_analysis_1d():
    cfg = """
    observations:
        datastore: $GAMMAPY_DATA/hess-dl3-dr1
        obs_ids: [23523, 23526]
        obs_time: {
            start: [J2004.92654346, J2004.92658453, J2004.92663655],
            stop: [J2004.92658453, J2004.92663655, J2004.92670773]
        }
    datasets:
        type: 1d
        background:
            method: reflected
        geom:
            axes:
                energy_true: {min: 0.01 TeV, max: 300 TeV, nbins: 109}
        on_region: {frame: icrs, lon: 83.633 deg, lat: 22.014 deg, radius: 0.11 deg}
        safe_mask:
            methods: [aeff-default, edisp-bias]
            parameters: {bias_percent: 10.0}
        containment_correction: false
    flux_points:
        energy: {min: 1 TeV, max: 50 TeV, nbins: 4}
    light_curve:
        energy_edges: {min: 1 TeV, max: 50 TeV, nbins: 1}
        time_intervals: {
            start: [J2004.92654346, J2004.92658453, J2004.92663655],
            stop: [J2004.92658453, J2004.92663655, J2004.92670773]
        }
    """
    config = get_example_config("1d")
    analysis = Analysis(config)
    analysis.update_config(cfg)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(MODEL_FILE_1D)
    analysis.run_fit()
    analysis.get_flux_points()
    analysis.get_light_curve()

    assert len(analysis.datasets) == 3
    table = analysis.flux_points.data.to_table(sed_type="dnde")

    assert len(table) == 4
    dnde = table["dnde"].quantity
    assert dnde.unit == "cm-2 s-1 TeV-1"

    assert_allclose(dnde[0].value, 8.116854e-12, rtol=1e-2)
    assert_allclose(dnde[2].value, 3.444475e-14, rtol=1e-2)

    axis = analysis.light_curve.geom.axes["time"]
    assert axis.nbin == 3
    assert_allclose(axis.time_min.mjd, [53343.92, 53343.935, 53343.954])

    flux = analysis.light_curve.flux.data[:, :, 0, 0]
    assert_allclose(flux, [[1.688954e-11], [2.347870e-11], [1.604152e-11]],
                    rtol=1e-4)
Example 4
def main(config_path, models_path, output, reference):
    config = AnalysisConfig.read(config_path)
    analysis = Analysis(config)
    log.info(config)

    analysis.get_observations()
    log.info(analysis)
    log.info(dir(analysis))
    analysis.get_datasets()
    log.info(analysis.datasets)
    log.info(analysis.datasets[0].counts)
    analysis.read_models(models_path)

    # stacked fit and flux estimation
    analysis.run_fit()
    analysis.get_flux_points()

    # Plot flux points
    ax_sed, ax_residuals = analysis.flux_points.plot_fit()
    if reference:
        plot_kwargs = {
            "energy_range": [
                analysis.config.flux_points.energy.min,
                analysis.config.flux_points.energy.max,
            ],
            "energy_power": 2,
            "flux_unit": "erg-1 cm-2 s-1",
        }
        create_crab_spectral_model(reference).plot(
            **plot_kwargs, ax=ax_sed, label="Crab reference"
        )
        ax_sed.legend()
        ax_sed.set_ylim(1e-12, 1e-9)
    

    base_out = Path(output)
    ax_sed.get_figure().savefig(base_out.with_suffix(".pdf").as_posix())
    plt.clf()
    analysis.models.write(base_out.with_suffix(".yaml").as_posix(), overwrite=True)
    analysis.flux_points.write(
        base_out.with_suffix(".fits").as_posix(), overwrite=True
    )
    ax_excess = analysis.datasets["stacked"].plot_excess()
    ax_excess.get_figure().savefig(base_out.with_suffix(".excess.pdf").as_posix())
    plt.clf()
        
    config.datasets.stack = False
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(models_path)
    lc_maker_low = LightCurveEstimator(
        energy_edges=[0.2, 5] * u.TeV, source=config.flux_points.source, reoptimize=False
    )
    lc_low = lc_maker_low.run(analysis.datasets)
    ax_lc = lc_low.plot(marker="o", label="1D")
    ax_lc.get_figure().savefig(base_out.with_suffix(".lc.pdf").as_posix())
    plt.clf()
Example 5
def test_usage_errors():
    config = get_example_config("1d")
    analysis = Analysis(config)
    with pytest.raises(RuntimeError):
        analysis.get_datasets()
    with pytest.raises(RuntimeError):
        analysis.read_models(MODEL_FILE)
    with pytest.raises(RuntimeError):
        analysis.run_fit()
    with pytest.raises(RuntimeError):
        analysis.get_flux_points()
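
For contrast, a minimal sketch of the call order these RuntimeErrors enforce (assuming the same test fixtures, get_example_config and MODEL_FILE, and $GAMMAPY_DATA being set):

def sketch_happy_path():
    # Each step requires the previous one to have run.
    config = get_example_config("1d")
    analysis = Analysis(config)
    analysis.get_observations()       # select observations from the datastore
    analysis.get_datasets()           # requires observations
    analysis.read_models(MODEL_FILE)  # requires datasets
    analysis.run_fit()                # requires models
    analysis.get_flux_points()        # requires a fit result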
Example 6
def run_analysis(estimate):
    """Run analysis from observation selection to model fitting."""
    config = AnalysisConfig.read(f"{estimate}/config.yaml")
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()

    models = Models.read(f"{estimate}/models.yaml")
    analysis.set_models(models)
    analysis.run_fit()
    return analysis
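
A hedged usage sketch for the helper above, assuming "crab" is a directory containing the config.yaml and models.yaml it reads:

analysis = run_analysis("crab")
print(analysis.fit_result)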
Example 7
def run_3d(name):
    """Run 3D analysis for one source."""
    logging.info(f"run3d: {name}")
    mode = "3d"
    config_file = f"config{mode}.yaml"
    target_config_file = "targets.yaml"
    model_file = f"model{mode}_{name}.yaml"

    outdir = f"results/{name}"

    config = target_config3d(config_file, target_config_file, name)
    analysis = Analysis(config)
    analysis.get_observations()

    conf = config.settings["observations"]["filters"][0]
    nb, lon, lat, rad = (
        len(analysis.observations.ids),
        conf["lon"],
        conf["lat"],
        conf["radius"],
    )
    logging.info(f"{nb} observations found in {rad} around {lon}, {lat} ")

    analysis.get_datasets()

    # quick look at the stacked counts map (plotted again after the fit below)
    plt.figure(figsize=(5, 5))
    analysis.datasets["stacked"].counts.sum_over_axes().plot(add_cbar=True)
    plt.savefig(f"{outdir}/{name}_{mode}_counts.png", bbox_inches="tight")

    analysis.set_model(filename=model_file)
    logging.info(analysis.model)
    analysis.run_fit()
    logging.info(analysis.fit_result.parameters.to_table())
    analysis.fit_result.parameters.to_table().write(
        f"{outdir}/{name}_{mode}_bestfit.dat", format="ascii", overwrite=True)

    analysis.get_flux_points(source=f"{name}")
    analysis.flux_points.write(f"{outdir}/{name}_{mode}_fluxpoints.fits")

    plt.figure(figsize=(5, 5))
    analysis.datasets["stacked"].counts.sum_over_axes().plot(add_cbar=True)
    plt.savefig(f"{outdir}/{name}_{mode}_counts.png", bbox_inches="tight")

    plt.figure(figsize=(5, 5))
    analysis.datasets["stacked"].plot_residuals(method="diff/sqrt(model)",
                                                vmin=-0.5,
                                                vmax=0.5)
    plt.savefig(f"{outdir}/{name}_{mode}_residuals.png", bbox_inches="tight")

    plt.figure(figsize=(8, 5))
    ax_sed, ax_residuals = analysis.flux_points.peek()
    plt.savefig(f"{outdir}/{name}_{mode}_fluxpoints.png", bbox_inches="tight")
Example 8
def test_analysis_1d_stacked():
    config = AnalysisConfig.from_template("1d")
    analysis = Analysis(config)
    analysis.settings["datasets"]["stack-datasets"] = True
    analysis.get_observations()
    analysis.get_datasets()
    analysis.set_model(filename=MODEL_FILE)
    analysis.run_fit()

    assert len(analysis.datasets) == 1
    assert_allclose(analysis.datasets["stacked"].counts.data.sum(), 404)
    pars = analysis.fit_result.parameters

    assert_allclose(pars["index"].value, 2.689559, rtol=1e-3)
    assert_allclose(pars["amplitude"].value, 2.81629e-11, rtol=1e-3)
Example 9
def test_analysis_1d(config_analysis_data):
    config = AnalysisConfig.from_template("1d")
    analysis = Analysis(config)
    analysis.config.update_settings(config_analysis_data)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.set_model(filename=MODEL_FILE)
    analysis.run_fit()
    analysis.get_flux_points()

    assert len(analysis.datasets) == 2
    assert len(analysis.flux_points.data.table) == 4
    dnde = analysis.flux_points.data.table["dnde"].quantity
    assert dnde.unit == "cm-2 s-1 TeV-1"

    assert_allclose(dnde[0].value, 8.03604e-12, rtol=1e-2)
    assert_allclose(dnde[-1].value, 4.780021e-21, rtol=1e-2)
Example 10
def test_usage_errors():
    config = get_example_config("1d")
    analysis = Analysis(config)
    with pytest.raises(RuntimeError):
        analysis.get_datasets()
    with pytest.raises(RuntimeError):
        analysis.read_datasets()
    with pytest.raises(RuntimeError):
        analysis.write_datasets()
    with pytest.raises(TypeError):
        analysis.read_models()
    with pytest.raises(RuntimeError):
        analysis.write_models()
    with pytest.raises(RuntimeError):
        analysis.run_fit()
    with pytest.raises(RuntimeError):
        analysis.get_flux_points()
    with pytest.raises(ValidationError):
        analysis.config.datasets.type = "None"
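
The final ValidationError arises because the config is validated on assignment; a supported value passes, e.g. (a sketch, assuming "1d" and "3d" remain the allowed dataset types, as in the other examples here):

analysis.config.datasets.type = "3d"  # accepted; "None" is not a valid type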
Example 11
def analysis_3d_data_reduction(target):
    log.info(f"analysis_3d_data_reduction: {target}")

    opts = yaml.safe_load(Path("targets.yaml").read_text())[target]

    txt = Path("config_template.yaml").read_text()
    txt = txt.format_map(opts)
    config = AnalysisConfig.from_yaml(txt)

    analysis = Analysis(config)
    analysis.get_observations()
    log.info("Running data reduction")
    analysis.get_datasets()

    # TODO: write datasets and separate fitting to next function
    # Not implemented in Gammapy yet, coming very soon.
    log.info("Running fit ...")
    analysis.read_models(f"{target}/model_3d.yaml")
    log.info(analysis.models)
    analysis.run_fit()
    log.info(analysis.fit_result.parameters.to_table())
    path = f"{target}/{target}_3d_bestfit.rst"
    log.info(f"Writing {path}")
    analysis.fit_result.parameters.to_table().write(path,
                                                    format="ascii.rst",
                                                    overwrite=True)

    #    analysis.get_flux_points(source=f"{target}")
    #    path = f"{target}/{target}_3d_fluxpoints.fits"
    #    log.info(f"Writing {path}")
    #    analysis.flux_points.write(path, overwrite=True)

    analysis.get_flux_points(source=f"{target}")
    path = f"{target}/{target}_3d_fluxpoints.ecsv"
    log.info(f"Writing {path}")
    keys = [
        "e_ref", "e_min", "e_max", "dnde", "dnde_errp", "dnde_errn", "is_ul"
    ]
    analysis.flux_points.data.table_formatted[keys].write(path,
                                                          format="ascii.ecsv",
                                                          overwrite=True)

    return analysis  # will write to disk when possible
Example 12
def test_analysis_3d():
    config = AnalysisConfig.from_template("3d")
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.set_model(filename=MODEL_FILE)
    analysis.datasets["stacked"].background_model.tilt.frozen = False
    analysis.run_fit()
    analysis.get_flux_points()

    assert len(analysis.datasets) == 1
    assert len(analysis.fit_result.parameters) == 8
    res = analysis.fit_result.parameters
    assert res[3].unit == "cm-2 s-1 TeV-1"
    assert len(analysis.flux_points.data.table) == 2
    dnde = analysis.flux_points.data.table["dnde"].quantity

    assert_allclose(dnde[0].value, 1.182768e-11, rtol=1e-1)
    assert_allclose(dnde[-1].value, 4.051367e-13, rtol=1e-1)
    assert_allclose(res["index"].value, 2.76607, rtol=1e-1)
    assert_allclose(res["tilt"].value, -0.143204, rtol=1e-1)
Example 13
def test_analysis_3d():
    config = get_example_config("3d")
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(MODEL_FILE)
    analysis.datasets["stacked"].background_model.spectral_model.tilt.frozen = False
    analysis.run_fit()
    analysis.get_flux_points()

    assert len(analysis.datasets) == 1
    assert len(analysis.fit_result.parameters) == 8
    res = analysis.fit_result.parameters
    assert res["amplitude"].unit == "cm-2 s-1 TeV-1"
    assert len(analysis.flux_points.data.table) == 2
    dnde = analysis.flux_points.data.table["dnde"].quantity

    assert_allclose(dnde[0].value, 1.376879e-11, rtol=1e-2)
    assert_allclose(dnde[-1].value, 2.691466e-13, rtol=1e-2)
    assert_allclose(res["index"].value, 3.097613, rtol=1e-2)
    assert_allclose(res["tilt"].value, -0.207792, rtol=1e-2)
Example 14
def test_analysis_1d_stacked_no_fit_range():
    cfg = """
    observations:
        datastore: $GAMMAPY_DATA/hess-dl3-dr1
        obs_cone: {frame: icrs, lon: 83.633 deg, lat: 22.014 deg, radius: 5 deg}
        obs_ids: [23592, 23559]

    datasets:
        type: 1d
        stack: false
        geom:
            axes:
                energy: {min: 0.01 TeV, max: 100 TeV, nbins: 73}
                energy_true: {min: 0.03 TeV, max: 100 TeV, nbins: 50}
        on_region: {frame: icrs, lon: 83.633 deg, lat: 22.014 deg, radius: 0.1 deg}
        containment_correction: true
        background:
            method: reflected
    """
    config = AnalysisConfig.from_yaml(cfg)
    analysis = Analysis(config)
    analysis.update_config(cfg)
    analysis.config.datasets.stack = True
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(MODEL_FILE_1D)
    analysis.run_fit()
    with pytest.raises(ValueError):
        analysis.get_excess_map()

    assert len(analysis.datasets) == 1
    assert_allclose(analysis.datasets["stacked"].counts.data.sum(), 184)
    pars = analysis.models.parameters
    assert_allclose(analysis.datasets[0].mask_fit.data, True)

    assert_allclose(pars["index"].value, 2.76913, rtol=1e-2)
    assert_allclose(pars["amplitude"].value, 5.479729e-11, rtol=1e-2)
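
The ValueError above arises because excess maps are only defined for map (3d) datasets; with a 3d config the same call goes through. A sketch, reusing the fixtures from the other examples:

config = get_example_config("3d")
analysis = Analysis(config)
analysis.get_observations()
analysis.get_datasets()
analysis.get_excess_map()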
Example 15
def test_analysis_1d():
    cfg = """
    observations:
        datastore: $GAMMAPY_DATA/hess-dl3-dr1
        obs_ids: [23523, 23526]
    datasets:
        type: 1d
        background:
            method: reflected
        geom:
            axes:
                energy_true: {min: 0.01 TeV, max: 300 TeV, nbins: 109}
        on_region: {frame: icrs, lon: 83.633 deg, lat: 22.014 deg, radius: 0.11 deg}
        safe_mask:
            methods: [aeff-default, edisp-bias]
            parameters: {bias_percent: 10.0}
        containment_correction: false
    flux_points:
        energy: {min: 1 TeV, max: 50 TeV, nbins: 4}
    """
    config = get_example_config("1d")
    analysis = Analysis(config)
    analysis.update_config(cfg)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(MODEL_FILE_1D)
    analysis.run_fit()
    analysis.get_flux_points()

    assert len(analysis.datasets) == 2
    assert len(analysis.flux_points.data.table) == 4
    dnde = analysis.flux_points.data.table["dnde"].quantity
    assert dnde.unit == "cm-2 s-1 TeV-1"

    assert_allclose(dnde[0].value, 8.116854e-12, rtol=1e-2)
    assert_allclose(dnde[2].value, 3.547128e-14, rtol=1e-2)
Example 16
      value: 1.0
      unit: TeV
      frozen: true
"""

# Now we set the model on the analysis object:

analysis.set_models(model_config)

# Finally we run the fit:

analysis.run_fit()

print(analysis.fit_result)

# This is how we can write the model back to file again:

filename = path / "model-best-fit.yaml"
analysis.models.write(filename, overwrite=True)

get_ipython().system('cat analysis_1/model-best-fit.yaml')
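
Correspondingly, the best-fit model can be read back from that file (a sketch, assuming the Models class used in the other examples here):

from gammapy.modeling.models import Models

models = Models.read(path / "model-best-fit.yaml")
print(models)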
Example 17
def run_analysis_3d(target_dict):
    """Run 3D analysis for the selected target"""
    tag = target_dict["tag"]
    name = target_dict["name"]
    log.info(f"running 3d analysis, {tag}")

    path_res = Path(tag + "/results/")

    ra = target_dict["ra"]
    dec = target_dict["dec"]
    e_decorr = target_dict["e_decorr"]

    config_str = f"""
    general:
        logging:
            level: INFO
        outdir: .

    observations:
        datastore: $GAMMAPY_DATA/hess-dl3-dr1/
        filters:
            - filter_type: par_value
              value_param: {name}
              variable: TARGET_NAME

    datasets:
        dataset-type: MapDataset
        stack-datasets: true
        offset-max: 2.5 deg
        geom:
            skydir: [{ra}, {dec}]
            width: [5, 5]
            binsz: 0.02
            coordsys: CEL
            proj: TAN
            axes:
              - name: energy
                hi_bnd: 100
                lo_bnd: 0.1
                nbin: 24
                interp: log
                node_type: edges
                unit: TeV
        energy-axis-true:
            name: energy
            hi_bnd: 100
            lo_bnd: 0.1
            nbin: 72
            interp: log
            node_type: edges
            unit: TeV
    """
    print(config_str)
    config = AnalysisConfig(config_str)

    #  Observation selection
    analysis = Analysis(config)
    analysis.get_observations()

    if DEBUG:
        analysis.observations.list = [analysis.observations.list[0]]

    # Data reduction
    analysis.get_datasets()

    # Set runwise energy threshold. See reference paper, section 5.1.1.
    for dataset in analysis.datasets:
        # energy threshold given by the 10% edisp criterium
        e_thr_bias = dataset.edisp.get_bias_energy(0.1)

        # energy at which the background peaks
        background_model = dataset.background_model
        bkg_spectrum = background_model.map.get_spectrum()
        peak = bkg_spectrum.data.max()
        idx = list(bkg_spectrum.data).index(peak)
        e_thr_bkg = bkg_spectrum.energy.center[idx]

        esafe = max(e_thr_bias, e_thr_bkg)
        dataset.mask_fit = dataset.counts.geom.energy_mask(emin=esafe)

    # Model fitting
    spatial_model = target_dict["spatial_model"]
    model_config = f"""
    components:
        - name: {tag}
          type: SkyModel
          spatial:
            type: {spatial_model}
            frame: icrs
            parameters:
            - name: lon_0
              value: {ra}
              unit: deg
            - name: lat_0 
              value: {dec}    
              unit: deg
          spectral:
            type: PowerLawSpectralModel
            parameters:
            - name: amplitude      
              value: 1.0e-12
              unit: cm-2 s-1 TeV-1
            - name: index
              value: 2.0
              unit: ''
            - name: reference
              value: {e_decorr}
              unit: TeV
              frozen: true
    """
    model_npars = 5
    if spatial_model == "DiskSpatialModel":
        model_config = yaml.safe_load(model_config)
        parameters = model_config["components"][0]["spatial"]["parameters"]
        parameters.append(
            {
                "name": "r_0",
                "value": 0.2,
                "unit": "deg",
                "frozen": False
            }
        )
        parameters.append(
            {
                "name": "e",
                "value": 0.8,
                "unit": "",
                "frozen": False
            }
        )
        parameters.append(
            {
                "name": "phi",
                "value": 150,
                "unit": "deg",
                "frozen": False
            }
        )
        parameters.append(
            {
                "name": "edge",
                "value": 0.01,
                "unit": "deg",
                "frozen": True
            }
        )
        model_npars += 4
    analysis.set_model(model=model_config)

    for dataset in analysis.datasets:
        dataset.background_model.norm.frozen = False

    analysis.run_fit()

    parameters = analysis.model.parameters
    parameters.covariance = analysis.fit_result.parameters.covariance[0:model_npars, 0:model_npars]
    write_fit_summary(parameters, str(path_res / "results-summary-fit-3d.yaml"))

    # Flux points
    # TODO: This is a workaround to re-optimize the bkg in each energy bin. Add has to be added to the Analysis class
    datasets = analysis.datasets.copy()
    for dataset in datasets:
        for par in dataset.parameters:
            if par is not dataset.background_model.norm:
                par.frozen = True

    reoptimize = not DEBUG
    fpe = FluxPointsEstimator(
        datasets=datasets, e_edges=FLUXP_EDGES, source=tag, reoptimize=reoptimize
    )

    flux_points = fpe.run()
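    # Flag flux points with TS < 4 (roughly 2 sigma) as upper limits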
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = ["e_ref", "e_min", "e_max", "dnde", "dnde_errp", "dnde_errn"]
    flux_points.table_formatted[keys].write(
        path_res / "flux-points-3d.ecsv", format="ascii.ecsv"
    )
Example 18
def run_analysis(method, target_dict, debug, skip_flux_points):
    """If the method is "1d", runs joint spectral analysis for the selected target. If
    instead it is "3d", runs stacked 3D analysis."""
    tag = target_dict["tag"]
    log.info(f"Running {method} analysis, {tag}")
    path_res = Path(tag + "/results/")

    log.info("Reading config")
    txt = Path(f"config_{method}.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)
    if debug:
        config.observations.obs_ids = [target_dict["debug_run"]]
        config.flux_points.energy.nbins = 1
        if method == "3d":
            config.datasets.geom.axes.energy_true.nbins = 10
    analysis = Analysis(config)

    log.info("Running observations selection")
    analysis.get_observations()

    log.info(f"Running data reduction")
    analysis.get_datasets()

    log.info(f"Setting the model")
    txt = Path("model_config.yaml").read_text()
    txt = txt.format_map(target_dict)
    analysis.set_models(txt)
    if method == "3d":
        analysis.datasets[0].background_model.spectral_model.norm.frozen = False
        analysis.datasets[0].background_model.spectral_model.tilt.frozen = False

        # Impose min and max values to ensure position does not diverge
        delta = 1.5
        lon = analysis.models[0].spatial_model.lon_0.value
        lat = analysis.models[0].spatial_model.lat_0.value
        analysis.models[0].spatial_model.lat_0.min = lat - delta
        analysis.models[0].spatial_model.lat_0.max = lat + delta
        analysis.models[0].spatial_model.lon_0.min = lon - delta
        analysis.models[0].spatial_model.lon_0.max = lon + delta

        if target_dict["spatial_model"] == "DiskSpatialModel":
            analysis.models[0].spatial_model.e.frozen = False
            analysis.models[0].spatial_model.phi.frozen = False
            analysis.models[0].spatial_model.r_0.value = 0.3
    log.info(f"Running fit ...")
    analysis.run_fit(optimize_opts={"print_level": 3})

    # TODO: This is a workaround. Set covariance automatically
    log.info(f"Writing {path_res}")
    write_fit_summary(analysis.models[0].parameters,
                      str(path_res / f"result-{method}.yaml"))

    if not skip_flux_points:
        log.info(f"Running flux points estimation")
        # Freeze all parameters except the background norm
        if method == "3d":
            dataset = analysis.datasets[0]
            for parameter in dataset.models.parameters:
                if parameter is not dataset.background_model.spectral_model.norm:
                    parameter.frozen = True
        analysis.get_flux_points()
        flux_points = analysis.flux_points.data
        flux_points.table["is_ul"] = flux_points.table["ts"] < 4
        keys = [
            "e_ref",
            "e_min",
            "e_max",
            "dnde",
            "dnde_errp",
            "dnde_errn",
            "is_ul",
            "dnde_ul",
        ]
        log.info(f"Writing {path_res}")
        flux_points.table_formatted[keys].write(path_res /
                                                f"flux-points-{method}.ecsv",
                                                format="ascii.ecsv")
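
A hypothetical invocation of the helper above; target_dict must supply every key that the config_{method}.yaml and model_config.yaml templates reference through format_map:

run_analysis(
    method="1d",
    target_dict={"tag": "crab", "debug_run": 23523},  # plus any keys the templates reference
    debug=True,
    skip_flux_points=True,
)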
Example 19
def run_analysis(method, target_dict, debug):
    """If the method is "1d", runs joint spectral analysis for the selected target. If
    instead it is "3d", runs stacked 3D analysis."""
    tag = target_dict["tag"]
    log.info(f"Running {method} analysis, {tag}")
    path_res = Path(tag + "/results/")

    log.info("Reading config")
    txt = Path(f"config_{method}.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)
    if debug:
        config.observations.obs_ids = [target_dict["debug_run"]]
        config.flux_points.energy.nbins = 1
        if method == "3d":
            config.datasets.geom.axes.energy_true.nbins = 10
    analysis = Analysis(config)

    log.info("Running observations selection")
    analysis.get_observations()

    log.info(f"Running data reduction")
    analysis.get_datasets()

    # TODO: This is a workaround. We should somehow apply the safe mask (run by run) from the HLI
    from gammapy.cube import SafeMaskMaker
    datasets = []
    maker_safe_mask = SafeMaskMaker(methods=["edisp-bias", "bkg-peak"],
                                    bias_percent=10)
    for dataset in analysis.datasets:
        dataset = maker_safe_mask.run(dataset)
        datasets.append(dataset)
    analysis.datasets = datasets

    log.info(f"Setting the model")
    txt = Path("model_config.yaml").read_text()
    txt = txt.format_map(target_dict)
    log.info(txt)
    analysis.set_models(txt)
    if method == "3d" and target_dict["spatial_model"] == "DiskSpatialModel":
        analysis.models[0].spatial_model.e.frozen = False
        analysis.models[0].spatial_model.phi.frozen = False
        analysis.models[0].spatial_model.r_0.value = 0.3

    log.info(f"Running fit ...")
    analysis.run_fit()

    # TODO: This is a workaround. Set covariance automatically
    results = analysis.fit_result
    names = ["spectral_model", "spatial_model"]
    for name in names:
        if name == "spatial_model" and method == "1d":
            continue
        model = getattr(analysis.models[0], name)
        model.parameters.covariance = results.parameters.get_subcovariance(
            model.parameters.names)

    log.info(f"Writing {path_res}")
    write_fit_summary(analysis.models[0].parameters,
                      str(path_res / f"results-summary-fit-{method}.yaml"))

    log.info(f"Running flux points estimation")
    # TODO:  For the 3D analysis, re-optimize the background norm in each energy
    #  bin. For now, this is not possible from the HLI.
    analysis.get_flux_points(source=tag)
    flux_points = analysis.flux_points.data
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = [
        "e_ref",
        "e_min",
        "e_max",
        "dnde",
        "dnde_errp",
        "dnde_errn",
        "is_ul",
        "dnde_ul",
    ]
    log.info(f"Writing {path_res}")
    flux_points.table_formatted[keys].write(path_res /
                                            f"flux-points-{method}.ecsv",
                                            format="ascii.ecsv")
Example 20
def run_analyses(targets):
    log.info("Run small source extension check.")

    info = {}

    targets = list(AVAILABLE_TARGETS) if targets == "all-targets" else [targets]

    for target in targets:
        t = time.time()

        config = AnalysisConfig.read(f"configs/config_{target}.yaml")
        analysis = Analysis(config)
        analysis.get_observations()
        info["data_preparation"] = time.time() - t

        t = time.time()

        analysis.get_datasets()
        info["data_reduction"] = time.time() - t

        models = Models.read(f"models/model_{target}.yaml")

        point_models = Models(define_model_pointlike(models[0]))
        analysis.set_models(point_models)

        t = time.time()
        analysis.run_fit()

        info["point_model_fitting"] = time.time() - t
        log.info(f"\n{point_models.to_parameters_table()}")

        log.info("Fitting extended gaussian source.")

        analysis.datasets.models = []
        analysis.set_models(models)
        t = time.time()

        analysis.run_fit()

        info["gauss_model_fitting"] = time.time() - t

        log.info(analysis.fit_result)

        log.info(f"\n{models.to_parameters_table()}")

        log.info("Extract size error, UL and stat profile.")

        t = time.time()
        analysis.models[0].spatial_model.lon_0.frozen = True
        analysis.models[0].spatial_model.lat_0.frozen = True
        analysis.models[0].spectral_model.index.frozen = True

        size_est = ExtensionEstimator(
            source=models[0].name,
            energy_edges=[0.2, 10.0] * u.TeV,
            selection_optional=["errn-errp", "ul", "scan"],
            size_min="0.08 deg",
            size_max="0.12 deg",
            size_n_values=20,
            reoptimize=True)
        res = size_est.run(analysis.datasets)

        info["estimator"] = time.time() - t
        t = time.time()

        log.info(res)
        plot_profile(res[0], target)

        Path(f"bench_{target}.yaml").write_text(
            yaml.dump(info, sort_keys=False, indent=4))
        analysis.models.to_parameters_table().write(
            f"results/{target}_results.ecsv", overwrite=True)