Code example #1
def data_reduction(instrument):
    log.info(f"data_reduction: {instrument}")
    config = AnalysisConfig.read("config.yaml")
    config.observations.datastore = f"$JOINT_CRAB/data/{instrument}"
    config.datasets.stack = instrument_opts[instrument]['stack']
    config.datasets.containment_correction = instrument_opts[instrument][
        'containment']
    config.datasets.on_region.radius = instrument_opts[instrument]['on_radius']

    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()

    # TODO: remove once the safe mask can be set via the config
    if instrument == 'fact':
        from gammapy.datasets import Datasets, SpectrumDatasetOnOff
        stacked = SpectrumDatasetOnOff.create(
            e_reco=analysis.datasets[0]._energy_axis.edges,
            e_true=analysis.datasets[0]._energy_axis.edges,
            region=None)
        for ds in analysis.datasets:
            ds.mask_safe[:] = True
            stacked.stack(ds)
        analysis.datasets = Datasets([stacked])

    analysis.datasets.write(f"reduced_{instrument}", overwrite=True)
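This snippet relies on an `instrument_opts` dictionary defined elsewhere in the script. A minimal sketch of the shape the code expects; the values below are illustrative placeholders, not the benchmark's actual settings:

import astropy.units as u

# Hypothetical per-instrument reduction options; the snippet above reads the
# 'stack', 'containment' and 'on_radius' entries for each instrument.
instrument_opts = {
    "fact": {"stack": True, "containment": False, "on_radius": 0.17 * u.deg},
    "magic": {"stack": False, "containment": False, "on_radius": 0.14 * u.deg},
    "hess": {"stack": False, "containment": True, "on_radius": 0.11 * u.deg},
}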
Code example #2
def cli_run_analysis(filename, out, overwrite):
    """Performs automated data reduction process."""
    config = AnalysisConfig.read(filename)
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()
    analysis.datasets.write(out, overwrite=overwrite)
    log.info(f"Datasets stored in {out} folder.")
Code example #3
def main(config_path, models_path, output, reference):
    config = AnalysisConfig.read(config_path)
    analysis = Analysis(config)
    log.info(config)

    analysis.get_observations()
    log.info(analysis)
    log.info(dir(analysis))
    analysis.get_datasets()
    # Inspect the reduced datasets only once they exist; before get_datasets()
    # runs, analysis.datasets is not yet populated.
    log.info(analysis.datasets)
    log.info(analysis.datasets[0].counts)
    analysis.read_models(models_path)

    # stacked fit and flux estimation
    analysis.run_fit()
    analysis.get_flux_points()

    # Plot flux points
    ax_sed, ax_residuals = analysis.flux_points.plot_fit()
    if reference:
        plot_kwargs = {
            "energy_range": [
                analysis.config.flux_points.energy.min,
                analysis.config.flux_points.energy.max,
            ],
            "energy_power": 2,
            "flux_unit": "erg-1 cm-2 s-1",
        }
        create_crab_spectral_model(reference).plot(
            **plot_kwargs, ax=ax_sed, label="Crab reference"
        )
        ax_sed.legend()
        ax_sed.set_ylim(1e-12, 1e-9)

    base_out = Path(output)
    ax_sed.get_figure().savefig(base_out.with_suffix(".pdf").as_posix())
    plt.clf()
    analysis.models.write(base_out.with_suffix(".yaml").as_posix(), overwrite=True)
    analysis.flux_points.write(
        base_out.with_suffix(".fits").as_posix(), overwrite=True
    )
    ax_excess = analysis.datasets["stacked"].plot_excess()
    ax_excess.get_figure().savefig(base_out.with_suffix(".excess.pdf").as_posix())
    plt.clf()

    config.datasets.stack = False
    analysis.get_observations()
    analysis.get_datasets()
    analysis.read_models(models_path)
    lc_maker_low = LightCurveEstimator(
        energy_edges=[0.2, 5] * u.TeV, source=config.flux_points.source, reoptimize=False
    )
    lc_low = lc_maker_low.run(analysis.datasets)
    ax_lc = lc_low.plot(marker="o", label="1D")
    ax_lc.get_figure().savefig(base_out.with_suffix(".lc.pdf").as_posix())
    plt.clf()
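The `create_crab_spectral_model` helper used above ships with gammapy (`gammapy.modeling.models`); it returns a published Crab nebula reference spectrum selected by name. A quick standalone illustration ("meyer" is one documented reference name; check the gammapy docs for the full set):

import astropy.units as u
from gammapy.modeling.models import create_crab_spectral_model

# Build a published Crab reference spectrum and evaluate it at 1 TeV.
crab = create_crab_spectral_model("meyer")
print(crab(1 * u.TeV))  # differential flux dN/dE at 1 TeV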
Code example #4
File: make.py Project: gammapy/gammapy-data
def run_analysis(estimate):
    """Run analysis from observation selection to model fitting."""
    config = AnalysisConfig.read(f"{estimate}/config.yaml")
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()

    models = Models.read(f"{estimate}/models.yaml")
    analysis.set_models(models)
    analysis.run_fit()
    return analysis
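Since `run_analysis` returns the `Analysis` object, the caller can inspect the fit afterwards. A hypothetical call, assuming a folder named "crab" containing config.yaml and models.yaml:

# Hypothetical usage; "crab" is a placeholder folder name.
analysis = run_analysis("crab")
print(analysis.fit_result)
print(analysis.models.to_parameters_table())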
Code example #5
File: make.py Project: adonath/gammapy-benchmarks
def data_reduction(instrument):
    log.info(f"data_reduction: {instrument}")
    config = AnalysisConfig.read("config.yaml")
    config.observations.datastore = str(Path().resolve().parent / "data" /
                                        "joint-crab" / instrument)
    config.datasets.stack = instrument_opts[instrument]["stack"]
    config.datasets.containment_correction = instrument_opts[instrument][
        "containment"]
    config.datasets.on_region.radius = instrument_opts[instrument]["on_radius"]

    if instrument == "fact":
        config.datasets.safe_mask.methods = ["aeff-default"]

    analysis = Analysis(config)
    analysis.get_observations()

    analysis.get_datasets()
    if instrument == "fact":
        counts = analysis.datasets[0].counts
        data = counts.geom.energy_mask(emin=0.4 * u.TeV)
        analysis.datasets[0].mask_safe = counts.copy(data=data)

    analysis.datasets.write(f"reduced_{instrument}", overwrite=True)
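Unlike code example #1, this version pushes the FACT safe-mask choice into the configuration rather than patching datasets by hand. A sketch of the equivalent YAML fragment, assuming `AnalysisConfig.from_yaml` (a classmethod recent gammapy versions provide, which merges partial YAML onto the config defaults):

from gammapy.analysis import AnalysisConfig

# Sketch: the programmatic override above corresponds to this YAML fragment.
config = AnalysisConfig.from_yaml("""
datasets:
    safe_mask:
        methods: [aeff-default]
""")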
Code example #6
config.flux_points.energy = {"min": "1 TeV", "max": "10 TeV", "nbins": 3}

# We're all set.
# But before we go on, let's see how to save and import `AnalysisConfig` objects through YAML files.

# ### Using YAML configuration files
#
# One can export/import the `AnalysisConfig` to/from a YAML file.

# In[ ]:

config.write("config.yaml", overwrite=True)

# In[ ]:

config = AnalysisConfig.read("config.yaml")
print(config)

# ## Running the analysis
#
# We first create an `~gammapy.analysis.Analysis` object from our configuration.

# In[ ]:

analysis = Analysis(config)

# ###  Observation selection
#
# We can directly select and load the observations from disk using `~gammapy.analysis.Analysis.get_observations()`:

# In[ ]:
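# The excerpt is cut off here; given the markdown cell above, the next code
# cell would presumably be:
analysis.get_observations()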
Code example #7
def get_example_config(which):
    """Example config: which can be 1d or 3d."""
    return AnalysisConfig.read(CONFIG_PATH / f"example-{which}.yaml")
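A hypothetical use of this helper, assuming `CONFIG_PATH` points at the folder holding the example YAML files:

# "1d" or "3d", per the docstring.
config = get_example_config("1d")
analysis = Analysis(config)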
Code example #8
path = Path("analysis_3d")
path.mkdir(exist_ok=True)
config_joint.write(path=path / "config_joint.yaml", overwrite=True)
config_stack.write(path=path / "config_stack.yaml", overwrite=True)

# ## Stacked analysis
#
# ### Data reduction
#
# We first show the steps for the stacked analysis, and repeat the same for the joint analysis later.
#

# In[ ]:

# Reading the YAML file:
config_stacked = AnalysisConfig.read(path=path / "config_stack.yaml")

# In[ ]:

analysis_stacked = Analysis(config_stacked)

# In[ ]:

get_ipython().run_cell_magic(
    'time', '',
    '# select observations:\n'
    'analysis_stacked.get_observations()\n'
    '\n'
    '# run data reduction\n'
    'analysis_stacked.get_datasets()'
)

# We have one final dataset, which you can print and explore.

# In[ ]:
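# The excerpt is cut off here; the truncated cell would presumably print the
# stacked result, e.g.:
print(analysis_stacked.datasets)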
Code example #9
File: make.py Project: gammapy/gammapy-benchmarks
def run_analyses(targets):
    log.info("Run small source extension check.")

    info = {}

    targets = list(AVAILABLE_TARGETS) if targets == "all-targets" else [targets]

    for target in targets:
        t = time.time()

        config = AnalysisConfig.read(f"configs/config_{target}.yaml")
        analysis = Analysis(config)
        analysis.get_observations()
        info["data_preparation"] = time.time() - t

        t = time.time()

        analysis.get_datasets()
        info["data_reduction"] = time.time() - t

        models = Models.read(f"models/model_{target}.yaml")

        point_models = Models(define_model_pointlike(models[0]))
        analysis.set_models(point_models)

        t = time.time()
        analysis.run_fit()

        info["point_model_fitting"] = time.time() - t
        log.info(f"\n{point_models.to_parameters_table()}")

        log.info("Fitting extended gaussian source.")

        analysis.datasets.models = []
        analysis.set_models(models)
        t = time.time()

        analysis.run_fit()

        info["gauss_model_fitting"] = time.time() - t

        log.info(analysis.fit_result)

        log.info(f"\n{models.to_parameters_table()}")

        log.info("Extract size error, UL and stat profile.")

        t = time.time()
        analysis.models[0].spatial_model.lon_0.frozen = True
        analysis.models[0].spatial_model.lat_0.frozen = True
        analysis.models[0].spectral_model.index.frozen = True

        size_est = ExtensionEstimator(
            source=models[0].name,
            energy_edges=[0.2, 10.0] * u.TeV,
            selection_optional=["errn-errp", "ul", "scan"],
            size_min="0.08 deg",
            size_max="0.12 deg",
            size_n_values=20,
            reoptimize=True)
        res = size_est.run(analysis.datasets)

        info["estimator"] = time.time() - t
        t = time.time()

        log.info(res)
        plot_profile(res[0], target)

        Path(f"bench_{target}.yaml").write_text(
            yaml.dump(info, sort_keys=False, indent=4))
        analysis.models.to_parameters_table().write(
            f"results/{target}_results.ecsv", overwrite=True)