Example #1
def plot_results(filename_model, obs_id, filename_dataset=None):
    """Plot the best-fit spectrum, the residual map and the residual significance distribution.

    Parameters
    ----------
    filename_model : str
        Filename of the model definition.
    obs_id : int
        Observation ID.
    filename_dataset : str, optional
        Filename of the dataset.
    """
    log.info(f"Reading {filename_model}")
    model = Models.read(filename_model)

    path = get_filename_best_fit_model(filename_model, obs_id, LIVETIME)
    # model_best_fit = read_best_fit_model(path)
    model_best_fit = Models.read(path)

    plot_spectra(
        model[model.names[0]], model_best_fit[model.names[0]], obs_id, LIVETIME
    )

    dataset = read_dataset(filename_dataset, filename_model, obs_id)
    mod = Models(model_best_fit[model.names[0]])
    # We don't create a background model here; it is probably not needed since no background fitting is performed.
    dataset.models = mod
    plot_residuals(dataset, obs_id, LIVETIME, model.names[0])
    plot_residual_distribution(dataset, obs_id, LIVETIME)
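A call sketch for the function above; the file names are hypothetical, and LIVETIME plus the helper functions are assumed to be defined at module level as in the surrounding script.

# Hypothetical inputs; adjust the paths to the actual model and dataset files.
plot_results("models/my-model.yaml", obs_id=0, filename_dataset="data/my-dataset.yaml")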
Example #2
def plot_results(filename_model, obs_id, filename_dataset=None):
    """Plot the best-fit spectrum, the residual map and the residual significance distribution.

    Parameters
    ----------
    filename_model : str
        Filename of the model definition.
    obs_id : int
        Observation ID.
    filename_dataset : str, optional
        Filename of the dataset.
    """
    log.info(f"Reading {filename_model}")
    model = Models.read(filename_model)

    path = get_filename_best_fit_model(filename_model, obs_id, LIVETIME)
    # model_best_fit = read_best_fit_model(path)
    model_best_fit = Models.read(path)

    plot_spectra(model[model.names[0]], model_best_fit[model.names[0]], obs_id,
                 LIVETIME)

    dataset = read_dataset(filename_dataset, filename_model, obs_id)
    mod = Models(model_best_fit[model.names[0]])
    dataset.models.extend(mod)
    plot_residuals(dataset, obs_id, LIVETIME, model.names[0])
    plot_residual_distribution(dataset, obs_id, LIVETIME)
Example #3
def test_sky_models_io(tmp_path):
    # TODO: maybe change to a test case where we create a model programmatically?
    filename = get_pkg_data_filename("data/examples.yaml")
    models = Models.read(filename)

    models.write(tmp_path / "tmp.yaml")
    models = Models.read(tmp_path / "tmp.yaml")

    assert_allclose(models.parameters["lat_0"].min, -90.0)
Example #4
def test_sky_models_io(tmp_path):
    # TODO: maybe change to a test case where we create a model programmatically?
    filename = get_pkg_data_filename("data/examples.yaml")
    models = Models.read(filename)
    models.covariance = np.eye(len(models.parameters))
    models.write(tmp_path / "tmp.yaml", full_output=True)
    models = Models.read(tmp_path / "tmp.yaml")
    assert models._covar_file == "tmp_covariance.dat"
    assert_allclose(models.covariance.data, np.eye(len(models.parameters)))
    assert_allclose(models.parameters["lat_0"].min, -90.0)
Example #5
    def read(cls, filedata, filemodel):
        """De-serialize datasets from YAML and FITS files.

        Parameters
        ----------
        filedata : str
            File path to the YAML datasets file.
        filemodel : str
            File path to the YAML models file.

        Returns
        -------
        dataset : `gammapy.modeling.Datasets`
            Datasets
        """
        from . import DATASETS

        models = Models.read(make_path(filemodel))
        data_list = read_yaml(make_path(filedata))

        datasets = []
        for data in data_list["datasets"]:
            dataset = DATASETS.get_cls(data["type"]).from_dict(data, models)
            datasets.append(dataset)
        return cls(datasets)
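A minimal usage sketch for this classmethod, assuming it lives on the Datasets container; both YAML file names are hypothetical and must exist on disk.

# Hypothetical file names for the serialized datasets and models.
datasets = Datasets.read(filedata="datasets.yaml", filemodel="models.yaml")
print(len(datasets))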
Example #6
def fit_gather(model_name, livetime, binned=False):
    rows = []

    path = (
        BASE_PATH
        / f"results/models/{model_name}/fit_{livetime.value:.0f}{livetime.unit}"
    )
    if binned:
        path = Path(str(path).replace("/fit", "/fit_fake"))

    for filename in path.glob("*.yaml"):
        # model_best_fit = read_best_fit_model(filename)
        model_best_fit = Models.read(filename)
        model_best_fit = model_best_fit[model_name]
        row = {}

        for par in model_best_fit.parameters:
            row[par.name] = par.value
            row[par.name + "_err"] = par.error

        rows.append(row)

    table = table_from_row_data(rows)
    name = f"fit-results-all_{livetime.value:.0f}{livetime.unit}"
    if binned:
        name = "fit_binned-results-all"
    filename = f"results/models/{model_name}/{name}.fits.gz"
    log.info(f"Writing {filename}")
    table.write(str(filename), overwrite=True)
Example #7
def test_plot_models(caplog):
    models = Models.read("$GAMMAPY_DATA/tests/models/gc_example_models.yaml")

    with mpl_plot_check():
        models.plot_positions()
        models.plot_regions()

    assert models.wcs_geom.data_shape == (171, 147)

    regions = models.to_regions()
    assert len(regions) == 3

    p1 = Model.create(
        "pl-2",
        model_type="spectral",
    )
    g1 = Model.create("gauss", model_type="spatial")
    p2 = Model.create(
        "pl-2",
        model_type="spectral",
    )
    m1 = SkyModel(spectral_model=p1, spatial_model=g1, name="m1")
    m2 = SkyModel(spectral_model=p2, name="m2")
    models = Models([m1, m2])

    models.plot_regions()
    assert caplog.records[-1].levelname == "WARNING"
    assert (caplog.records[-1].message ==
            "Skipping model m2 - no spatial component present")
Example #8
def plot_pull_distribution(model_name, livetime, binned=False):
    name = f"fit-results-all_{livetime.value:.0f}{livetime.unit}"
    if binned:
        name = "fit_binned-results-all"
    filename = BASE_PATH / f"results/models/{model_name}/{name}.fits.gz"
    results = Table.read(str(filename))

    filename_ref = BASE_PATH / f"models/{model_name}.yaml"
    model_ref = Models.read(filename_ref)[0]
    names = [name for name in results.colnames if "err" not in name]

    plots = f"plots_{livetime.value:.0f}{livetime.unit}"
    if binned:
        plots = "plots_fake"
    for name in names:
        # TODO: report mean and stdev here as well
        values = results[name]
        values_err = results[name + "_err"]
        par = model_ref.parameters[name]

        if par.frozen:
            log.info(f"Skipping frozen parameter: {name}")
            continue

        pull = (values - par.value) / values_err

        # print("Number of fits beyond 5 sigmas: ",(np.where( (pull<-5) )))
        plt.hist(pull, bins=21, density=True, range=(-5, 5))
        plt.xlim(-5, 5)
        plt.xlabel("(value - value_true) / error")
        plt.ylabel("PDF")
        plt.title(f"Pull distribution for {model_name}: {name} ")
        filename = f"results/models/{model_name}/{plots}/pull-distribution-{name}.png"
        save_figure(filename)
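For reference, a self-contained sketch of the pull computation plotted above, with made-up numbers; for an unbiased fit with correct error estimates the pulls follow a standard normal distribution.

import numpy as np

# Pull = (fitted value - true value) / fitted error.
values = np.array([1.05, 0.98, 1.02])   # hypothetical fitted values
errors = np.array([0.04, 0.05, 0.03])   # hypothetical fitted errors
pull = (values - 1.0) / errors
print(pull.mean(), pull.std())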
Example #9
def read_best_fit_model(filename):
    log.info(f"Reading {filename}")
    model_best_fit = Models.read(filename)

    path = get_filename_covariance(filename)
    log.info(f"Reading {path}")
    pars = model_best_fit.parameters
    pars.covariance = np.loadtxt(str(path))

    if model_best_fit[0].tag == "SkyDiffuseCube":
        spectral_model_best_fit = model_best_fit[0]
        covar = pars.get_subcovariance(spectral_model_best_fit.parameters)
        spectral_model_best_fit.parameters.covariance = covar

    #        spatial_model_best_fit = model_best_fit[0].spatial_model
    #        covar = pars.get_subcovariance(spatial_model_best_fit.parameters)
    #        spatial_model_best_fit.parameters.covariance = covar

    else:
        spectral_model_best_fit = model_best_fit[0].spectral_model
        covar = pars.get_subcovariance(spectral_model_best_fit.parameters)
        spectral_model_best_fit.parameters.covariance = covar

        spatial_model_best_fit = model_best_fit[0].spatial_model
        covar = pars.get_subcovariance(spatial_model_best_fit.parameters)
        spatial_model_best_fit.parameters.covariance = covar

    return model_best_fit
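The covariance round trip above relies on plain-text arrays; a minimal numpy-only sketch of that round trip, independent of gammapy, with a hypothetical 2x2 matrix.

import numpy as np

# Write a covariance matrix as plain text and read it back unchanged.
covariance = np.array([[0.04, 0.001], [0.001, 0.09]])
np.savetxt("covariance.dat", covariance)
assert np.allclose(np.loadtxt("covariance.dat"), covariance)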
Example #10
def analysis_3d_summary(analysis, target):
    log.info(f"analysis_3d_summary: {target}")
    # TODO:
    # - how to plot SkyModels?
    # - PowerLawSpectralModel is hardcoded; need to find an automatic way

    path = f"{target}/{target}_3d_bestfit.rst"
    tab = Table.read(path, format="ascii")
    tab.add_index("name")
    dt = "U30"
    comp_tab = Table(names=("Param", "DC1 Ref", "gammapy 3d"),
                     dtype=[dt, dt, dt])

    ref_model = Models.read(f"{target}/reference/dc1_model_3d.yaml")
    pars = ref_model.parameters.names
    pars.remove("reference")  # need to find a better way to handle this

    for par in pars:
        ref = ref_model.parameters[par].value
        value = tab.loc[par]["value"]
        name = tab.loc[par]["name"]
        error = tab.loc[par]["error"]
        comp_tab.add_row([name, str(ref), f"{value}±{error}"])

    analysis.datasets["stacked"].counts.sum_over_axes().plot(add_cbar=True)
    plt.savefig(f"{target}/{target}_counts.png", bbox_inches="tight")
    plt.close()

    analysis.datasets["stacked"].plot_residuals_spatial(
        method="diff/sqrt(model)", vmin=-0.5, vmax=0.5)
    plt.savefig(f"{target}/{target}_residuals.png", bbox_inches="tight")
    plt.close()

    # Cannot specify flux_unit; the output cannot be in cm-2 s-1 TeV-1, so it defaults to erg
    ax_sed, ax_residuals = analysis.flux_points.plot_fit()

    pwl = ref_model[target].spectral_model
    ax_sed = pwl.plot(
        (0.1, 50) * u.TeV,
        ax=ax_sed,
        sed_type="e2dnde",
        label="Reference",
        ls="--",
    )
    ax_sed.legend()
    plt.savefig(f"{target}/{target}_fluxpoints.png", bbox_inches="tight")
    plt.close()

    # Generate README.md file with table and plots
    path = f"{target}/spectral_comparison_table.md"
    comp_tab.write(path, format="ascii.html", overwrite=True)

    txt = Path(f"{target}/spectral_comparison_table.md").read_text()
    im1 = f"\n ![Spectra]({target}_fluxpoints.png)"
    im2 = f"\n ![Excess map]({target}_counts.png)"
    im3 = f"\n ![Residual map]({target}_residuals.png)"

    out = txt + im1 + im2 + im3
    Path(f"{target}/README.md").write_text(out)
Example #11
    def read(cls,
             path,
             filedata="_datasets.yaml",
             filemodel="_models.yaml",
             lazy=True,
             cache=True):
        """De-serialize datasets from YAML and FITS files.

        Parameters
        ----------
        path : str, Path
            Base directory of the datasets files.
        filedata : str
            File path or name of the YAML datasets file.
        filemodel : str
            File path or name of the YAML models file.
        lazy : bool
            Whether to lazily load data into memory.
        cache : bool
            Whether to cache the data after loading.

        Returns
        -------
        dataset : `gammapy.datasets.Datasets`
            Datasets
        """
        from . import DATASET_REGISTRY

        path = make_path(path)

        if (path / filedata).exists():
            filedata = path / filedata
        else:
            filedata = make_path(filedata)
        if (path / filemodel).exists():
            filemodel = path / filemodel
        else:
            filemodel = make_path(filemodel)

        models = Models.read(filemodel)
        data_list = read_yaml(filedata)

        datasets = []
        for data in data_list["datasets"]:
            if (path / data["filename"]).exists():
                data["filename"] = str(make_path(path / data["filename"]))

            dataset_cls = DATASET_REGISTRY.get_cls(data["type"])
            dataset = dataset_cls.from_dict(data,
                                            models,
                                            lazy=lazy,
                                            cache=cache)
            datasets.append(dataset)
        return cls(datasets)
Example #12
def run_analysis(estimate):
    """Run analysis from observation selection to model fitting."""
    config = AnalysisConfig.read(f"{estimate}/config.yaml")
    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()

    models = Models.read(f"{estimate}/models.yaml")
    analysis.set_models(models)
    analysis.run_fit()
    return analysis
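A usage sketch, assuming a hypothetical estimate directory named "crab" that contains config.yaml and models.yaml; fit_result is available on the Analysis object after run_fit, as used in Example #29.

# "crab" is a hypothetical estimate directory.
analysis = run_analysis("crab")
print(analysis.fit_result)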
Example #13
def test_sky_model_spatial_none_io(tmpdir):
    pwl = PowerLawSpectralModel()
    model = SkyModel(spectral_model=pwl, name="test")
    models = Models([model])

    filename = tmpdir / "test-models-none.yaml"
    models.write(filename)

    models = Models.read(filename)

    assert models["test"].spatial_model is None
Example #14
    def from_hdulist(cls, hdulist, hdu_bands=None):
        """Create flux map dataset from list of HDUs.

        Parameters
        ----------
        hdulist : `~astropy.io.fits.HDUList`
            List of HDUs.
        hdu_bands : str
            Name of the HDU with the BANDS table. Default is 'BANDS'.
            If set to None, each map should have its own hdu_band.

        Returns
        -------
        flux_maps : `~gammapy.estimators.FluxMaps`
            Flux maps object.
        """
        try:
            sed_type = hdulist[0].header["SED_TYPE"]
        except KeyError:
            raise ValueError(
                f"Cannot determine SED type of flux map from primary header.")

        maps = {}

        for map_type in REQUIRED_MAPS[sed_type]:
            maps[map_type] = Map.from_hdulist(hdulist,
                                              hdu=map_type,
                                              hdu_bands=hdu_bands)

        for map_type in OPTIONAL_QUANTITIES[
                sed_type] + OPTIONAL_QUANTITIES_COMMON:
            if map_type.upper() in hdulist:
                maps[map_type] = Map.from_hdulist(hdulist,
                                                  hdu=map_type,
                                                  hdu_bands=hdu_bands)

        filename = hdulist[0].header.get("MODEL", None)

        if filename:
            reference_model = Models.read(filename)[0]
        else:
            reference_model = None

        if "GTI" in hdulist:
            gti = GTI(Table.read(hdulist["GTI"]))
        else:
            gti = None

        return cls.from_dict(maps=maps,
                             sed_type=sed_type,
                             reference_model=reference_model,
                             gti=gti)
Example #15
def param_sim_model(model):
    dico = dict_model(model)

    filename_ref = THIS_FOLDER / f"../results/models/{model}/fit_{int(LIVETIME.value)}h/best-fit-model_0000.yaml"
    model_ref = Models.read(filename_ref)[1]
    names = model_ref.parameters.free_parameters.names

    for name in names:
        values = model_ref.parameters[name].value
        values_err = model_ref.parameters[name].error

        assert_allclose(values, dico[name][0], rtol=1e-1)
        assert_allclose(values_err, dico[name][1], rtol=1e-1)
Example #16
def fit_model(filename_model,
              filename_dataset,
              obs_id,
              binned=False,
              simple=False):
    """Fit the events using a model.

    Parameters
    ----------
    filename_model : str
        Filename of the model definition.
    filename_dataset : str
        Filename of the dataset to use for simulation.
    obs_id : int
        Observation ID.
    binned : bool
        If True, fit a dataset simulated with dataset.fake().
    simple : bool
        Unused.
    """
    dataset = read_dataset(filename_dataset, filename_model, obs_id)

    log.info(f"Reading {filename_model}")
    models = Models.read(filename_model)

    #    dataset.models = models
    dataset.models.extend(models)
    if binned:
        dataset.fake()

    if dataset.background_model:
        dataset.background_model.parameters["norm"].frozen = True

    fit = Fit([dataset], optimize_opts={"print_level": 1})

    result = fit.run()

    log.info(f"Fit info: {result}")

    # write best fit model
    path = get_filename_best_fit_model(filename_model, obs_id)
    if binned:
        path = Path(str(path).replace("/fit/", "/fit_fake/"))
    log.info(f"Writing {path}")
    models.write(str(path), overwrite=True)

    # write covariance
    path = get_filename_covariance(path)
    if binned:
        path = Path(str(path).replace("/fit/", "/fit_fake/"))
    log.info(f"Writing {path}")

    # TODO: exclude background parameters for now, as they are fixed anyway
    covariance = result.parameters.get_subcovariance(models.parameters)
    np.savetxt(path, covariance)
Example #17
    def read_models(self, path, extend=True):
        """Read models from YAML file.

        Parameters
        ----------
        path : str
            Path to the model file.
        extend : bool
            Extend the existing models on the datasets or replace them.
        """

        path = make_path(path)
        models = Models.read(path)
        self.set_models(models, extend=extend)
        log.info(f"Models loaded from {path}.")
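A usage sketch for this method; the model file name is hypothetical, and analysis is assumed to be an existing Analysis instance.

# Replace the existing models on the datasets instead of extending them.
analysis.read_models("models/my-models.yaml", extend=False)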
Example #18
def fit_model(filename_model,
              filename_dataset,
              obs_id,
              binned=False,
              simple=False):
    """Fit the events using a model.

    Parameters
    ----------
    filename_model : str
        Filename of the model definition.
    filename_dataset : str
        Filename of the dataset to use for simulation.
    obs_id : int
        Observation ID.
    binned : bool
        If True, fit a dataset simulated with dataset.fake().
    simple : bool
        Unused.
    """
    dataset = read_dataset(filename_dataset, filename_model, obs_id)

    log.info(f"Reading {filename_model}")
    models = Models.read(filename_model)

    # dataset.models = models
    dataset.models.extend(models)
    if binned:
        dataset.fake()

    if dataset.background_model:
        dataset.background_model.parameters["norm"].frozen = True

    fit = Fit([dataset])

    result = fit.run(optimize_opts={"print_level": 1})

    log.info(f"Fit info: {result}")

    # write best fit model
    path = get_filename_best_fit_model(filename_model, obs_id, LIVETIME)
    path = path.absolute()
    if binned:
        path = Path(str(path).replace("/fit", "/fit_fake"))
    log.info(f"Writing {path}")
    # write best-fit model and covariance
    dataset.models.write(str(path), overwrite=True)
Example #19
def simulate_events(filename_model, filename_dataset, dataset, obs_id):
    """Simulate events for a given model and dataset.

    Parameters
    ----------
    filename_model : str
        Filename of the model definition.
    filename_dataset : str
        Filename of the dataset to use for simulation.
    dataset : `~gammapy.datasets.MapDataset`
        Dataset to simulate events for.
    obs_id : int
        Observation ID.
    """
    log.info(f"Reading {IRF_FILE}")
    irfs = load_cta_irfs(IRF_FILE)

    #    log.info(f"Reading {filename_dataset}")
    #    dataset = MapDataset.read(filename_dataset)

    log.info(f"Reading {filename_model}")
    models = Models.read(filename_model)
    #    dataset.models = models
    dataset.models.extend(models)

    sampler = MapDatasetEventSampler(random_state=0)

    #    obs_id = np.arange(nobs)
    #    with multiprocessing.Pool(processes=core) as pool:
    #        args1 = zip(obs_id, repeat(POINTING), repeat(LIVETIME), repeat(irfs),
    #                    repeat(dataset), repeat(filename_dataset), repeat(filename_model))
    #        pool.starmap(simulate_parallel, args1)

    #    for obs_id in np.arange(nobs):
    observation = Observation.create(obs_id=obs_id,
                                     pointing=POINTING,
                                     livetime=LIVETIME,
                                     irfs=irfs)

    events = sampler.run(dataset, observation)

    path = get_filename_events(filename_dataset, filename_model, obs_id)
    log.info(f"Writing {path}")
    path.parent.mkdir(exist_ok=True, parents=True)
    events.table.write(str(path), overwrite=True)
Example #20
    def read(cls, filename, filename_models=None, lazy=True, cache=True):
        """De-serialize datasets from YAML and FITS files.

        Parameters
        ----------
        filename : str or `Path`
            File path or name of datasets yaml file
        filename_models : str or `Path`
            File path or name of the models YAML file.
        lazy : bool
            Whether to lazily load data into memory.
        cache : bool
            Whether to cache the data after loading.

        Returns
        -------
        dataset : `gammapy.datasets.Datasets`
            Datasets
        """
        from . import DATASET_REGISTRY

        filename = make_path(filename)
        data_list = read_yaml(filename)

        datasets = []
        for data in data_list["datasets"]:
            path = filename.parent

            if (path / data["filename"]).exists():
                data["filename"] = str(make_path(path / data["filename"]))

            dataset_cls = DATASET_REGISTRY.get_cls(data["type"])
            dataset = dataset_cls.from_dict(data, lazy=lazy, cache=cache)
            datasets.append(dataset)

        datasets = cls(datasets)

        if filename_models:
            datasets.models = Models.read(filename_models)

        return datasets
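A usage sketch, again with hypothetical file names; the models are attached only after all datasets have been built.

# Eagerly load the data and attach the models in one call.
datasets = Datasets.read("datasets.yaml", filename_models="models.yaml", lazy=False)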
Example #21
    def read(cls, path, filedata="_datasets.yaml", filemodel="_models.yaml"):
        """De-serialize datasets from YAML and FITS files.

        Parameters
        ----------
        path : str, Path
            Base directory of the datasets files.
        filedata : str
            File path or name of the YAML datasets file.
        filemodel : str
            File path or name of the YAML models file.

        Returns
        -------
        dataset : `gammapy.modeling.Datasets`
            Datasets
        """
        from . import DATASET_REGISTRY

        path = make_path(path)

        if (path / filedata).exists():
            filedata = path / filedata
        else:
            filedata = make_path(filedata)
        if (path / filemodel).exists():
            filemodel = path / filemodel
        else:
            filemodel = make_path(filemodel)

        models = Models.read(filemodel)
        data_list = read_yaml(filedata)

        datasets = []
        for data in data_list["datasets"]:
            if (path / data["filename"]).exists():
                data["filename"] = str(make_path(path / data["filename"]))
            dataset = DATASET_REGISTRY.get_cls(data["type"]).from_dict(data, models)
            datasets.append(dataset)
        return cls(datasets)
Example #22
def dataset():
    filename_models = "$GAMMAPY_DATA/fermi-3fhl-crab/Fermi-LAT-3FHL_models.yaml"
    models = Models.read(filename_models)

    # Define the free parameters and min, max values
    parameters = models.parameters
    parameters["lon_0"].frozen = False
    parameters["lat_0"].frozen = False
    parameters["norm"].frozen = True
    parameters["alpha"].frozen = True
    parameters["beta"].frozen = True
    parameters["lat_0"].min = -90
    parameters["lat_0"].max = 90
    parameters["lon_0"].min = 0
    parameters["lon_0"].max = 360
    parameters["amplitude"].min = 0.01 * parameters["amplitude"].value
    parameters["amplitude"].max = 100 * parameters["amplitude"].value

    filename = "$GAMMAPY_DATA/fermi-3fhl-crab/Fermi-LAT-3FHL_datasets.yaml"
    datasets = Datasets.read(filename=filename)
    datasets.models = models
    return datasets
Example #23
    def from_hdulist(cls, hdulist, hdu_bands=None, sed_type=None):
        """Create flux map dataset from list of HDUs.

        Parameters
        ----------
        hdulist : `~astropy.io.fits.HDUList`
            List of HDUs.
        hdu_bands : str
            Name of the HDU with the BANDS table. Default is 'BANDS'.
            If set to None, each map should have its own hdu_band.
        sed_type : {"dnde", "flux", "e2dnde", "eflux", "likelihood"}
            SED type.

        Returns
        -------
        flux_maps : `~gammapy.estimators.FluxMaps`
            Flux maps object.
        """
        maps = Maps.from_hdulist(hdulist=hdulist, hdu_bands=hdu_bands)

        if sed_type is None:
            sed_type = hdulist[0].header.get("SED_TYPE", None)

        filename = hdulist[0].header.get("MODEL", None)

        if filename:
            reference_model = Models.read(filename)[0]
        else:
            reference_model = None

        if "GTI" in hdulist:
            gti = GTI(Table.read(hdulist["GTI"]))
        else:
            gti = None

        return cls.from_maps(maps=maps,
                             sed_type=sed_type,
                             reference_model=reference_model,
                             gti=gti)
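A usage sketch with a hypothetical FITS file, e.g. one previously written from a FluxMaps object; when sed_type is omitted it is read from the primary header.

from astropy.io import fits

# "flux_maps.fits" is a hypothetical file name.
with fits.open("flux_maps.fits") as hdulist:
    flux_maps = FluxMaps.from_hdulist(hdulist)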
Example #24
def test_set_parameters_from_table():
    # read gammapy models
    models = Models.read("$GAMMAPY_DATA/tests/models/gc_example_models.yaml")

    tab = models.to_parameters_table()
    tab["value"][0] = 3.0
    tab["min"][0] = -10
    tab["max"][0] = 10
    tab["frozen"][0] = True
    tab["name"][0] = "index2"
    tab["frozen"][1] = True

    models.update_parameters_from_table(tab)

    d = models.parameters.to_dict()
    assert d[0]["value"] == 3.0
    assert d[0]["min"] == -10
    assert d[0]["max"] == 10
    assert d[0]["frozen"]
    assert d[0]["name"] == "index"

    assert d[1]["frozen"]
Example #25
def simulate_events(filename_model, filename_dataset, nobs):
    """Simulate events for a given model and dataset.

    Parameters
    ----------
    filename_model : str
        Filename of the model definition.
    filename_dataset : str
        Filename of the dataset to use for simulation.
    nobs : int
        Number of observations to simulate.
    """
    log.info(f"Reading {IRF_FILE}")
    irfs = load_cta_irfs(IRF_FILE)

    log.info(f"Reading {filename_dataset}")
    dataset = MapDataset.read(filename_dataset)

    log.info(f"Reading {filename_model}")
    models = Models.read(filename_model)
    models.append(FoVBackgroundModel(dataset_name=dataset.name))
    dataset.models = models
#    dataset.models.extend(models)

    sampler = MapDatasetEventSampler(random_state=0)

    for obs_id in np.arange(nobs):
        observation = Observation.create(
            obs_id=obs_id, pointing=POINTING, livetime=LIVETIME, irfs=irfs
        )

        events = sampler.run(dataset, observation)

        path = get_filename_events(filename_dataset, filename_model, obs_id)
        log.info(f"Writing {path}")
        path.parent.mkdir(exist_ok=True, parents=True)
        events.table.write(str(path), overwrite=True)
Example #26
def test_missing_parameters():
    filename = get_pkg_data_filename("data/examples.yaml")
    models = Models.read(filename)
    assert models["source1"].spatial_model.e in models.parameters
    assert len(models["source1"].spatial_model.parameters) == 6
Example #27
    def from_hdulist(cls, hdulist, hdu_bands=None):
        """Create flux map dataset from list of HDUs.

        Parameters
        ----------
        hdulist : `~astropy.io.fits.HDUList`
            List of HDUs.
        hdu_bands : str
            Name of the HDU with the BANDS table. Default is 'BANDS'.
            If set to None, each map should have its own hdu_band.

        Returns
        -------
        fluxmaps : `~gammapy.estimators.FluxMaps`
            The flux maps object.
        """
        try:
            sed_type = hdulist[0].header["SED_TYPE"]
        except KeyError:
            raise ValueError(
                f"Cannot determine SED type of flux map from primary header.")

        result = {}
        for map_type in REQUIRED_MAPS[sed_type]:
            if map_type.upper() in hdulist:
                result[map_type] = Map.from_hdulist(hdulist,
                                                    hdu=map_type,
                                                    hdu_bands=hdu_bands)
            else:
                raise ValueError(
                    f"Cannot find required map {map_type} for SED type {sed_type}."
                )

        for map_type in OPTIONAL_MAPS[sed_type]:
            if map_type.upper() in hdulist:
                result[map_type] = Map.from_hdulist(hdulist,
                                                    hdu=map_type,
                                                    hdu_bands=hdu_bands)

        # Read additional image hdus
        for hdu in hdulist[1:]:
            if hdu.is_image:
                if hdu.name.lower() not in (REQUIRED_MAPS[sed_type] +
                                            OPTIONAL_MAPS[sed_type]):
                    result[hdu.name.lower()] = Map.from_hdulist(
                        hdulist, hdu=hdu.name, hdu_bands=hdu_bands)

        model_filename = hdulist[0].header.get("MODEL", None)

        reference_model = None
        if model_filename:
            try:
                reference_model = Models.read(model_filename)[0]
            except FileNotFoundError:
                raise FileNotFoundError(
                    f"Cannot find {model_filename} model file. Check MODEL keyword."
                )

        if "GTI" in hdulist:
            gti = GTI(Table.read(hdulist["GTI"]))
        else:
            gti = None

        return cls.from_dict(result, sed_type, reference_model, gti)
Example #28
    def read_models(self, path):
        """Read models from YAML file."""
        path = make_path(path)
        models = Models.read(path)
        self.set_models(models)
Example #29
def run_analyses(targets):
    log.info("Run small source extension check.")

    info = {}

    targets = list(AVAILABLE_TARGETS) if targets == "all-targets" else [targets]

    for target in targets:
        t = time.time()

        config = AnalysisConfig.read(f"configs/config_{target}.yaml")
        analysis = Analysis(config)
        analysis.get_observations()
        info["data_preparation"] = time.time() - t

        t = time.time()

        analysis.get_datasets()
        info["data_reduction"] = time.time() - t

        models = Models.read(f"models/model_{target}.yaml")

        point_models = Models(define_model_pointlike(models[0]))
        analysis.set_models(point_models)

        t = time.time()
        analysis.run_fit()

        info["point_model_fitting"] = time.time() - t
        log.info(f"\n{point_models.to_parameters_table()}")

        log.info("Fitting extended gaussian source.")

        analysis.datasets.models = []
        analysis.set_models(models)
        t = time.time()

        analysis.run_fit()

        info["gauss_model_fitting"] = time.time() - t

        log.info(analysis.fit_result)

        log.info(f"\n{models.to_parameters_table()}")

        log.info("Extract size error, UL and stat profile.")

        t = time.time()
        analysis.models[0].spatial_model.lon_0.frozen = True
        analysis.models[0].spatial_model.lat_0.frozen = True
        analysis.models[0].spectral_model.index.frozen = True

        size_est = ExtensionEstimator(
            source=models[0].name,
            energy_edges=[0.2, 10.0] * u.TeV,
            selection_optional=["errn-errp", "ul", "scan"],
            size_min="0.08 deg",
            size_max="0.12 deg",
            size_n_values=20,
            reoptimize=True)
        res = size_est.run(analysis.datasets)

        info["estimator"] = time.time() - t
        t = time.time()

        log.info(res)
        plot_profile(res[0], target)

        Path(f"bench_{target}.yaml").write_text(
            yaml.dump(info, sort_keys=False, indent=4))
        analysis.models.to_parameters_table().write(
            f"results/{target}_results.ecsv", overwrite=True)