Example #1
    def _run_sherpa_fit(self):
        """Plain sherpa fit not using the session object
        """
        from sherpa.astro import datastack
        log.info("Starting SHERPA")
        log.info(self.info())
        ds = datastack.DataStack()
        ds.load_pha(self.pha_list)
        ds.set_source(self.model)
        thres_lo = self.energy_threshold_low.to('keV').value
        thres_hi = self.energy_threshold_high.to('keV').value
        ds.notice(thres_lo, thres_hi)
        datastack.set_stat(self.statistic)
        ds.fit()
        datastack.covar()
        covar = datastack.get_covar_results()
        efilter = datastack.get_filter()

        # First attempt at calculating flux points, following
        # http://cxc.harvard.edu/sherpa/faq/phot_plot.html
        # This should be split out and improved
        fit_plot = datastack.get_fit_plot()
        xx = fit_plot.dataplot.x
        dd = fit_plot.dataplot.y
        ee = fit_plot.dataplot.yerr
        mm = fit_plot.modelplot.y
        src = datastack.get_source()(xx)
        points = dd / mm * src
        errors = ee / mm * src
        flux_graph = dict(energy=xx, flux=points, flux_err_hi=errors,
                          flux_err_lo=errors)

        from gammapy.spectrum.results import SpectrumFitResult
        self.result = SpectrumFitResult.from_sherpa(covar, efilter, self.model)
        ds.clear_stack()
        ds.clear_models()
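
The flux-point conversion above (scaling the counts-space data/model ratio by
the unconvolved source model) can be factored into a small helper; a minimal
sketch, where `fit_plot` is the object returned by datastack.get_fit_plot()
and `source_func` is the callable returned by datastack.get_source():

def flux_points(fit_plot, source_func):
    """Convert a counts-space fit into flux points.

    Follows http://cxc.harvard.edu/sherpa/faq/phot_plot.html
    """
    xx = fit_plot.dataplot.x
    src = source_func(xx)
    ratio = fit_plot.dataplot.y / fit_plot.modelplot.y
    err = fit_plot.dataplot.yerr / fit_plot.modelplot.y
    return dict(energy=xx, flux=ratio * src,
                flux_err_hi=err * src, flux_err_lo=err * src)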
Example #2
def test_pha_case_6(ds_setup, ds_datadir):

    datadir = ds_datadir
    # the '@' prefix tells load_pha to read the file as a list of PHA files
    ls = '@' + '/'.join((datadir, 'pha.lis'))
    rmf1 = '/'.join((datadir, "acisf04938_000N002_r0043_rmf3.fits"))
    rmf2 = '/'.join((datadir, "acisf07867_000N001_r0002_rmf3.fits"))
    arf1 = '/'.join((datadir, "acisf04938_000N002_r0043_arf3.fits"))
    arf2 = '/'.join((datadir, "acisf07867_000N001_r0002_arf3.fits"))
    datastack.load_pha(ls)

    datastack.load_bkg_rmf([], rmf1)
    datastack.load_bkg_rmf([], rmf2)

    datastack.load_bkg_arf([], arf1)
    datastack.load_bkg_arf([], arf2)

    # Define background models
    bkg_arfs = datastack.get_bkg_arf([])
    bkg_scales = datastack.get_bkg_scale([])
    bkg_models = [
        ui.const1d.c1 * acis_bkg_model('acis7s'),
        ui.const1d.c2 * acis_bkg_model('acis7s')
    ]
    bkg_rsps = datastack.get_response([], bkg_id=1)
    for i in range(2):
        id_ = i + 1
        # Make the ARF spectral response flat.  This is required for using
        # the acis_bkg_model.
        bkg_arfs[i].specresp = bkg_arfs[i].specresp * 0 + 1.
        datastack.set_bkg_full_model(id_, bkg_rsps[i](bkg_models[i]))

    # Fit background
    datastack.notice(0.5, 8.)
    datastack.set_method("neldermead")
    datastack.set_stat("cash")

    datastack.thaw(c1.c0)
    datastack.thaw(c2.c0)
    datastack.fit_bkg()
    datastack.freeze(c1.c0)
    datastack.freeze(c2.c0)

    # Define source models
    rsps = datastack.get_response([])
    src_model = ui.powlaw1d.pow1
    src_models = [src_model, src_model * ui.const1d.ratio_12]
    for i in range(2):
        id_ = i + 1
        datastack.set_full_model(id_,
                                 (rsps[i](src_models[i]) +
                                  bkg_scales[i] * bkg_rsps[i](bkg_models[i])))

    datastack.fit()
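
The full-model pattern above generalizes: the source dataset is modelled as the
response-folded source plus the scaled, response-folded background. A schematic
single-dataset sketch using plain sherpa.astro.ui (the function name and model
arguments are hypothetical):

from sherpa.astro import ui

def set_source_plus_bkg(id_, src_model, bkg_model):
    """Set full model = RSP(source) + backscale * RSP_bkg(background)."""
    rsp = ui.get_response(id_)                # source response (ARF * RMF)
    bkg_rsp = ui.get_response(id_, bkg_id=1)  # background response
    scale = ui.get_bkg_scale(id_)             # background scaling factor
    ui.set_full_model(id_, rsp(src_model) + scale * bkg_rsp(bkg_model))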
Example #3
    def test_case_6(self):
        datadir = '/'.join((self._this_dir, 'data'))
        ls = '@' + '/'.join((datadir, 'pha.lis'))
        rmf1 = '/'.join((datadir, "acisf04938_000N002_r0043_rmf3.fits"))
        rmf2 = '/'.join((datadir, "acisf07867_000N001_r0002_rmf3.fits"))
        arf1 = '/'.join((datadir, "acisf04938_000N002_r0043_arf3.fits"))
        arf2 = '/'.join((datadir, "acisf07867_000N001_r0002_arf3.fits"))
        datastack.load_pha(ls)

        datastack.load_bkg_rmf([], rmf1)
        datastack.load_bkg_rmf([], rmf2)

        datastack.load_bkg_arf([], arf1)
        datastack.load_bkg_arf([], arf2)

        # Define background models
        bkg_arfs = datastack.get_bkg_arf([])
        bkg_scales = datastack.get_bkg_scale([])
        bkg_models = [ui.const1d.c1 * acis_bkg_model('acis7s'),
                      ui.const1d.c2 * acis_bkg_model('acis7s')]
        bkg_rsps = datastack.get_response([], bkg_id=1)
        for i in range(2):
            id_ = i + 1
            # Make the ARF spectral response flat.  This is required for using
            # the acis_bkg_model.
            bkg_arfs[i].specresp = bkg_arfs[i].specresp * 0 + 1.
            datastack.set_bkg_full_model(id_, bkg_rsps[i](bkg_models[i]))

        # Fit background
        datastack.notice(0.5, 8.)
        datastack.set_method("neldermead")
        datastack.set_stat("cash")

        datastack.thaw(c1.c0)
        datastack.thaw(c2.c0)
        datastack.fit_bkg()
        datastack.freeze(c1.c0)
        datastack.freeze(c2.c0)

        # Define source models
        rsps = datastack.get_response([])
        src_model = ui.powlaw1d.pow1
        src_models = [src_model,
                      src_model * ui.const1d.ratio_12]
        for i in range(2):
            id_ = i + 1
            datastack.set_full_model(id_, (rsps[i](src_models[i]) +
                                           bkg_scales[i] *
                                           bkg_rsps[i](bkg_models[i])))

        datastack.fit()
Example #4
    def _run_sherpa_fit(self):
        """Plain sherpa fit not using the session object
        """
        from sherpa.astro import datastack
        log.info("Starting SHERPA")
        log.info(self.info())
        ds = datastack.DataStack()
        ds.load_pha(self.pha_list)
        ds.set_source(self.model)
        thres_lo = self.energy_threshold_low.to('keV').value
        thres_hi = self.energy_threshold_high.to('keV').value
        ds.notice(thres_lo, thres_hi)
        datastack.set_stat(self.statistic)
        ds.fit()
        ds.clear_stack()
        ds.clear_models()
Example #5
    def _run_sherpa_fit(self):
        """Plain sherpa fit using the session object
        """
        from sherpa.astro import datastack
        log.info("Starting SHERPA")
        log.info(self.info())
        ds = datastack.DataStack()
        ds.load_pha(self.pha_list)

        # Make model amplitude O(1e0)
        model = self.model * self.FLUX_FACTOR
        ds.set_source(model)
        thres_lo = self.energy_threshold_low.to('keV').value
        thres_hi = self.energy_threshold_high.to('keV').value

        namedataset = []
        for i in range(len(ds.datasets)):
            datastack.notice_id(i + 1, thres_lo[i], thres_hi[i])
            namedataset.append(i + 1)
        datastack.set_stat(self.statistic)
        ds.fit(*namedataset)
        datastack.covar(*namedataset)
        covar = datastack.get_covar_results()
        efilter = datastack.get_filter()

        # First attempt at calculating flux points, following
        # http://cxc.harvard.edu/sherpa/faq/phot_plot.html
        # This should be split out and improved
        fit_plot = datastack.get_fit_plot()
        xx = fit_plot.dataplot.x
        dd = fit_plot.dataplot.y
        ee = fit_plot.dataplot.yerr
        mm = fit_plot.modelplot.y
        src = datastack.get_source()(xx)
        points = dd / mm * src
        errors = ee / mm * src
        flux_graph = dict(energy=xx,
                          flux=points,
                          flux_err_hi=errors,
                          flux_err_lo=errors)

        from gammapy.spectrum.results import SpectrumFitResult
        self.result = SpectrumFitResult.from_sherpa(covar, efilter, self.model)
        ds.clear_stack()
        ds.clear_models()
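
The per-observation energy filter above relies on notice_id, which applies the
selection to a single dataset only; a minimal standalone sketch with
hypothetical thresholds in keV:

from sherpa.astro import datastack

# hypothetical per-dataset thresholds in keV
thresholds = [(0.5, 50.0), (0.8, 40.0)]
for i, (lo, hi) in enumerate(thresholds):
    datastack.notice_id(i + 1, lo, hi)  # dataset ids start at 1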
Example #6
# Change reference energy of the model
p1.ref = 1e9  # 1 TeV = 1e9 keV
p1.gamma = 2.0
p1.ampl = 1e-20  # in cm**-2 s**-1 keV**-1

# View parameters
print(p1)

# ## Fit and error estimation
#
# We need to set the correct statistic: [WSTAT](http://cxc.harvard.edu/sherpa/ahelp/wstat.html). We use the functions [set_stat](http://cxc.harvard.edu/sherpa/ahelp/set_stat.html) to define the fit statistic, [notice](http://cxc.harvard.edu/sherpa/ahelp/notice.html) to set the energy range, and [fit](http://cxc.harvard.edu/sherpa/ahelp/fit.html) to perform the fit.

# In[ ]:

### Define the statistic
sh.set_stat("wstat")

### Define the fit range
ds.notice(0.6e9, 20e9)

### Do the fit
ds.fit()

# ## Results plot
#
# Note that sherpa does not provide flux points. It also only provides plots for each individual spectrum.

# In[ ]:

sh.get_data_plot_prefs()["xlog"] = True
sh.get_data_plot_prefs()["ylog"] = True
Example #7
import numpy as np  # assumed import; the snippet uses np without importing it
from sherpa.astro import ui as sh  # assumed alias for the bare `sh` calls below


def run_sherpa_fit(data, plot_fit=None, eval_contours=None):
    """Perform a spectrum fit using sherpa.
    http://cxc.harvard.edu/sherpa/ahelp/fit.html

    data : sherpa DataStack holding the loaded observations
    plot_fit : output directory for per-run fit plots (None to skip plotting)
    eval_contours : output directory for confidence contours (None to skip)
    """
    data.show_stack()
    # define the source model
    data.set_source("logparabola.p1")

    # Change reference energy of the model
    p1.ref = 1e9  # 1 TeV = 1e9 keV
    p1.c1 = 2.0
    p1.c2 = 0.5
    p1.ampl = 1e-20  # in cm**-2 s**-1 keV**-1
    # view parameters
    print(p1)

    # define the statistic
    sh.set_stat("wstat")

    # retrieve the ids of the observations in the datastack; we need them
    # both to plot the single-run fits and for the covariance estimation
    data_dict = data.dataset_ids
    obs_ids = [data_dict[key]["id"] for key in data_dict]
    print("found observation ids", obs_ids)

    # ignore the bins above the energy threshold
    for _id in obs_ids:
        sh.ignore_bad(_id)

    # run the fit
    data.fit()

    # produce diagnostic plots (data and model) for each run
    if plot_fit is not None:
        import matplotlib

        matplotlib.use("agg")  # select the backend before importing pyplot
        import matplotlib.pyplot as plt
        for idx, _id in enumerate(obs_ids):
            sh.set_analysis(_id, "energy", "counts", factor=0)
            sh.get_data_plot_prefs()["xlog"] = True
            sh.get_data_plot_prefs()["ylog"] = True
            sh.plot_fit(id=_id)
            sh.plot_fit_resid(id=_id)
            # TODO: simplify obs_id handling!
            dl3_obs_id = data.datasets[idx]["data"].header["OBS_ID"]
            path_plot_fit = plot_fit + "/plot_fit_sherpa_obs_id_{}.png".format(
                dl3_obs_id)
            plt.savefig(path_plot_fit)

    # evaluate covariance and errors
    sh.covariance(*obs_ids)
    covar = sh.get_covar_results()

    # store the output in a dictionary, keeping Sherpa's nomenclature
    results = dict()
    results["parnames"] = covar.parnames  # parameter names, tuple
    results["parvals"] = covar.parvals  # parameter values, tuple
    results["parmins"] = covar.parmins  # parameter min error, tuple
    results["parmaxes"] = covar.parmaxes  # parameter max error, tuple
    results["extra_output"] = covar.extra_output  # covariance matrix, numpy array

    status = sh.get_fit_results()
    results["statname"] = status.statname
    results["statval"] = status.statval
    # for the energy range, take the x axis of the plot of the first id
    fit_range = sh.get_fit_plot(obs_ids[0]).dataplot.x
    results["fit_range"] = (fit_range[0], fit_range[-1])

    # dictionary for the contours; it stays empty if eval_contours is None
    contours = dict()

    if eval_contours is not None:
        # evaluate the confidence contours of each pair of thawed parameters
        contour_ampl_c1 = dict()
        contour_ampl_c2 = dict()
        contour_c1_c2 = dict()

        # dimension of the grid to scan the confidence contours
        nloop = 50
        # ranges of the parameters
        c1_range = [1.5, 3.5]
        c2_range = [-0.2, 0.8]
        ampl_range = [2. * 1e-20, 6. * 1e-20]

        # amplitude vs c1
        sh.reg_proj(
            p1.ampl,
            p1.c1,
            id=obs_ids[0],
            otherids=obs_ids[1:],
            sigma=[1, 2, 3],
            min=[ampl_range[0], c1_range[0]],
            max=[ampl_range[1], c1_range[1]],
            nloop=(nloop, nloop),
        )

        # tmp_proj holds the output of get_reg_proj()
        # http://cxc.harvard.edu/sherpa/ahelp/get_reg_proj.html
        tmp_proj = sh.get_reg_proj()
        # reshape in matrix form
        tmp_proj.y.shape = (nloop, nloop)
        # from now on we copy the arrays: a variable that merely points at the
        # output of `sh.get_reg_proj()` would change when we rerun the method
        # on another pair of parameters
        contour_ampl_c1["like_values"] = tmp_proj.y.copy()
        # the x0 array repeats its values every nloop entries
        contour_ampl_c1["x0"] = tmp_proj.x0[:nloop].copy()
        # the x1 array repeats its values as well
        contour_ampl_c1["x1"] = tmp_proj.x1[:nloop].copy()
        contour_ampl_c1["levels"] = tmp_proj.levels.copy()
        # store also the parameter range we have investigated
        contour_ampl_c1["x0_range"] = (ampl_range[0], ampl_range[1])
        contour_ampl_c1["x1_range"] = (c1_range[0], c1_range[1])

        # amplitude vs c2
        sh.reg_proj(
            p1.ampl,
            p1.c2,
            id=obs_ids[0],
            otherids=obs_ids[1:],
            sigma=[1, 2, 3],
            min=[ampl_range[0], c2_range[0]],
            max=[ampl_range[1], c2_range[1]],
            nloop=(nloop, nloop),
        )

        tmp_proj = sh.get_reg_proj()
        # reshape in matrix form
        tmp_proj.y.shape = (nloop, nloop)
        contour_ampl_c2["like_values"] = tmp_proj.y.copy()
        contour_ampl_c2["x0"] = tmp_proj.x0[:nloop].copy()
        contour_ampl_c2["x1"] = tmp_proj.x1[:nloop].copy()
        contour_ampl_c2["levels"] = tmp_proj.levels.copy()
        contour_ampl_c2["x0_range"] = (ampl_range[0], ampl_range[1])
        contour_ampl_c2["x1_range"] = (c2_range[0], c2_range[1])

        # c1 vs c2
        sh.reg_proj(
            p1.c1,
            p1.c2,
            id=obs_ids[0],
            otherids=obs_ids[1:],
            sigma=[1, 2, 3],
            min=[c1_range[0], c2_range[0]],
            max=[c1_range[1], c2_range[1]],
            nloop=(nloop, nloop),
        )

        tmp_proj = sh.get_reg_proj()
        # reshape in matrix form
        tmp_proj.y.shape = (nloop, nloop)
        contour_c1_c2["like_values"] = tmp_proj.y.copy()
        contour_c1_c2["x0"] = tmp_proj.x0[:nloop].copy()
        contour_c1_c2["x1"] = tmp_proj.x1[:nloop].copy()
        contour_c1_c2["levels"] = tmp_proj.levels.copy()
        contour_c1_c2["x0_range"] = (c1_range[0], c1_range[1])
        contour_c1_c2["x1_range"] = (c2_range[0], c2_range[1])

        # add the dictionaries with the confidence contours to the final
        # output dictionary
        contours["contour_ampl_c1"] = contour_ampl_c1
        contours["contour_ampl_c2"] = contour_ampl_c2
        contours["contour_c1_c2"] = contour_c1_c2

    if eval_contours is not None:
        # write the contours in a .npy file
        path_contours = eval_contours + "/fit_contours_logparabola.npy"
        np.save(path_contours, contours)
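        # note: the saved dict can be reloaded later with
        # np.load(path_contours, allow_pickle=True).item()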

    # return the dictionary with the results of the fit
    return results
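
A hypothetical call site, assuming `data` is a sherpa DataStack already loaded
with the PHA observations (the output directory names are placeholders):

results = run_sherpa_fit(data, plot_fit="plots", eval_contours="contours")
print(results["parnames"])
print(results["parvals"])
print("fit range:", results["fit_range"])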