Code Example #1
File: psf_core.py  Project: gabemery/gammapy
 def set(self):
     """Set the PSF for Sherpa."""
     import sherpa.astro.ui as sau
     from ..scripts.image_fit import read_json
     read_json(self.pars, sau.set_model)
     sau.load_psf('psf', sau.get_model())
     self.center_psf()
     sau.set_psf('psf')
Code Example #2
File: psf.py  Project: mwcraig/gammapy
 def set(self):
     """Set the PSF for Sherpa."""
     import sherpa.astro.ui as sau
     # from morphology.utils import read_json
     read_json(self.pars, sau.set_model)
     sau.load_psf('psf', sau.get_model())
     sau.set_psf('psf')
     self.center_psf()
Code Example #3
File: psf_core.py  Project: vorugantia/gammapy
 def set(self):
     """Set the PSF for Sherpa."""
     import sherpa.astro.ui as sau
     # from gammapy.image.models.utils import read_json
     read_json(self.pars, sau.set_model)
     sau.load_psf('psf', sau.get_model())
     self.center_psf()
     sau.set_psf('psf')
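
The three gammapy variants above differ only in a commented-out import and in the order of the center_psf() and set_psf() calls; the underlying Sherpa calls are the same. Below is a minimal sketch of that call sequence outside the wrapper class, using hypothetical file and component names (counts.fits, psf_image.fits, gauss2d.src) purely for illustration.

import sherpa.astro.ui as sau

# Hypothetical inputs, shown only to illustrate the load_psf/set_psf pattern.
sau.load_image('counts.fits')           # 2D counts image to be fitted
sau.set_source(sau.gauss2d.src)         # some 2D source model
sau.load_psf('psf', 'psf_image.fits')   # build a PSF model named 'psf' from an image
sau.set_psf('psf')                      # convolve the source model with that PSF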
Code Example #4
File: pcaUtil.py  Project: neobar/BXA
def loadSrcModel(id=1, readFrom='srcPowerLaw.json'):
    """
    """
    with open(readFrom, 'r') as f:
        parDict = json.load(f)
    srcModel = ui.get_model(id=id)
    for p in srcModel.pars:
        p.val = parDict[p.fullname]
Code Example #5
File: run_fit.py  Project: JonathanDHarris/gammapy
def sherpa_spec_fit(in_files,
                    model,
                    noplot,
                    manual,
                    reproj,
                    do_conf):
    """Reads a set of pha files and performs a maximum likelihood fit.
    """
    import logging

    logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s')

    # from sherpa.astro.ui import *  # TEST if needed (only when outside sherpa env)
    import sherpa.astro.ui as sau
    from . import load_model
    from . import make_plot
    from .specsource import SpecSource

    logger = logging.getLogger("sherpa")
    logger.setLevel(logging.ERROR)

    # Read and load the data and model:
    list_data = []

    emax = 9e10  # Maximum energy taken into account for the fit --> Compute properly (as a function of the max E event?)
    # We don't use a specific maximum reduced statistic value since we don't expect the cstat to be anywhere near the large number limit
    sau.set_conf_opt("max_rstat", 10000)

    p1 = load_model.load_model(model[0])  # [-1] #load model returns an array of model components
    # where the last component is the total model
    spec = SpecSource('SRC', in_files)

    if reproj == 0:
        myspec = spec
    elif reproj == 3:
        myspec = spec.reproject(nbins={'offset': 5, 'eff': 10, 'zen': 10})
    elif reproj == 2:
        myspec = spec.reproject(nbins={'offset': 5, 'eff': 20, 'zen': 12})
    elif reproj == 1:
        myspec = spec.reproject(nbins={'offset': 25, 'eff': 40, 'zen': 30})
    elif reproj == 4:
        myspec = spec.reproject(nbins={'offset': 1, 'eff': 1, 'zen': 1})

    myspec.set_source(p1)

    if manual:
        load_model.set_manual_model(sau.get_model(datid))  # Parameters for all runs are linked
    else:
        print('Using default initial values for model parameters')

    myspec.fit(do_conf=do_conf)

    if noplot:
        quit()
    make_plot.make_plot(list_data, p1)

    raw_input('Press <ENTER> to continue')
Code Example #6
    def test_set_analysis_wave_fabrizio(self):
        rmf = self.datadir + '/ciao4.3/fabrizio/Data/3c273.rmf'
        arf = self.datadir + '/ciao4.3/fabrizio/Data/3c273.arf'

        ui.set_model("fabrizio", "xspowerlaw.p1")
        ui.fake_pha("fabrizio", arf, rmf, 10000)

        model = ui.get_model("fabrizio")
        bare_model, _ = ui._session._get_model_status("fabrizio")
        y = bare_model.calc([1, 1], model.xlo, model.xhi)
        y_m = numpy.mean(y)

        ui.set_analysis("fabrizio", "wave")

        model2 = ui.get_model("fabrizio")
        bare_model2, _ = ui._session._get_model_status("fabrizio")
        y2 = bare_model2.calc([1, 1], model2.xlo, model2.xhi)
        y2_m = numpy.mean(y2)

        self.assertAlmostEqual(y_m, y2_m)
Code Example #7
File: test_xspec.py  Project: anetasie/sherpa
    def test_set_analysis_wave_fabrizio(self):
        rmf = self.datadir + '/ciao4.3/fabrizio/Data/3c273.rmf'
        arf = self.datadir + '/ciao4.3/fabrizio/Data/3c273.arf'

        ui.set_model("fabrizio", "xspowerlaw.p1")
        ui.fake_pha("fabrizio", arf, rmf, 10000)

        model = ui.get_model("fabrizio")
        bare_model, _ = ui._session._get_model_status("fabrizio")
        y = bare_model.calc([1,1], model.xlo, model.xhi)
        y_m = numpy.mean(y)

        ui.set_analysis("fabrizio","wave")

        model2 = ui.get_model("fabrizio")
        bare_model2, _ = ui._session._get_model_status("fabrizio")
        y2 = bare_model2.calc([1,1], model2.xlo, model2.xhi)
        y2_m = numpy.mean(y2)

        self.assertAlmostEqual(y_m, y2_m)
Code Example #8
File: test_xspec.py  Project: DougBurke/sherpa
    def test_set_analysis_wave_fabrizio(self):
        rmf = self.make_path('3c273.rmf')
        arf = self.make_path('3c273.arf')

        ui.set_model("fabrizio", "xspowerlaw.p1")
        ui.fake_pha("fabrizio", arf, rmf, 10000)

        parvals = [1, 1]

        model = ui.get_model("fabrizio")
        bare_model, _ = ui._session._get_model_status("fabrizio")
        y = bare_model.calc(parvals, model.xlo, model.xhi)
        y_m = numpy.mean(y)

        ui.set_analysis("fabrizio", "wave")

        model2 = ui.get_model("fabrizio")
        bare_model2, _ = ui._session._get_model_status("fabrizio")
        y2 = bare_model2.calc(parvals, model2.xlo, model2.xhi)
        y2_m = numpy.mean(y2)

        self.assertAlmostEqual(y_m, y2_m)
Code Example #9
File: test_xspec.py  Project: nplee/sherpa
def test_set_analysis_wave_fabrizio(clean_astro_ui, make_data_path):
    rmf = make_data_path('3c273.rmf')
    arf = make_data_path('3c273.arf')

    ui.set_model("fabrizio", "xspowerlaw.p1")
    ui.fake_pha("fabrizio", arf, rmf, 10000)

    parvals = [1, 1]

    model = ui.get_model("fabrizio")
    bare_model, _ = ui._session._get_model_status("fabrizio")
    y = bare_model.calc(parvals, model.xlo, model.xhi)
    y_m = numpy.mean(y)

    ui.set_analysis("fabrizio", "wave")

    model2 = ui.get_model("fabrizio")
    bare_model2, _ = ui._session._get_model_status("fabrizio")
    y2 = bare_model2.calc(parvals, model2.xlo, model2.xhi)
    y2_m = numpy.mean(y2)

    assert y2_m == pytest.approx(y_m)
Code Example #10
File: pcaUtil.py  Project: neobar/BXA
def saveSrcModel(id=1, writeTo='srcPowerLaw.json', stat=True, info={}):
    """
    """
    srcModel = ui.get_model(id=id)
    parDict = {p.fullname: p.val for p in srcModel.pars}
    if stat:
        fsrc, *_ = ui.get_stat_info()
        for i in ['statname', 'numpoints', 'dof', 'qval', 'rstat', 'statval']:
            parDict[i] = getattr(fsrc, i)
    for key, val in info.items():
        parDict[key] = val
    with open(writeTo, 'w') as f:
        json.dump(parDict, f)
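
Code Examples #4 and #10 are the two halves of a JSON round trip for model parameters. A minimal, hedged usage sketch (assuming json and sherpa.astro.ui are imported as in pcaUtil.py, and using a dummy dataset purely for illustration):

import json
import sherpa.astro.ui as ui

# Dummy data and model so the round trip is runnable on its own.
ui.load_arrays(1, [1, 2, 3], [3, 5, 9])
ui.set_source(1, ui.powlaw1d.pl)

saveSrcModel(id=1, writeTo='srcPowerLaw.json')   # dump parameter values (plus fit statistics)
loadSrcModel(id=1, readFrom='srcPowerLaw.json')  # restore them into the same model expression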
Code Example #11
    def test_set_analysis_wave_fabrizio(self):
        rmf = self.make_path('3c273.rmf')
        arf = self.make_path('3c273.arf')

        ui.set_model("fabrizio", "xspowerlaw.p1")
        ui.fake_pha("fabrizio", arf, rmf, 10000)

        parvals = [1, 1]

        model = ui.get_model("fabrizio")
        bare_model, _ = ui._session._get_model_status("fabrizio")
        y = bare_model.calc(parvals, model.xlo, model.xhi)
        y_m = numpy.mean(y)

        ui.set_analysis("fabrizio", "wave")

        model2 = ui.get_model("fabrizio")
        bare_model2, _ = ui._session._get_model_status("fabrizio")
        y2 = bare_model2.calc(parvals, model2.xlo, model2.xhi)
        y2_m = numpy.mean(y2)

        self.assertAlmostEqual(y_m, y2_m)
Code Example #12
def sherpa_spec_fit(in_files, model, noplot, manual, reproj, do_conf):
    """Reads a set of pha files and performs a maximum likelihood fit.
    """
    # from sherpa.astro.ui import *  # TEST if needed (only when outside sherpa env)
    import sherpa.astro.ui as sau
    from . import load_model
    from . import make_plot
    from .specsource import SpecSource

    # Read and load the data and model:
    list_data = []

    emax = 9e10  # Maximum energy taken into account for the fit --> Compute properly (as a function of the max E event?)
    # We don't use a specific maximum reduced statistic value since we don't expect the cstat to be anywhere near the large number limit
    sau.set_conf_opt("max_rstat", 10000)

    p1 = load_model.load_model(
        model[0])  # [-1] #load model returns an array of model components
    # where the last component is the total model
    spec = SpecSource('SRC', in_files)

    if reproj == 0:
        myspec = spec
    elif reproj == 3:
        myspec = spec.reproject(nbins={'offset': 5, 'eff': 10, 'zen': 10})
    elif reproj == 2:
        myspec = spec.reproject(nbins={'offset': 5, 'eff': 20, 'zen': 12})
    elif reproj == 1:
        myspec = spec.reproject(nbins={'offset': 25, 'eff': 40, 'zen': 30})
    elif reproj == 4:
        myspec = spec.reproject(nbins={'offset': 1, 'eff': 1, 'zen': 1})

    myspec.set_source(p1)

    if manual:
        load_model.set_manual_model(
            sau.get_model(datid))  # Parameters for all runs are linked
    else:
        print('Using default initial values for model parameters')

    myspec.fit(do_conf=do_conf)

    if noplot:
        quit()
    make_plot.make_plot(list_data, p1)

    raw_input('Press <ENTER> to continue')
Code Example #13
File: utils.py  Project: evantey14/nustar
def save_components(filename, compIDstosave=None):
    if os.path.isfile(filename): raise Exception("Model already exists.")
    with open(filename, 'w') as f:
        print('Saving ' + filename)
        print(shp.get_model())
        if compIDstosave is None: 
            compIDstosave = [c for c in shp.list_model_components() if c not in ['emap', 'psf']]
        f.write('components: ' + str(len(compIDstosave)) + '\n')
        for compID in compIDstosave:
            if compID not in ['psf', 'emap']:
                comp = shp.get_model_component(compID)
                f.write(comp.name.replace('.', ' ') + ' ' + str(len(comp.pars)) + '\n')
                for par in comp.pars:
                    f.write(par.name + ' ' +
                        str(par.min) + ' ' +
                        str(par.val) + ' ' +
                        str(par.max) + '\n')
        f.close()
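
For reference, the text format written above is simple enough to read back. The following loader is a hypothetical counterpart (not part of the nustar project) that recreates the saved components using only standard Sherpa calls:

import sherpa.astro.ui as shp

def load_components(filename):
    """Hypothetical reader for the save_components() format above."""
    with open(filename) as f:
        ncomp = int(f.readline().split(':')[1])
        for _ in range(ncomp):
            # each component header line: '<modeltype> <instancename> <npars>'
            modeltype, instname, npars = f.readline().split()
            comp = shp.create_model_component(modeltype, instname)
            for _ in range(int(npars)):
                # each parameter line: '<name> <min> <val> <max>'
                name, pmin, pval, pmax = f.readline().split()
                shp.set_par(getattr(comp, name), val=float(pval),
                            min=float(pmin), max=float(pmax))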
Code Example #14
def get_source_qq_data(id=None):
    """Get data for a quantile-quantile plot of the source data and model.

    *id*
      The dataset id for which to get the data; defaults if unspecified.
    Returns:
      An ndarray of shape ``(3, npts)``. The first slice is the energy axis in
      keV; the second is the observed values in each bin (counts, or rate, or
      rate per keV, etc.); the third is the corresponding model value in each
      bin.

    The inputs are implicit; the data are obtained from the current state of
    the Sherpa ``ui`` module.

    """
    sdata = ui.get_data(id=id)
    kev = sdata.get_x()
    obs_data = sdata.counts
    model_data = ui.get_model(id=id)(kev)
    return np.vstack((kev, obs_data, model_data))
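
The (3, npts) array returned above feeds straight into a quantile-quantile comparison: cumulative observed values are plotted against cumulative model values and checked against the 1:1 line. A minimal plotting sketch, assuming matplotlib is available and a dataset plus source model have already been set in the ui session:

import numpy as np
import matplotlib.pyplot as plt

kev, obs, mod = get_source_qq_data()
c_obs = np.cumsum(obs)   # cumulative observed values
c_mod = np.cumsum(mod)   # cumulative model values

plt.plot(c_mod, c_obs)
lim = max(c_mod[-1], c_obs[-1])
plt.plot([0, lim], [0, lim], ':', label='1:1')   # perfect-agreement reference line
plt.xlabel('Cumulative model')
plt.ylabel('Cumulative data')
plt.legend()
plt.show()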
Code Example #15
File: shmodelshelper.py  Project: hamogu/filili
def get_model_parts(id = None):
    '''obtain a list of strings for sherpa models

    Iterate through all components which are part of the Sherpa model
    and return their identifiers. Ignore all composite models.

    Example
    -------

    >>> from sherpa.ui import *
    >>> load_arrays(1, [1,2,3], [1,2,3]) # Set some dummy data
    >>> set_model('const1d.c + gauss1d.lineg1 + gauss1d.lineg2 + gauss1d.lineg3')
    >>> show_model() # doctest: +SKIP
        Model: 1
        (((const1d.c + gauss1d.lineg1) + gauss1d.lineg2) + gauss1d.lineg3)
        ...
    >>> get_model_parts()  # doctest: +SKIP
    {'c', 'lineg1', 'lineg2', 'lineg3'}

    '''
    try:
        return set([par.modelname for par in get_model(id).pars])
    except IdentifierErr:
        return set([])
Code Example #16
File: make_plot.py  Project: astrofrog/gammapy
    def get_plot_arrays(self, data_list):
        """Construct arrays of model count rates."""

        sample_model = sau.get_model(data_list[0].name)
        self.get_binning(sample_model)  # do this only once assuming that true energy
        # binning does not change from run to run
        obs_exc = np.zeros_like(self.bcenter)
        obs_err = np.zeros_like(self.bcenter)
        tot_on = np.zeros_like(self.bcenter)
        tot_off = np.zeros_like(self.bcenter)
        mod_cnts = np.zeros_like(self.bcenter)
        exp_tot = np.zeros_like(self.etrue_center)
        mod_tot = np.zeros_like(self.etrue_center)

        for dat in data_list:
            datid = dat.name
            exposure = dat.data.exposure
            on_cnt_rate = dat.data.get_y()

            c_bkg = sau.get_bkg(datid)
            bg_cnt_rate = c_bkg.get_y()
            backscal = c_bkg.get_backscal()

            c_mod = sau.get_model(datid)
            arf = c_mod.arf
            arf_vals = arf.get_y()

            # Excess
            bw_expo = self.b_width * exposure
            on_cnts = on_cnt_rate * bw_expo
            off_cnts = bg_cnt_rate * bw_expo / backscal
            c_exc = on_cnts - off_cnts  # excess counts
            c_exc_err2 = on_cnts + off_cnts / backscal  # errors

            # model counts
            c_modcnts = c_mod.calc(self.para, 2.)  # second parameter is dummy...

            # Consider only noticed bins
            valid = dat.data.get_noticed_channels().astype(int)
            valid -= np.ones_like(valid)  # Channel id's start at 1!

            obs_exc[valid] = obs_exc[valid] + c_exc[valid]  # Total excess in noticed bins
            obs_err[valid] = obs_err[valid] + c_exc_err2[valid]  # Total error square
            tot_on[valid] = tot_on[valid] + on_cnts[valid]
            tot_off[valid] = tot_off[valid] + off_cnts[valid]
            mod_cnts[valid] = mod_cnts[valid] + c_modcnts[valid]  # Total noticed model counts
            valid_arf = self.ener_map[valid].sum(0) > 0  # valid pixels in true energy

            self.get_mod_val(self.totmodel, self.etrue_center)

            # Add run exposure*area*model for valid true energy bins only
            exp_tot[valid_arf] = exp_tot[valid_arf] + \
                                 arf_vals[valid_arf] * self.mod_val[valid_arf] * exposure

            ''' Not used, may be useful to produce upper limits
            #significance per bin:
            signis = significance(n_observed=tot_on, mu_background=tot_off, method='lima')
            some_significant = False
            #makeUL = []
            for i,signi in enumerate(signis):
            if signi<2:
            print('WARNING: Energy bin from', round(binmin[i]/1e9,1), 'to', \
            round(binmax[i]/1e9,1), 'TeV has', round(signi,2), 'sigma only.')
            print('...may want to convert to upper limit') # NOT YET IMPLEMENTED
            continue
            #makeUL.append(True)
            if np.isinf(signi) or np.isnan(signi): #isinf when Non = Noff = 0?
            if some_significant: # otherwise we are probably below threshold
            print('WARNING: Energy bin from', round(binmin[i]/1e9,1), 'to', \
            round(binmax[i]/1e9,1), 'TeV contains no events.')
            continue
            else:
            some_significant = True
            '''

        # compute average exposure (time*area) in each measured energy bin
        mean_expo = np.zeros(obs_exc.shape)
        for i in range(obs_exc.shape[0]):
            mean_expo[i] = exp_tot[self.ener_map[i, :]].sum() / \
                           self.mod_val[self.ener_map[i, :]].sum()
            bw_meanexpo = self.b_width * mean_expo

        # get flux and error per cm^2/s/TeV
        self.mean_flux = 1e9 * obs_exc / bw_meanexpo
        self.mean_flux[np.isnan(self.mean_flux)] = 0

        self.mean_err = 1e9 * np.sqrt(obs_err) / bw_meanexpo  # mean_flux/signis

        # Compute residuals where model counts >0
        self.resid = (-mod_cnts + obs_exc) / np.sqrt(obs_err)

        # Model spectral points
        self.bcenter /= 1e9  # keV? Nope, real high energy...
Code Example #17
File: utils.py  Project: nplee/ciao-contrib
def renorm(id=None, cpt=None, bkg_id=None, names=None, limscale=1000.0):
    """Change the normalization of a model to match the data.

    The idea is to change the normalization to be a better match to
    the data, so that the search can be quicker. It can be considered
    to be like the `guess` command, but for the normalization. It
    is *only* intended to change the normalization to a value near
    the correct one; it *should not* be used for any sort of
    calculation without first doing a fit. It is also only going to
    give reasonable results for models where the predicted data of a
    model is linearly related to the normalization.

    Parameters
    ----------
    id : None, int, or str
       The data set identifier to use. A value of ``None`` uses the
       default identifier.
    cpt
       If not ``None``, the model component to use. When ``None``, the
       full source expression for the data set is used. There is no
       check that the ``id`` argument matches the component (i.e. that
       the component is included in the source model for the data set)
    bkg_id : None, int
       If not None then change the normalization of the model to the
       given background dataset.
    names : None or array of str
       The parameter names that should be changed (a case-insensitive
       comparison is made, and the name does not include the model
       name). If ``None`` then the default set of
       ``['ampl', 'norm']`` is used.
    limscale : float
       The min and max range of the normalization is set to the
       calculated value divided and multiplied by ``limscale``.
       These limits will be modified to match the hard limits of the
       parameter if they exceed them.

    See Also
    --------
    guess, ignore, notice, set_par

    Notes
    -----
    The normalization is computed so that the predicted model counts
    matches the observed counts for the currently-noticed data range,
    as long as parameter names match the ``names`` argument (or
    ['ampl', 'norm'] if that is ``None``) and the parameter is not
    frozen.

    If no matches are found, then no changes are made. Otherwise, a
    scale factor is created by summing up the data counts and dividing
    this by the model sum over the currently-noticed range. This scale
    factor is divided by the number of matching parameters, and then
    the parameter values are multiplied by this value. If a model
    contains multiple parameters matching the contents of the
    ``names`` argument then each one will be changed by this routine.

    It is not intended for use with source expressions
    created with `set_full_model`, and may not work well with
    image models that use a PSF (one set with `set_psf`).

    Examples
    --------

    Adjust the normalization of the gal component before fitting.

    >>> load_pha('src.pi')
    >>> subtract()
    >>> notice(0.5, 7)
    >>> set_source(xsphabs.galabs * xsapec.gal)
    >>> renorm()

    Change the normalization of a 2D model using the 'src' dataset.
    Only the ``src`` component is changed since the default value for
    the ``names`` parameter - that is ['ampl', 'norm'] - does not
    match the normalization parameter of the `const2d` model.

    >>> load_image('src', 'img.fits')
    >>> set_source('src', gauss2d.src + const2d.bgnd)
    >>> renorm('src')

    The names parameter is set so that both components are adjusted,
    and each component is assumed to contribute half the signal.

    >>> load_image(12, 'img.fits')
    >>> notice2d_id(12, 'srcfit.reg')
    >>> set_source(12, gauss2d.src12 + const2d.bgnd12)
    >>> renorm(12, names=['ampl', 'c0'])

    Change the minimum and maximum values of the normalization
    parameter to be the calculated value divided by and multiplied by
    1e4 respectively (these changes are made to the soft limits).

    >>> renorm(limscale=1e4)

    """

    if names is None:
        matches = ['ampl', 'norm']
    elif names == []:
        raise ArgumentErr('bad', 'names argument', '[]')
    else:
        matches = [n.lower() for n in names]

    if bkg_id is None:
        d = ui.get_data(id=id)
        m = ui.get_model(id=id)
    else:
        d = ui.get_bkg(id=id, bkg_id=bkg_id)
        m = ui.get_bkg_model(id=id, bkg_id=bkg_id)

    if cpt is not None:
        # In this case the get_[bkg_]model call is not needed above,
        # but leave in as it at least ensures there's a model defined
        # for the data set.
        m = cpt

    pars = [p for p in m.pars if p.name.lower() in matches and not p.frozen]
    npars = len(pars)
    if npars == 0:
        wmsg = "no thawed parameters found matching: {}".format(
            ", ".join(matches))
        warn(wmsg)
        return

    yd = d.get_dep(filter=True).sum()
    ym = d.eval_model_to_fit(m).sum()

    # argh; these are numpy floats, and they do not throw a
    # ZeroDivisionError, rather you get a RuntimeWarning message.
    # So explicitly convert to Python float.
    #
    try:
        scale = float(yd) / float(ym) / npars
    except ZeroDivisionError:
        error("model sum evaluated to 0; no re-scaling attempted")
        return

    for p in pars:
        newval = p.val * scale
        newmin = newval / limscale
        newmax = newval * limscale

        # Could do the limit/range checks and then call set_par,
        # but only do so if there's a problem.
        #
        try:
            ui.set_par(p, val=newval, min=newmin, max=newmax)

        except ParameterErr:
            # The following is not guaranteed to catch all cases;
            # e.g if the new value is outside the hard limits.
            #
            minflag = newmin < p.hard_min
            maxflag = newmax > p.hard_max
            if minflag:
                newmin = p.hard_min
            if maxflag:
                newmax = p.hard_max

            ui.set_par(p, val=newval, min=newmin, max=newmax)

            # provide informational message after changing the
            # parameter
            if minflag and maxflag:
                reason = "to hard min and max limits"
            elif minflag:
                reason = "to the hard minimum limit"
            elif maxflag:
                reason = "to the hard maximum limit"
            else:
                # this should be impossible
                reason = "for an unknown reason"

            info("Parameter {} is restricted ".format(p.fullname) + reason)
Code Example #18
def test_pileup_model(make_data_path, clean_astro_ui):
    """Basic check of setting a pileup model.

    It is more to check we can set a pileup model, not to
    check the model works.
    """

    infile = make_data_path('3c273.pi')
    ui.load_pha('pileup', infile)
    ui.subtract('pileup')
    ui.notice(0.3, 7)

    ui.set_stat('chi2datavar')

    # pick xswabs as it is unlikely to change with XSPEC
    ui.set_source('pileup', ui.xswabs.amdl * ui.powlaw1d.pl)

    # get close to the best fit, but don't need to fit
    pl.ampl = 1.82e-4
    pl.gamma = 1.97
    amdl.nh = 0.012

    stat0 = ui.calc_stat('pileup')

    # We want to compare the data to the pileup model,
    # which should be run with the higher-energy bins included,
    # but that's not relevant here where I am just
    # checking the statistic value.

    ui.set_pileup_model('pileup', ui.jdpileup.jdp)

    # pick some values to make the model change the data
    jdp.ftime = 3.2
    jdp.fracexp = 1
    jdp.alpha = 0.95
    jdp.f = 0.91

    # Check pileup is added to get_model
    #
    mlines = str(ui.get_model('pileup')).split('\n')
    assert mlines[0] == 'apply_rmf(jdpileup.jdp((xswabs.amdl * powlaw1d.pl)))'
    assert mlines[3].strip() == 'jdp.alpha    thawed         0.95            0            1'
    assert mlines[5].strip() == 'jdp.f        thawed         0.91          0.9            1'
    assert mlines[11].strip() == 'pl.gamma     thawed         1.97          -10           10'

    # Ensure that the statistic has got worse (technically
    # it could get better, but not for this case).
    #
    stat1 = ui.calc_stat('pileup')
    assert stat1 > stat0

    # As a test, check the actual statistic values
    # (evaluated with XSPEC 12.11.0 and Sherpa with the
    # master branch 2020/07/29, on Linux).
    #
    assert stat0 == pytest.approx(35.99899827358692)
    assert stat1 == pytest.approx(36.58791181460404)

    # Can we remove the pileup model?
    #
    ui.delete_pileup_model('pileup')

    # Check pileup is not in get_model
    #
    mlines = str(ui.get_model('pileup')).split('\n')

    # Numeric display depends on Python and/or NumPy
    # for the exposure time. I should have set the exposure
    # time to an integer value.
    #
    # assert mlines[0] == 'apply_rmf(apply_arf((38564.608926889 * (xswabs.amdl * powlaw1d.pl))))'

    toks = mlines[0].split()
    assert len(toks) == 5
    assert toks[0].startswith('apply_rmf(apply_arf((38564.608')
    assert toks[1] == '*'
    assert toks[2] == '(xswabs.amdl'
    assert toks[3] == '*'
    assert toks[4] == 'powlaw1d.pl))))'

    assert mlines[4].strip() == 'pl.gamma     thawed         1.97          -10           10'

    stat2 = ui.calc_stat('pileup')
    assert stat2 == stat0
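
The pileup-specific calls exercised by this test reduce to a short sequence. A minimal sketch, with a hypothetical PHA file name:

import sherpa.astro.ui as ui

ui.load_pha('src.pi')                  # hypothetical PHA file
ui.set_source(ui.powlaw1d.pl)
ui.set_pileup_model(ui.jdpileup.jdp)   # wrap the source expression in a pileup model
print(ui.get_model())                  # jdpileup.jdp now appears in the model expression
ui.delete_pileup_model()               # remove it; get_model() reverts to the plain source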
Code Example #19
# inner radii chosen
for r in [20, 24, 28, 32]:
    # from KP15, (ra, dec) = (266.4172 deg, -29.00716 deg)
    # systematic shifting error = 6" = 2.4 px
    # ellip = .52
    # theta = 57 deg (from positive north)
    shp.set_par(chxe.xpos, 497.4, 497.4 - 4, 497.4 + 4)
    shp.set_par(chxe.ypos, 499.1, 499.1 - 4, 499.1 + 4)
    shp.set_par(chxe.fwhm, 30, 1, 200)
    shp.set_par(chxe.ellip, .5)
    shp.set_par(chxe.theta, -.9948)
    shp.set_par(chxe.ampl, 1e-5)
    shp.thaw(chxe.ellip, chxe.xpos, chxe.ypos)

    print(shp.get_model())

    shp.ignore2d('circle(500,500,1000)')
    shp.notice2d('circle(500,500,60)')
    shp.ignore2d('circle(500,500,' + str(r) + ')')
    shp.fit()

    print(shp.get_model())

    shp.set_conf_opt('numcores', 3)
    shp.set_conf_opt('maxiters', 50)
    shp.set_conf_opt('fast', True)
    shp.set_conf_opt('remin', 10000.0)
    shp.set_conf_opt('soft_limits', True)
    shp.freeze(chxe.xpos, chxe.ypos)
Code Example #20
File: helper_functions.py  Project: thriveth/Pychelle
def fit_with_sherpa(model, data, trans, rows,
                    ranges=[], errs=None, shmod=1, method='levmar'):
    """ This is probably going to be one of the slightly more complicated
    functions, so it'll probably need a relatively good and comprehensive
    docstring.

    Parameters
    -----------
    model : pandas DataFrame object.
        Must either have a 3-level index as created by grism, or at least a
        one-level index identifying the different components. If a three-level
        index is passed, it must either contain only one value of the
        Transition and Row values, or the 'trans' and 'row' kwargs must be
        passed in the call.
    data : numpy array or pandas Series or DataFrame.
        The data is assumed to have at least two columns containing first
        wavelength, then data. An optional third column can contain errors.
        If no such column exists, the errors are assumed to be 1.
    trans : string
        The desired value of the 'Transition' level of the model dataframe.
    rows : string
        The desired value of the 'Rows' level of the model index.
    shmod : integer
        Identifier for the Sherpa model in case one wants to have more models
        loaded in memory simultaneously. If not, it will be overwritten each
        time fit_with_sherpa() is run.
        Default: 1
    ranges : list of (min, max) tuples.
        This determines which wavelength ranges will be included in the fit. If
        an empty list is passed, the entire range of the data passed will be
        used.
        Default: [] (Empty list).
    method : string
        The optimization method to be used by Sherpa. Can be 'levmar', 'neldermead'
        or 'moncar'. See Sherpa documentation for more detail.
        Default: 'levmar'.
    """
    # Should this perhaps be an instancemethod of one of the major classes
    # instead? On the upside it would mean direct access to all said class's
    # attributes, which is good because it means less mandatory input
    # parameters. On the downside, it is not generally useable. I want things
    # to be as general as possible. But not at any price. Cost/benefit analysis
    # not yet conclusive.

    # First of all, check if Sherpa is even installed.
    try:
        import sherpa.astro.ui as ai
        import sherpa.models as sm
    except ImportError:
        print " ".join("The Sherpa fitting software must be installed to use \
            this functionality.".split())
        raise

    # Sherpa isn't good at staying clean, need to help it.
    for i in ai.list_model_ids():
        ai.delete_model(shmod)
        ai.delete_data(shmod)
    # Load data first, 'cause Sherpa wants it so.
    # data columns: wavelength, flux and (optionally) errors
    if data.shape[1] == 2:
        ai.load_arrays(shmod, data[:, 0], data[:, 1])
    if data.shape[1] > 2:
        ai.load_arrays(shmod, data[:, 0], data[:, 1], data[:, 2])
    # Initialize model by setting continuum
    Contin = sm.Const1D('Contin')
    Contin.c0 = model.xs('Contin')['Ampl']
    ai.set_model(shmod, Contin)

    for i in model.index:
        if i == 'Contin':
            continue
        else:
            # use the identifier as letter (good idea?)
            name = model.ix[i]['Identifier']
            comp = ai.gauss1d(name)
            comp.ampl = model.ix[i]['Ampl']
            comp.pos = model.ix[i]['Pos']
            comp.fwhm = model.ix[i]['Sigma']
            ai.set_model(shmod, ai.get_model(shmod) + comp)
            ai.show_model(shmod)  # For testing...
    print '  '
    print ai.get_model(shmod)

    # Set ranges included in fit.
    # First, unset all.
    ai.ignore_id(shmod)
    if len(ranges) == 0:
        ai.notice_id(shmod)
    else:
        for r in ranges:
            ai.notice_id(shmod, r[0], r[1])

    # Set optimization algorithm
    ai.set_method(method)
    # Create copy of model
    new_model = model.copy()
    # Perform the fit:
    ai.fit(shmod)
    print ai.get_fit_results()
    print model
    return new_model
Code Example #21
    def fit(self):
        # try a PCA decomposition of this spectrum
        initial = self.decompose()
        ui.set_method('neldermead')
        bkgmodel = PCAModel('pca%s' % self.id, data=self.pca)
        self.bkgmodel = bkgmodel
        response = get_identity_response(self.id)
        convbkgmodel = response(bkgmodel)
        ui.set_bkg_full_model(self.id, convbkgmodel)
        for p, v in zip(bkgmodel.pars, initial):
            p.val = v
        srcmodel = ui.get_model(self.id)
        ui.set_full_model(self.id, srcmodel)
        initial_v = self.calc_bkg_stat()
        # print('before fit: stat: %s' % (initial_v))
        ui.fit_bkg(id=self.id)
        # print('fit: first full fit done')
        final = [p.val for p in ui.get_bkg_model(self.id).pars]
        # print('fit: parameters: %s' % (final))
        initial_v = self.calc_bkg_stat()
        # print('fit: stat: %s' % (initial_v))

        # lets try from zero
        # logf.info('fit: second full fit from zero')
        for p in bkgmodel.pars:
            p.val = 0
        ui.fit_bkg(id=self.id)
        initial_v0 = self.calc_bkg_stat()
        # logf.info('fit: parameters: %s' % (final))
        # logf.info('fit: stat: %s' % (initial_v0))

        # pick the better starting point
        if initial_v0 < initial_v:
            # logf.info('fit: using zero-fit')
            initial_v = initial_v0
            final = [p.val for p in ui.get_bkg_model(self.id).pars]
        else:
            # logf.info('fit: using decomposed-fit')
            for p, v in zip(bkgmodel.pars, final):
                p.val = v

        # start with the full fit and remove(freeze) parameters
        print('%d parameters, stat=%.2f' % (len(initial), initial_v))
        results = [(2 * len(final) + initial_v, final, len(final), initial_v)]
        for i in range(len(initial) - 1, 0, -1):
            bkgmodel.pars[i].val = 0
            bkgmodel.pars[i].freeze()
            ui.fit_bkg(id=self.id)
            final = [p.val for p in ui.get_bkg_model(self.id).pars]
            v = self.calc_bkg_stat()
            print('--> %d parameters, stat=%.2f' % (i, v))
            results.insert(0, (v + 2 * i, final, i, v))

        print()
        print('Background PCA fitting AIC results:')
        print('-----------------------------------')
        print()
        print('stat Ncomp AIC')
        for aic, params, nparams, val in results:
            print('%-05.1f %2d %-05.1f' % (val, nparams, aic))
        aic, final, nparams, val = min(results)
        for p, v in zip(bkgmodel.pars, final):
            p.val = v
        for i in range(nparams):
            bkgmodel.pars[i].thaw()

        print()
        print('Increasing parameters again...')
        # now increase the number of parameters again
        # results = [(aic, final, nparams, val)]
        last_aic, last_final, last_nparams, last_val = aic, final, nparams, val
        for i in range(last_nparams, len(bkgmodel.pars)):
            next_nparams = i + 1
            bkgmodel.pars[i].thaw()
            for p, v in zip(bkgmodel.pars, last_final):
                p.val = v
            ui.fit_bkg(id=self.id)
            next_final = [p.val for p in ui.get_bkg_model(self.id).pars]
            v = self.calc_bkg_stat()
            next_aic = v + 2 * next_nparams
            if next_aic < last_aic:  # accept
                print('%d parameters, aic=%.2f ** accepting' %
                      (next_nparams, next_aic))
                last_aic, last_final, last_nparams, last_val = next_aic, next_final, next_nparams, v
            else:
                print('%d parameters, aic=%.2f' % (next_nparams, next_aic))
            # stop if we are 3 parameters ahead what we needed
            if next_nparams >= last_nparams + 3:
                break

        print('Final choice: %d parameters, aic=%.2f' %
              (last_nparams, last_aic))
        # reset to the last good solution
        for p, v in zip(bkgmodel.pars, last_final):
            p.val = v

        last_model = convbkgmodel
        for i in range(10):
            print('Adding Gaussian#%d' % (i + 1))
            # find largest discrepancy
            ui.set_analysis(self.id, "ener", "rate")
            m = ui.get_bkg_fit_plot(self.id)
            y = m.dataplot.y.cumsum()
            z = m.modelplot.y.cumsum()
            diff_rate = np.abs(y - z)
            ui.set_analysis(self.id, "ener", "counts")
            m = ui.get_bkg_fit_plot(self.id)
            x = m.dataplot.x
            y = m.dataplot.y.cumsum()
            z = m.modelplot.y.cumsum()
            diff = np.abs(y - z)
            i = np.argmax(diff)
            energies = x
            e = x[i]
            print(
                'largest remaining discrepancy at %.3fkeV[%d], need %d counts'
                % (x[i], i, diff[i]))
            # e = x[i]
            power = diff_rate[i]
            # lets try to inject a gaussian there

            g = ui.xsgaussian('g_%d_%d' % (self.id, i))
            print('placing gaussian at %.2fkeV, with power %s' % (e, power))
            # we work in energy bins, not energy
            g.LineE.min = energies[0]
            g.LineE.max = energies[-1]
            g.LineE.val = e
            if i > len(diff) - 2:
                i = len(diff) - 2
            if i < 2:
                i = 2
            g.Sigma = (x[i + 1] - x[i - 1])
            g.Sigma.min = (x[i + 1] - x[i - 1]) / 3
            g.Sigma.max = x[-1] - x[0]
            g.norm.min = power * 1e-6
            g.norm.val = power
            convbkgmodel2 = response(g)
            next_model = last_model + convbkgmodel2
            ui.set_bkg_full_model(self.id, next_model)
            ui.fit_bkg(id=self.id)
            next_final = [p.val for p in ui.get_bkg_model(self.id).pars]
            next_nparams = len(next_final)
            v = self.calc_bkg_stat()
            next_aic = v + 2 * next_nparams
            print('with Gaussian:', next_aic,
                  '; change: %.1f (negative is good)' % (next_aic - last_aic))
            if next_aic < last_aic:
                print('accepting')
                last_model = next_model
                last_aic, last_final, last_nparams, last_val = next_aic, next_final, next_nparams, v
            else:
                print('not significant, rejecting')
                ui.set_bkg_full_model(self.id, last_model)
                for p, v in zip(last_model.pars, last_final):
                    p.val = v
                    if v == 0:  # the parameter was frozen.
                        ui.freeze(p)
                break

        self.cstat, self.dof = self.calc_bkg_stat(
            dof=True)  # Save the final cstat and dof (dof = ihi - ilo)
        self.filter_energy = ui.get_filter()  # Save the filter for background fitting.
        ui.set_analysis('channel')
        ui.ignore()
        ui.notice(self.filter0)  # restore filter
        ui.set_analysis('energy')
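
The selection rule used throughout this method is the Akaike information criterion, AIC = stat + 2*k with k free parameters (the statistic here being the background C-statistic); the candidate with the lowest AIC wins. A tiny standalone sketch of that comparison with made-up numbers:

# Hypothetical (statistic, n_parameters) pairs for models of increasing complexity.
candidates = [(140.0, 2), (118.0, 4), (117.5, 6)]

# AIC = stat + 2*k: the 2*k term penalises extra parameters, so the third model
# only wins if its statistic improves by more than 4 over the second.
aics = [(stat + 2 * k, k, stat) for stat, k in candidates]
best_aic, best_k, best_stat = min(aics)
print('best: %d parameters, stat=%.1f, AIC=%.1f' % (best_k, best_stat, best_aic))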
Code Example #22
    def get_plot_arrays(self, data_list):
        """Construct arrays of model count rates."""

        sample_model = sau.get_model(data_list[0].name)
        self.get_binning(
            sample_model)  # do this only once assuming that true energy
        # binning does not change from run to run
        obs_exc = np.zeros_like(self.bcenter)
        obs_err = np.zeros_like(self.bcenter)
        tot_on = np.zeros_like(self.bcenter)
        tot_off = np.zeros_like(self.bcenter)
        mod_cnts = np.zeros_like(self.bcenter)
        exp_tot = np.zeros_like(self.etrue_center)
        mod_tot = np.zeros_like(self.etrue_center)

        for dat in data_list:
            datid = dat.name
            exposure = dat.data.exposure
            on_cnt_rate = dat.data.get_y()

            c_bkg = sau.get_bkg(datid)
            bg_cnt_rate = c_bkg.get_y()
            backscal = c_bkg.get_backscal()

            c_mod = sau.get_model(datid)
            arf = c_mod.arf
            arf_vals = arf.get_y()

            # Excess
            bw_expo = self.b_width * exposure
            on_cnts = on_cnt_rate * bw_expo
            off_cnts = bg_cnt_rate * bw_expo / backscal
            c_exc = on_cnts - off_cnts  # excess counts
            c_exc_err2 = on_cnts + off_cnts / backscal  # errors

            # model counts
            c_modcnts = c_mod.calc(self.para,
                                   2.)  # second parameter is dummy...

            # Consider only noticed bins
            valid = dat.data.get_noticed_channels().astype(int)
            valid -= np.ones_like(valid)  # Channel id's start at 1!

            obs_exc[valid] = obs_exc[valid] + c_exc[
                valid]  # Total excess in noticed bins
            obs_err[valid] = obs_err[valid] + c_exc_err2[
                valid]  # Total error square
            tot_on[valid] = tot_on[valid] + on_cnts[valid]
            tot_off[valid] = tot_off[valid] + off_cnts[valid]
            mod_cnts[valid] = mod_cnts[valid] + c_modcnts[
                valid]  # Total noticed model counts
            valid_arf = self.ener_map[valid].sum(
                0) > 0  # valid pixels in true energy

            self.get_mod_val(self.totmodel, self.etrue_center)

            # Add run exposure*area*model for valid true energy bins only
            exp_tot[valid_arf] = exp_tot[valid_arf] + \
                                 arf_vals[valid_arf] * self.mod_val[valid_arf] * exposure
            ''' Not used, may be useful to produce upper limits
            #significance per bin:
            signis = significance(n_observed=tot_on, mu_background=tot_off, method='lima')
            some_significant = False
            #makeUL = []
            for i,signi in enumerate(signis):
            if signi<2:
            print('WARNING: Energy bin from', round(binmin[i]/1e9,1), 'to', \
            round(binmax[i]/1e9,1), 'TeV has', round(signi,2), 'sigma only.')
            print('...may want to convert to upper limit') # NOT YET IMPLEMENTED
            continue
            #makeUL.append(True)
            if np.isinf(signi) or np.isnan(signi): #isinf when Non = Noff = 0?
            if some_significant: # otherwise we are probably below threshold
            print('WARNING: Energy bin from', round(binmin[i]/1e9,1), 'to', \
            round(binmax[i]/1e9,1), 'TeV contains no events.')
            continue
            else:
            some_significant = True
            '''

        # compute average exposure (time*area) in each measured energy bin
        mean_expo = np.zeros(obs_exc.shape)
        for i in range(obs_exc.shape[0]):
            mean_expo[i] = exp_tot[self.ener_map[i, :]].sum() / \
                           self.mod_val[self.ener_map[i, :]].sum()
            bw_meanexpo = self.b_width * mean_expo

        # get flux and error per cm^2/s/TeV
        self.mean_flux = 1e9 * obs_exc / bw_meanexpo
        self.mean_flux[np.isnan(self.mean_flux)] = 0

        self.mean_err = 1e9 * np.sqrt(
            obs_err) / bw_meanexpo  # mean_flux/signis

        # Compute residuals where model counts >0
        self.resid = (-mod_cnts + obs_exc) / np.sqrt(obs_err)

        # Model spectral points
        self.bcenter /= 1e9  # keV? Nope, real high energy...
Code Example #23
File: xagnfitter.py  Project: neobar/BXA
    parameters += zparameters
else:
    prefix += 'zfree_'
    parameters += [redshift]
    priors += [priorfuncs.create_uniform_prior_for(redshift)]

assert len(priors) == len(
    parameters), 'priors: %d parameters: %d' % (len(priors), len(parameters))

################
# set model
#    find background automatically using PCA method

print('setting source and background model ...')
set_model(id, model * galabso)
convmodel = get_model(id)
bkg_model = auto_background(id)
set_full_model(id, get_response(id)(model) + bkg_model * get_bkg_scale(id))
#plot_bkg_fit(id)

## we allow the background normalisation to be a free fitting parameter
p = bkg_model.pars[0]
p.max = p.val + 2
p.min = p.val - 2
parameters.append(p)
priors += [priorfuncs.create_uniform_prior_for(p)]

priorfunction = priorfuncs.create_prior_function(priors=priors)
assert not numpy.isnan(
    calc_stat(id)), 'NaN on calc_stat, probably a bad RMF/ARF file for PC'