Exemplo n.º 1
0
    def notice(self, emin_ener, emax_ener):
        """Restrict the noticed energy band for this dataset.

        The requested bounds are clamped to the instrument limits:
        a lower bound below ``self.threshold`` is raised to the
        threshold, and an upper bound above ``self.emax`` is lowered
        to ``self.emax``.
        """
        lo = emin_ener if emin_ener > self.threshold else self.threshold
        hi = emax_ener if emax_ener < self.emax else self.emax
        sau.notice_id(self.name, lo, hi)
Exemplo n.º 2
0
def test_grouped_pha_all_bad_response_bg_warning(elo, ehi, nbins, bkg_id,
                                                 caplog, make_data_path,
                                                 clean_astro_ui):
    """Check we get the warning messages with background filtering"""

    ui.load_pha('check', make_data_path('3c273.pi'))

    # Mark every background channel as bad quality (flag 2) and ignore
    # them, so the background mask ends up excluding all data.
    ui.set_quality('check', 2 * numpy.ones(1024, dtype=numpy.int16), bkg_id=1)
    ui.ignore_bad('check', bkg_id=1)

    # Apply the notice filter while capturing any log records it emits.
    with caplog.at_level(logging.INFO, logger='sherpa'):
        ui.notice_id('check', elo, ehi, bkg_id=bkg_id)

    # filtering has or hasn't happened
    nsrc = ui.get_dep('check', filter=True).size
    nback = ui.get_dep('check', filter=True, bkg_id=1).size

    if bkg_id is None:
        # No bkg_id: the source dataset was filtered too.
        assert nsrc == nbins
        assert nback == 0
    else:
        # Explicit bkg_id: only the background is touched.
        assert nsrc == 46  # ie no filter
        assert nback == 0

    # did we get a warning message from the background?
    assert len(caplog.records) == 1
    name, lvl, msg = caplog.record_tuples[0]
    assert name == 'sherpa.astro.data'
    assert lvl == logging.INFO
    assert msg.startswith('Skipping dataset ')
    assert msg.endswith('/3c273_bg.pi: mask excludes all data')
Exemplo n.º 3
0
    def notice(self, emin_ener, emax_ener):
        """Notice an energy range, clamped to the valid limits.

        A lower bound below ``self.threshold`` is replaced by the
        threshold; an upper bound above ``self.emax`` is replaced by
        ``self.emax``.
        """
        lo = max(emin_ener, self.threshold)
        hi = min(emax_ener, self.emax)
        sau.notice_id(self.name, lo, hi)
Exemplo n.º 4
0
    def run_hspec_fit(self, model, thres_low, thres_high):
        """Run the gammapy.hspec fit

        Parameters
        ----------
        model : str
            Sherpa model name; only 'PL' (power law) is supported
        thres_low : `~gammapy.spectrum.Energy`
            Lower threshold of the spectral fit
        thres_high : `~gammapy.spectrum.Energy`
            Upper threshold of the spectral fit

        Raises
        ------
        ValueError
            If ``model`` is not a supported model name.
        """

        log.info("Starting HSPEC")
        import sherpa.astro.ui as sau
        from ..hspec import wstat
        from sherpa.models import PowLaw1D

        # Only a power law is implemented; starting values are hard-coded.
        if model == 'PL':
            p1 = PowLaw1D('p1')
            p1.gamma = 2.2
            p1.ref = 1e9
            p1.ampl = 6e-19
        else:
            raise ValueError('Desired Model is not defined')

        # Fit range converted to keV for Sherpa.
        thres = thres_low.to('keV').value
        emax = thres_high.to('keV').value

        sau.freeze(p1.ref)
        sau.set_conf_opt("max_rstat", 100)

        # Load each observation, restrict it to the fit range, and attach
        # the shared source model so all datasets are fit jointly.
        list_data = []
        for obs in self.observations:
            # Dataset id taken from the PHA file name (presumably
            # characters 7:12 encode the run id -- TODO confirm).
            datid = obs.phafile.parts[-1][7:12]
            sau.load_data(datid, str(obs.phafile))
            sau.notice_id(datid, thres, emax)
            sau.set_source(datid, p1)
            list_data.append(datid)
        wstat.wfit(list_data)
        # Covariance errors on the fitted parameters.
        sau.covar()
        fit_val = sau.get_covar_results()
        fit_attrs = ('parnames', 'parvals', 'parmins', 'parmaxes')
        fit = dict((attr, getattr(fit_val, attr)) for attr in fit_attrs)
        fit = self.apply_containment(fit)
        # Reset the Sherpa session before storing the result.
        sau.clean()
        self.fit = fit
Exemplo n.º 5
0
    def _run_hspec_fit(self):
        """Run the gammapy.hspec fit

        Loads every PHA file in ``self.pha``, restricts each dataset to
        the configured energy thresholds, attaches ``self.model`` as the
        source, and performs a joint WSTAT fit over all datasets.
        """

        log.info("Starting HSPEC")
        import sherpa.astro.ui as sau
        from ..hspec import wstat

        sau.set_conf_opt("max_rstat", 100)

        # Fit range converted to keV for Sherpa.
        thres_lo = self.energy_threshold_low.to('keV').value
        thres_hi = self.energy_threshold_high.to('keV').value
        sau.freeze(self.model.ref)

        list_data = []
        for pha in self.pha:
            # Dataset id taken from the PHA file name (presumably
            # characters 7:12 encode the run id -- TODO confirm).
            datid = pha.parts[-1][7:12]
            sau.load_data(datid, str(pha))
            sau.notice_id(datid, thres_lo, thres_hi)
            sau.set_source(datid, self.model)
            list_data.append(datid)

        wstat.wfit(list_data)
Exemplo n.º 6
0
 def test_bug38(self):
     """Regression test for bug #38: regroup after a notice filter."""
     idval = '3c273'
     ui.load_pha(idval, self.pha3c273)
     ui.notice_id(idval, 0.3, 2)
     ui.group_counts(idval, 30)
     ui.group_counts(idval, 15)
Exemplo n.º 7
0
def test_more_ui_bug38(make_data_path):
    """Regression test for bug #38: regroup after a notice filter."""
    pha = make_data_path('3c273.pi')
    ui.load_pha('3c273', pha)
    ui.notice_id('3c273', 0.3, 2)
    for ncounts in (30, 15):
        ui.group_counts('3c273', ncounts)
Exemplo n.º 8
0
 def test_bug38(self):
     """Regression test for bug #38: group twice on a filtered PHA."""
     ui.load_pha('3c273', self.pha3c273)
     ui.notice_id('3c273', 0.3, 2)
     for nc in (30, 15):
         ui.group_counts('3c273', nc)
Exemplo n.º 9
0
 def test_bug38(self):
     """Regression test for bug #38: re-grouping must work after filtering."""
     dataset = "3c273"
     ui.load_pha(dataset, self.pha3c273)
     ui.notice_id(dataset, 0.3, 2)
     ui.group_counts(dataset, 30)
     ui.group_counts(dataset, 15)
Exemplo n.º 10
0
def test_load_pha2_compare_meg_order1(make_data_path):
    """Do we read in the MEG +/-1 orders?"""

    # The MEG -1 order is dataset 9
    # The MEG +1 order is dataset 10
    #
    pha2file = make_data_path('3c120_pha2')
    meg_p1file = make_data_path('3c120_meg_1.pha')
    meg_m1file = make_data_path('3c120_meg_-1.pha')

    # Load the single-order files under string ids first, so we can
    # later compare them against the pha2-loaded numeric datasets.
    ui.load_pha('meg_p1', meg_p1file)
    ui.load_pha('meg_m1', meg_m1file)

    orig_ids = set(ui.list_data_ids())
    assert 'meg_p1' in orig_ids
    assert 'meg_m1' in orig_ids

    # Loading a PHA2 file creates numeric datasets 1..12.
    ui.load_pha(pha2file)

    # Attach the ARF/RMF responses to the two MEG first-order datasets.
    for n, lbl in zip([9, 10], ["-1", "1"]):
        h = '3c120_meg_{}'.format(lbl)
        ui.load_arf(n, make_data_path(h + '.arf'))
        ui.load_rmf(n, make_data_path(h + '.rmf'))

    # check that loading the pha2 file doesn't overwrite existing
    # data
    new_ids = set(ui.list_data_ids())

    for i in range(1, 13):
        orig_ids.add(i)

    assert orig_ids == new_ids

    # Check that the same model gives the same statistic
    # value; this should check that the data and response are
    # read in, that grouping and filtering work, and that
    # model evaluation is the same, without having to
    # check these steps individually.
    #
    # The model is not meant to be physically meaningful,
    # just one that reasonably represents the data and
    # can be evaluated without requiring XSPEC.
    #
    pmdl = ui.create_model_component('powlaw1d', 'pmdl')
    pmdl.gamma = 0.318
    pmdl.ampl = 2.52e-3

    # Apply identical analysis settings, grouping, filtering, and
    # source model to the numeric and string-id datasets.
    ncts = 20
    for i in [9, 10, "meg_m1", "meg_p1"]:
        ui.set_analysis(i, 'wave')
        ui.group_counts(i, ncts)
        ui.notice_id(i, 2, 12)
        ui.set_source(i, pmdl)

    ui.set_stat('chi2datavar')
    s9 = ui.calc_stat(9)
    s10 = ui.calc_stat(10)

    sm1 = ui.calc_stat('meg_m1')
    sp1 = ui.calc_stat('meg_p1')

    # Since these should be the same, we use an equality test
    # rather than approximation. At least until it becomes
    # a problem.
    #
    assert s9 == sm1
    assert s10 == sp1

    # The values were calculated using CIAO 4.9, Linux64, with
    # Python 3.5.
    #
    assert s9 == pytest.approx(1005.4378559390879)
    assert s10 == pytest.approx(1119.980439489647)
Exemplo n.º 11
0
def fit_with_sherpa(model, data, trans, rows,
                    ranges=None, errs=None, shmod=1, method='levmar'):
    """Fit a multi-component Gaussian+continuum model to *data* with Sherpa.

    Parameters
    -----------
    model : pandas DataFrame object.
        Must either have a 3-level index as created by grism, or at least a
        one-level index identifying the different components. If a three-level
        index is passed, it must either contain only one value of the
        Transition and Row values, or the 'trans' and 'row' kwargs must be
        passed in the call.
    data : numpy array or pandas Series or DataFrame.
        The data is assumed to have at least two columns containing first
        wavelength, then data. An optional third column can contain errors.
        If no such column exists, the errors are assumed to be 1.
    trans : string
        The desired value of the 'Transition' level of the model dataframe.
    rows : string
        The desired value of the 'Rows' level of the model index.
    ranges : list of (min, max) tuples, optional
        Determines which wavelength ranges will be included in the fit. If
        None or an empty list is passed, the entire range of the data is
        used. Default: None.
    errs : optional
        Currently unused; kept for interface compatibility.
    shmod : integer
        Identifier for the Sherpa model in case one wants to have more models
        loaded in memory simultaneously. If not, it will be overwritten each
        time fit_with_sherpa() is run.
        Default: 1
    method : string
        optimization to be used by Sherpa. Can be either 'levmar', 'neldermead'
        or 'moncar'. See Sherpa documentation for more detail.
        Default: 'levmar'.

    Returns
    -------
    pandas DataFrame: a copy of *model* (the fit results themselves are
    printed; see NOTE below).
    """
    # First of all, check if Sherpa is even installed.
    try:
        import sherpa.astro.ui as ai
        import sherpa.models as sm
    except ImportError:
        print(" ".join("The Sherpa fitting software must be installed to use \
            this functionality.".split()))
        raise

    # Avoid the shared mutable-default pitfall: fresh list per call.
    if ranges is None:
        ranges = []

    # Sherpa isn't good at staying clean, need to help it.
    # NOTE(review): the original deleted `shmod` once per existing id;
    # deleting each listed id appears to be the intent -- confirm.
    for old_id in ai.list_model_ids():
        ai.delete_model(old_id)
        ai.delete_data(old_id)

    # Load data first, 'cause Sherpa wants it so.
    # Columns are (wavelength, flux[, error]); shape[1] is the column
    # count. The original tested shape[0], i.e. the number of rows,
    # which would only ever match for a 2-row array.
    if data.shape[1] == 2:
        ai.load_arrays(shmod, data[:, 0], data[:, 1])
    elif data.shape[1] > 2:
        ai.load_arrays(shmod, data[:, 0], data[:, 1], data[:, 2])

    # Initialize model by setting continuum.
    Contin = sm.Const1D('Contin')
    Contin.c0 = model.xs('Contin')['Ampl']
    ai.set_model(shmod, Contin)

    # Add one Gaussian per remaining model component.
    for i in model.index:
        if i == 'Contin':
            continue
        # use the identifier as letter (good idea?)
        # NOTE(review): `.ix` was removed from pandas; `.loc` is the
        # label-based replacement for index labels from model.index.
        name = model.loc[i]['Identifier']
        comp = ai.gauss1d(name)
        comp.ampl = model.loc[i]['Ampl']
        comp.pos = model.loc[i]['Pos']
        comp.fwhm = model.loc[i]['Sigma']
        ai.set_model(shmod, ai.get_model(shmod) + comp)
        ai.show_model(shmod)  # For testing...
    print('  ')
    print(ai.get_model(shmod))

    # Set ranges included in fit.
    # First, unset all; then notice the requested intervals (or the
    # whole range when none were given).
    ai.ignore_id(shmod)
    if not ranges:
        ai.notice_id(shmod)
    else:
        for lo, hi in ranges:
            ai.notice_id(shmod, lo, hi)

    # Set optimization algorithm
    ai.set_method(method)
    # Work on a copy so the caller's model is left untouched.
    new_model = model.copy()
    # Perform the fit:
    ai.fit(shmod)
    print(ai.get_fit_results())
    # NOTE(review): fit results are printed but never written back into
    # new_model -- the returned frame is an unmodified copy.
    print(model)
    return new_model