Example #1
def test_simul_stat_fit(stat, hide_logging, reset_xspec, setup_two):
    data1 = setup_two['data_pi2278']
    data2 = setup_two['data_pi2286']
    model1 = setup_two['model_pi2278']
    model2 = setup_two['model_pi2286']
    data = DataSimulFit(name='data1data2', datasets=[data1, data2])
    model = SimulFitModel(name='model1model2', parts=[model1, model2])
    fit = Fit(data=data, model=model, stat=stat(), method=NelderMead())
    result = fit.fit()

    _fit_simul_datavarstat_results_bench = {
        'succeeded': 1,
        'numpoints': 18,
        'dof': 15,
        'istatval': 56609.70689926489,
        'statval': 126.1509268988255,
        'parvals': numpy.array(
            [0.8417576197443695, 1.6496933246579941, 0.2383939869443424])
    }

    compare_results(_fit_simul_datavarstat_results_bench, result)
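# compare_results() is a helper defined elsewhere in the test module. Below is
# a minimal sketch of the kind of checks such a helper typically performs
# (illustrative only; the name compare_results_sketch, the `bench`/`tol`
# arguments and the exact fields checked are assumptions, not the actual
# implementation).
import pytest

def compare_results_sketch(bench, results, tol=1e-6):
    # integer bookkeeping fields must match exactly
    for key in ('succeeded', 'numpoints', 'dof'):
        assert bench[key] == int(getattr(results, key))
    # statistic values and best-fit parameters are compared to a tolerance
    for key in ('istatval', 'statval', 'parvals'):
        assert getattr(results, key) == pytest.approx(bench[key], rel=tol)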
Example #2
def mwl_fit_low_level():
    """Use high-level Sherpa API.

    Low-level = no session, classes.

    Example: http://python4astronomers.github.io/fitting/low-level.html
    """
    fermi_data = FermiData().sherpa_data
    hess_data = IACTData().sherpa_data

    # spec_model = PowLaw1D('spec_model')
    spec_model = LogParabola('spec_model')
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    data = DataSimulFit(name='global_data', datasets=[fermi_data, hess_data])
    # TODO: Figure out how to notice using the low-level API
    # data.notice(mins=1e-3, maxes=None, axislist=None)
    model = SimulFitModel(name='global_model', parts=[spec_model, spec_model])
    stat = FermiStat()
    method = LevMar()
    fit = Fit(data=data, model=model, stat=stat, method=method)
    result = fit.fit()

    # IPython.embed()
    return Bunch(results=result, model=spec_model)
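# Hedged sketch addressing the TODO above (standalone, not from the original
# example): one way to restrict the fitted range with the low-level API is to
# call notice() on each dataset before bundling them into DataSimulFit. This
# assumes the datasets are Data1D-like objects exposing notice(lo, hi).
import numpy as np
from sherpa.data import Data1D, DataSimulFit

x = np.logspace(-4, 0, 50)
d1 = Data1D('d1', x, np.ones_like(x))
d2 = Data1D('d2', x, np.ones_like(x))

for d in (d1, d2):
    d.notice(1e-3, None)   # keep only points with x >= 1e-3 in the fit

data = DataSimulFit(name='global_data', datasets=[d1, d2])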
Example #3
    def fit(self, current):
        self._fit.model.thawedpars = current

        # nm = NelderMead()
        # nm.config['iquad'] = 0
        # nm.config['finalsimplex'] = 1

        #lm = LevMar()
        #lm.config['maxfev'] = 5

        cv = Covariance()

        # Re-use the fit setup created before get_draws() was called, so the
        # user does not have to pass in the fitting method and method options.
        fit = Fit(self._fit.data, self._fit.model, self._fit.stat, self._fit.method,
                  cv)

        fit_result = fit.fit()
        covar_result = fit.est_errors()
        sigma = np.array(covar_result.extra_output)

        if np.isnan(sigma).any():
            raise CovarError("NaNs found in covariance matrix")

        # cache the fitting scales
        self._sigma = sigma
Example #4
 def test_mycash_nobkgdata_modelhasbkg(self):
     data = self.bkg
     fit = Fit(data, self.model, MyCashWithBkg(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_mycashnobkg_results_bench,
                          results,
                          tol=1.0e-3)
Example #5
    def setUp(self):
        try:
            from sherpa.astro.io import read_pha
            from sherpa.astro.xspec import XSwabs, XSpowerlaw
        except:
            return
        # self.startdir = os.getcwd()
        self.old_level = logger.getEffectiveLevel()
        logger.setLevel(logging.CRITICAL)

        pha = self.make_path("refake_0934_1_21_1e4.fak")
        # rmf = self.make_path("ccdid7_default.rmf")
        # arf = self.make_path("quiet_0934.arf")

        self.simarf = self.make_path("aref_sample.fits")
        self.pcaarf = self.make_path("aref_Cedge.fits")

        data = read_pha(pha)
        data.ignore(None, 0.3)
        data.ignore(7.0, None)

        rsp = Response1D(data)
        self.abs1 = XSwabs('abs1')
        self.p1 = XSpowerlaw('p1')
        model = rsp(self.abs1 * self.p1)

        self.fit = Fit(data, model, CStat(), NelderMead(), Covariance())
Example #6
def setup():
    data = Data1D('fake', _x, _y, _err)

    g1 = Gauss1D('g1')
    g1.fwhm.set(1.0, _tiny, _max, frozen=False)
    g1.pos.set(1.0, -_max, _max, frozen=False)
    g1.ampl.set(1.0, -_max, _max, frozen=False)
    p1 = PowLaw1D('p1')
    p1.gamma.set(1.0, -10, 10, frozen=False)
    p1.ampl.set(1.0, 0.0, _max, frozen=False)
    p1.ref.set(1.0, -_max, _max, frozen=True)
    model = p1 + g1

    method = LevMar()
    method.config['maxfev'] = 10000
    method.config['ftol'] = float(_eps)
    method.config['epsfcn'] = float(_eps)
    method.config['gtol'] = float(_eps)
    method.config['xtol'] = float(_eps)
    method.config['factor'] = float(100)

    fit = Fit(data, model, Chi2DataVar(), method, Covariance())
    results = fit.fit()

    for key in ["succeeded", "numpoints", "nfev"]:
        assert _fit_results_bench[key] == int(getattr(results, key))

    for key in ["rstat", "qval", "statval", "dof"]:
        # used rel and abs tol of 1e-7 with numpy allclose
        assert float(getattr(results,
                             key)) == pytest.approx(_fit_results_bench[key])

    for key in ["parvals"]:
        try:
            # used rel and abs tol of 1e-4 with numpy allclose
            assert getattr(results,
                           key) == pytest.approx(_fit_results_bench[key])
        except AssertionError:
            print('parvals bench: ', _fit_results_bench[key])
            print('parvals fit:   ', getattr(results, key))
            print('results', results)
            raise

    fields = [
        'data', 'model', 'method', 'fit', 'results', 'covresults', 'dof', 'mu',
        'cov', 'num'
    ]
    out = namedtuple('Results', fields)

    out.data = data
    out.model = model
    out.method = method
    out.fit = fit
    out.results = results
    out.covresults = fit.est_errors()
    out.dof = results.dof
    out.mu = numpy.array(results.parvals)
    out.cov = numpy.array(out.covresults.extra_output)
    out.num = 10
    return out
Example #7
    def fit(self, current):
        self._fit.model.thawedpars = current

        # nm = NelderMead()
        # nm.config['iquad'] = 0
        # nm.config['finalsimplex'] = 1

        #lm = LevMar()
        #lm.config['maxfev'] = 5

        cv = Covariance()

        # Re-use the fit setup created before get_draws() was called, so the
        # user does not have to pass in the fitting method and method options.
        fit = Fit(self._fit.data, self._fit.model, self._fit.stat,
                  self._fit.method, cv)

        fit_result = fit.fit()
        covar_result = fit.est_errors()
        sigma = np.array(covar_result.extra_output)

        if np.isnan(sigma).any():
            raise CovarError("NaNs found in covariance matrix")

        # cache the fitting scales
        self._sigma = sigma
Example #8
    def _fit_sherpa(self):
        """Wrapper around sherpa minimizer."""
        from sherpa.fit import Fit
        from sherpa.data import Data1DInt
        from sherpa.optmethods import NelderMead
        from .sherpa_utils import SherpaModel, SherpaStat

        binning = self.obs_list[0].e_reco
        # The Sherpa data object is not used in the fit. It is set to the
        # first observation for debugging purposes; see below.
        data = self.obs_list[0].on_vector.data.data.value
        data = Data1DInt('Dummy data', binning[:-1].value, binning[1:].value,
                         data)
        # DEBUG
        # from sherpa.models import PowLaw1D
        # from sherpa.stats import Cash
        # model = PowLaw1D('sherpa')
        # model.ref = 0.1
        # fit = Fit(data, model, Cash(), NelderMead())

        # NOTE: We cannot use the Levenberg-Marquardt optimizer in Sherpa
        # because it relies on the fvec return value of the fit statistic (we
        # return None). Computing fvec is not straightforward: it is not just
        # the per-bin statistic. E.g. for a Cash fit the Sherpa stat computes
        # it according to cstat (an illustrative user-stat sketch follows this
        # example);
        # see https://github.com/sherpa/sherpa/blob/master/sherpa/include/sherpa/stats.hh#L122

        self._sherpa_fit = Fit(data, SherpaModel(self), SherpaStat(self),
                               NelderMead())
        fitresult = self._sherpa_fit.fit()
        log.debug(fitresult)
        self._make_fit_result(self.model.parameters)
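# Hedged sketch expanding on the NOTE above (standalone, not part of the
# wrapper class): a user-defined statistic that does return a per-bin fvec,
# which is what LevMar needs. This assumes the classic sherpa.stats.UserStat
# callback convention in which statfunc returns (statval, fvec).
import numpy as np
from sherpa.stats import UserStat

def _chi2_like(data, model, staterror=None, syserror=None, weight=None):
    # per-bin residuals; LevMar minimizes the sum of fvec**2
    err = staterror if staterror is not None else np.ones_like(data)
    fvec = (np.asarray(data) - np.asarray(model)) / err
    return float(np.sum(fvec * fvec)), fvec

my_stat = UserStat(statfunc=_chi2_like, name='chi2_like')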
Example #9
def test_intproj(old_numpy_printing, override_plot_backend):
    p = plot.IntervalProjection()
    r = p._repr_html_()

    check_empty(r, 'IntervalProjection', nsummary=8)

    x = np.arange(5, 8, 0.5)
    y = np.asarray([2, 3, 4, 5, 4, 3])
    dy = y / 2
    d = Data1D('n n', x, y, staterror=dy)

    m = Const1D()

    fit = Fit(d, m, stat=Chi2())
    fr = fit.fit()
    assert fr.succeeded

    p.prepare(min=1, max=6, nloop=10)
    p.calc(fit, m.c0)

    r = p._repr_html_()
    assert r is not None

    if plot.backend.name == 'pylab':
        assert '<summary>IntervalProjection</summary>' in r
        assert '<svg ' in r
        return

    assert '<summary>IntervalProjection (8)</summary>' in r

    assert '<div class="dataname">x</div><div class="dataval">[ 1.        1.555556  2.111111  2.666667  3.222222  3.777778  4.333333  4.888889\n  5.444444  6.      ]</div>' in r
    assert '<div class="dataname">nloop</div><div class="dataval">10</div>' in r
Example #10
 def test_mycash_data_and_model_donothave_bkg(self):
     data = self.bkg
     fit = Fit(data, self.model, MyCashNoBkg(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_mycashnobkg_results_bench,
                          results,
                          tol=1.0e-3)
Example #11
    def call(self, niter, seed):

        pars = {}
        pars_index = {}
        index = 0
        for par in self.model.pars:
            if par.frozen is False:
                name = '%s.%s' % (par.modelname, par.name)
                pars_index[index] = name
                pars[name] = []
                index += 1

        data = self.data
        y = data.y
        x = data.x
        if type(data) == Data1DAsymmetricErrs:
            y_l = y - data.elo
            y_h = y + data.ehi
        elif isinstance(data, (Data1D,)):
            y_l = data.staterror
            y_h = data.staterror
        else:
            msg ="{0} {1}".format(ReSampleData.__name__, type(data))
            raise NotImplementedError(msg)

        numpy.random.seed(seed)
        for j in range(niter):
            ry = []
            for i in range(len(y_l)): 
                a = y_l[i]
                b = y_h[i]
                r = -1
                while r < a or r > b:
                    sigma = b - y[i]
                    u = numpy.random.random_sample()
                    if u < 0.5:
                        sigma = y[i] - a
                    r = numpy.random.normal(loc=y[i], scale=sigma, size=None)
                    if u < 0.5 and r > y[i]:
                        r = -1
                    if u > 0.5 and r < y[i]:
                        r = -1
                ry.append(r)

            # a fit is performed for each simulated dataset
            fit = Fit(Data1D('tmp', x, ry), self.model, LeastSq(), LevMar())
            fit_result = fit.fit()

            for index, val in enumerate(fit_result.parvals):
                name = pars_index[index]
                pars[name].append(val)

        result = {}
        for index, name in pars_index.items():
            avg = numpy.average(pars[name])
            std = numpy.std(pars[name])
            print(name, ': avg =', avg, ', std =', std)
            result[name] = pars[name]

        return result
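# Hedged standalone illustration (not part of the class above) of the
# per-point draw used in the loop: pick the lower or upper error bar with
# equal probability, then draw from a normal of that width, keeping only
# draws that fall on the chosen side of y and inside [y_lo, y_hi].
import numpy as np

def split_normal_draw(y, y_lo, y_hi, rng=np.random):
    r = y_lo - 1.0                      # force at least one draw
    while r < y_lo or r > y_hi:
        u = rng.random_sample()
        sigma = (y - y_lo) if u < 0.5 else (y_hi - y)
        r = rng.normal(loc=y, scale=sigma)
        # reject draws on the wrong side of y for the chosen sigma
        if (u < 0.5 and r > y) or (u >= 0.5 and r < y):
            r = y_lo - 1.0
    return r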
Example #12
 def test_same_cache(self):
     poly = Polynom1D()
     poly.pars[1].thaw()
     sdata = DataSimulFit('d1d2d3', (self.d1, self.d2, self.d3))
     smodel = SimulFitModel('same', (poly, poly, poly))
     sfit = Fit(sdata, smodel, method=NelderMead(), stat=Cash())
     result = sfit.fit()
     self.compare_results(self._fit_same_poly_bench, result)
Example #13
 def test_same_cache(self):
     poly = Polynom1D()
     poly.pars[1].thaw()
     sdata = DataSimulFit('d1d2d3', (self.d1, self.d2, self.d3))
     smodel = SimulFitModel('same', (poly, poly, poly))
     sfit = Fit(sdata, smodel, method=NelderMead(), stat=Cash())
     result = sfit.fit()
     self.compare_results(self._fit_same_poly_bench, result)
Example #14
    def fit(self):
        """Fit spectrum"""
        from sherpa.fit import Fit
        from sherpa.models import ArithmeticModel, SimulFitModel
        from sherpa.astro.instrument import Response1D
        from sherpa.data import DataSimulFit

        # Translate model to sherpa model if necessary
        if isinstance(self.model, models.SpectralModel):
            model = self.model.to_sherpa()
        else:
            model = self.model

        if not isinstance(model, ArithmeticModel):
            raise ValueError('Model not understood: {}'.format(model))

        # Make model amplitude O(1e0)
        val = model.ampl.val * self.FLUX_FACTOR ** (-1)
        model.ampl = val

        if self.fit_range is not None:
            log.info('Restricting fit range to {}'.format(self.fit_range))
            fitmin = self.fit_range[0].to('keV').value
            fitmax = self.fit_range[1].to('keV').value

        # Loop over observations
        pha = list()
        folded_model = list()
        nobs = len(self.obs_list)
        for ii in range(nobs):
            temp = self.obs_list[ii].to_sherpa()
            if self.fit_range is not None:
                temp.notice(fitmin, fitmax)
                if temp.get_background() is not None:
                    temp.get_background().notice(fitmin, fitmax)
            temp.ignore_bad()
            if temp.get_background() is not None:
                temp.get_background().ignore_bad()
            pha.append(temp)
            # Forward folding
            resp = Response1D(pha[ii])
            folded_model.append(resp(model) * self.FLUX_FACTOR)

        data = DataSimulFit('simul fit data', pha)
        fitmodel = SimulFitModel('simul fit model', folded_model)

        log.debug(fitmodel)
        fit = Fit(data, fitmodel, self.statistic)
        fitresult = fit.fit()
        log.debug(fitresult)
        # The model instance passed to the Fit now holds the best fit values
        covar = fit.est_errors()
        log.debug(covar)

        for ii in range(nobs):
            efilter = pha[ii].get_filter()
            shmodel = fitmodel.parts[ii]
            self.result[ii].fit = _sherpa_to_fitresult(shmodel, covar, efilter, fitresult)
Example #15
 def test_warning(self):
     ui.load_ascii_with_errors(1, self.gro_fname)
     data = ui.get_data(1)
     powlaw1d = PowLaw1D('p1')
     ui.set_model(powlaw1d)
     fit = Fit(data, powlaw1d)
     results = fit.fit()
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         ui.resample_data(1, 3)
         assert len(w) == 0
Example #16
 def test_gauss_gauss(self):
     g1, g2 = Gauss1D(), Gauss1D()
     g1.fwhm = 1.3
     g1.pos = 1.5
     g2.fwhm = 4.
     g2.pos = -2.0
     sdata = DataSimulFit('d4d5', (self.d4, self.d5))
     smodel = SimulFitModel('g1g2', (g1, g2))
     sfit = Fit(sdata, smodel, method=LevMar(), stat=LeastSq())
     result = sfit.fit()
     self.compare_results(self._fit_g2g2_bench, result)
Example #17
def test_rsp1d_matrix_pha_zero_energy_bin():
    """What happens when the first bin starts at 0, with replacement.

    Unlike test_rsp1d_delta_pha_zero_energy_bin this directly
    calls Response1D to create the model.
    """

    ethresh = 1.0e-5

    rdata = create_non_delta_rmf()

    # hack the first bin to have 0 energy
    rdata.energ_lo[0] = 0.0

    # PHA and ARF have different exposure times
    exposure_arf = 0.1
    exposure_pha = 2.4

    specresp = create_non_delta_specresp()

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        adata = create_arf(rdata.energ_lo,
                           rdata.energ_hi,
                           specresp,
                           exposure=exposure_arf,
                           ethresh=ethresh)

    validate_zero_replacement(ws, 'ARF', 'user-arf', ethresh)

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha',
                  channel=channels,
                  counts=counts,
                  exposure=exposure_pha)
    pha.set_rmf(rdata)
    pha.set_arf(adata)

    pha.set_analysis('energy')

    mdl = MyPowLaw1D()

    rsp = Response1D(pha)
    wrapped = rsp(mdl)

    # Evaluate the statistic / model. The value was calculated using
    # commit a65fb94004664eab219cc09652172ffe1dad80a6 on a linux
    # system (Ubuntu 17.04).
    #
    f = Fit(pha, wrapped)
    ans = f.calc_stat()
    assert ans == pytest.approx(37971.8716151947)
Example #18
def test_cache():
    """To make sure that the runtime fit(cache=???) works"""

    x = np.array([1.0, 2.0, 3.0])
    model = MyCacheTestModel()
    par = np.array([1.1, 2.0, 3.0])
    y = model.calc(par, x)

    data = Data1D('tmp', x, y)
    fit = Fit(data, model, LeastSq())
    fit.fit(cache=False)
Example #19
 def test_wstat(self):
     fit = Fit(self.data, self.model, WStat(), LevMar())
     results = fit.fit()
     # On a local linux machine I have to bump the tolerance to
     # 3e-4, but this isn't seen on Travis. The fit isn't
     # "great", so it may be that the results are sensitive to
     # numerical differences (e.g. as introduced with updated
     # compilers).
     # tol = 3e-4
     tol = 1e-6  # TODO: investigate difference
     self.compare_results(self._fit_wstat_results_bench, results, tol=tol)
Example #20
 def test_gauss_gauss(self):
     g1, g2 = Gauss1D(), Gauss1D()
     g1.fwhm = 1.3
     g1.pos = 1.5
     g2.fwhm = 4.
     g2.pos = -2.0
     sdata = DataSimulFit('d4d5', (self.d4, self.d5))
     smodel = SimulFitModel('g1g2', (g1, g2))
     sfit = Fit(sdata, smodel, method=LevMar(), stat=LeastSq())
     result = sfit.fit()
     self.compare_results(self._fit_g2g2_bench, result)
Example #21
 def test_diff_cache(self):
     poly1 = Polynom1D()
     poly2 = Polynom1D()
     poly3 = Polynom1D()
     poly1.pars[1].thaw()
     poly2.pars[1].thaw()
     poly3.pars[1].thaw()
     sdata = DataSimulFit('d123', (self.d1, self.d2, self.d3))
     smodel = SimulFitModel('diff', (poly1, poly2, poly3))
     sfit = Fit(sdata, smodel, method=NelderMead(), stat=Cash())
     result = sfit.fit()
     self.compare_results(self._fit_diff_poly_bench, result)
Example #22
 def test_diff_cache(self):
     poly1 = Polynom1D()
     poly2 = Polynom1D()
     poly3 = Polynom1D()
     poly1.pars[1].thaw()
     poly2.pars[1].thaw()
     poly3.pars[1].thaw()
     sdata = DataSimulFit('d123', (self.d1, self.d2, self.d3))
     smodel = SimulFitModel('diff', (poly1, poly2, poly3))
     sfit = Fit(sdata, smodel, method=NelderMead(), stat=Cash())
     result = sfit.fit()
     self.compare_results(self._fit_diff_poly_bench, result)
Example #23
 def test_simul_stat_fit(self):
     data1 = self.data_pi2278
     data2 = self.data_pi2286
     model1 = self.model_mult
     model2 = self.model_mult
     data = DataSimulFit(name='data1data2', datasets=[data1, data2])
     model = SimulFitModel(name='model1model2', parts=[model1, model2])
     fit = Fit(data=data, model=model, stat=MyChiNoBkg(),
               method=NelderMead())
     result = fit.fit()
     self.compare_results(self._fit_simul_datavarstat_results_bench,
                          result)
Example #24
 def test_simul_stat_fit(self):
     data1 = self.data_pi2278
     data2 = self.data_pi2286
     model1 = self.model_mult
     model2 = self.model_mult
     data = DataSimulFit(name='data1data2', datasets=[data1, data2])
     model = SimulFitModel(name='model1model2', parts=[model1, model2])
     fit = Fit(data=data, model=model, stat=MyChiNoBkg(),
               method=NelderMead())
     result = fit.fit()
     self.compare_results(self._fit_simul_datavarstat_results_bench,
                          result)
Example #25
def test_rsp1d_matrix_pha_zero_energy_bin():
    """What happens when the first bin starts at 0, with replacement.

    Unlike test_rsp1d_delta_pha_zero_energy_bin this directly
    calls Response1D to create the model.
    """

    ethresh = 1.0e-5

    rdata = create_non_delta_rmf()

    # hack the first bin to have 0 energy
    rdata.energ_lo[0] = 0.0

    # PHA and ARF have different exposure times
    exposure_arf = 0.1
    exposure_pha = 2.4

    specresp = create_non_delta_specresp()

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        adata = create_arf(rdata.energ_lo,
                           rdata.energ_hi,
                           specresp,
                           exposure=exposure_arf,
                           ethresh=ethresh)

    validate_zero_replacement(ws, 'ARF', 'user-arf', ethresh)

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure_pha)
    pha.set_rmf(rdata)
    pha.set_arf(adata)

    pha.set_analysis('energy')

    mdl = MyPowLaw1D()

    rsp = Response1D(pha)
    wrapped = rsp(mdl)

    # Evaluate the statistic / model. The value was calculated using
    # commit a65fb94004664eab219cc09652172ffe1dad80a6 on a linux
    # system (Ubuntu 17.04).
    #
    f = Fit(pha, wrapped)
    ans = f.calc_stat()
    assert ans == pytest.approx(37971.8716151947)
Example #26
def test_data2d_int_eval_model_to_fit(array_sizes_fixture):
    from sherpa.fit import Fit
    from sherpa.optmethods import LevMar
    from sherpa.stats import Chi2
    from sherpa.models import Gauss2D

    x0, x1, dx, y = array_sizes_fixture
    data2 = Data2DInt('name', x0.flatten(), x0.flatten() + dx, x1.flatten(), x1.flatten() + dx, y.flatten(),
                      staterror=numpy.sqrt(y).flatten())

    model2 = Gauss2D()
    fitter = Fit(data2, model2, Chi2(), LevMar())
    fitter.fit()  # Failed in Sherpa 4.11.0
Example #27
def test_regproj(old_numpy_printing, override_plot_backend):
    p = plot.RegionProjection()
    r = p._repr_html_()

    check_empty(r, 'RegionProjection', nsummary=13)

    x = np.arange(5, 8, 0.5)
    y = np.asarray([2, 3, 4, 5, 4, 3])
    dy = y / 2
    d = Data1D('n n', x, y, staterror=dy)

    m = Polynom1D()
    m.c1.thaw()

    fit = Fit(d, m, stat=Chi2())
    fr = fit.fit()
    assert fr.succeeded

    p.prepare(min=(-2, -1), max=(2, 2), nloop=(10, 20))
    p.calc(fit, m.c0, m.c1)

    r = p._repr_html_()
    assert r is not None

    if plot_backend_is("pylab"):
        assert "<summary>RegionProjection</summary>" in r
        assert "<svg " in r
        return

    assert "<summary>RegionProjection (13)</summary>" in r

    # Issue #1372 shows that the numbers here can depend on the platform; as
    # this test is not about whether the fit converged to the same solution,
    # the checks are very basic. An alternative would be to just place
    # the values from the fit object into the strings, but then there is
    # the problem that this test currently requires old_numpy_printing,
    # so the results would not necessarily match.
    #
    assert '<div class="dataname">parval0</div><div class="dataval">-0.5' in r
    assert '<div class="dataname">parval1</div><div class="dataval">0.5' in r
    assert '<div class="dataname">sigma</div><div class="dataval">(1, 2, 3)</div>' in r

    # These values may depend on the platform so only very-limited check.
    #
    assert '<div class="dataname">y</div><div class="dataval">[ 30' in r
    assert '<div class="dataname">levels</div><div class="dataval">[  3.6' in r

    assert '<div class="dataname">min</div><div class="dataval">[-2, -1]</div>' in r
    assert '<div class="dataname">max</div><div class="dataval">[2, 2]</div>' in r

    assert '<div class="dataname">nloop</div><div class="dataval">(10, 20)</div>' in r
Example #28
    def setUp(self):
        data = Data1D('fake', self._x, self._y, self._err)

        g1 = Gauss1D('g1')
        g1.fwhm.set(1.0, _tiny, _max, frozen=False)
        g1.pos.set(1.0, -_max, _max, frozen=False)
        g1.ampl.set(1.0, -_max, _max, frozen=False)
        p1 = PowLaw1D('p1')
        p1.gamma.set(1.0, -10, 10, frozen=False)
        p1.ampl.set(1.0, 0.0, _max, frozen=False)
        p1.ref.set(1.0, -_max, _max, frozen=True)
        model = p1 + g1

        method = LevMar()
        method.config['maxfev'] = 10000
        method.config['ftol'] = float(_eps)
        method.config['epsfcn'] = float(_eps)
        method.config['gtol'] = float(_eps)
        method.config['xtol'] = float(_eps)
        method.config['factor'] = float(100)

        self.fit = Fit(data, model, Chi2DataVar(), method, Covariance())
        results = self.fit.fit()

        for key in ["succeeded", "numpoints", "nfev"]:
            assert self._fit_results_bench[key] == int(getattr(results, key))

        for key in ["rstat", "qval", "statval", "dof"]:
            assert numpy.allclose(float(self._fit_results_bench[key]),
                                  float(getattr(results, key)),
                                  1.e-7, 1.e-7)

        for key in ["parvals"]:
            try:
                assert numpy.allclose(self._fit_results_bench[key],
                                      getattr(results, key),
                                      1.e-4, 1.e-4)
            except AssertionError:
                print('parvals bench: ', self._fit_results_bench[key])
                print('parvals fit:   ', getattr(results, key))
                print('results', results)
                raise

        covresults = self.fit.est_errors()
        self.dof = results.dof
        self.mu = numpy.array(results.parvals)
        self.cov = numpy.array(covresults.extra_output)
        self.num = 10
Example #29
    def setUp(self):
        try:
            from sherpa.astro.io import read_pha
            from sherpa.astro.xspec import XSwabs, XSpowerlaw
        except:
            return
        #self.startdir = os.getcwd()
        self.old_level = logger.getEffectiveLevel()
        logger.setLevel(logging.CRITICAL)

        datadir = SherpaTestCase.datadir
        if datadir is None:
            return

        pha = os.path.join(datadir, "refake_0934_1_21_1e4.fak")
        rmf = os.path.join(datadir, "ccdid7_default.rmf")
        arf = os.path.join(datadir, "quiet_0934.arf")
        
        self.simarf = os.path.join(datadir, "aref_sample.fits")
        self.pcaarf = os.path.join(datadir, "aref_Cedge.fits")

        data = read_pha(pha)
        data.ignore(None, 0.3)
        data.ignore(7.0, None)

        rsp = Response1D(data)
        self.abs1 = XSwabs('abs1')
        self.p1 = XSpowerlaw('p1')
        model = rsp(self.abs1 * self.p1)
        
        self.fit = Fit(data, model, CStat(), NelderMead(), Covariance())
Example #30
    def _fit_sherpa(self):
        """Wrapper around sherpa minimizer."""
        from sherpa.fit import Fit
        from sherpa.data import Data1DInt
        from sherpa.optmethods import NelderMead
        from .sherpa_utils import SherpaModel, SherpaStat

        binning = self.obs_list[0].e_reco
        # The Sherpa data object is not used in the fit. It is set to the
        # first observation for debugging purposes; see below.
        data = self.obs_list[0].on_vector.data.data.value
        data = Data1DInt('Dummy data', binning[:-1].value,
                         binning[1:].value, data)
        # DEBUG
        # from sherpa.models import PowLaw1D
        # from sherpa.stats import Cash
        # model = PowLaw1D('sherpa')
        # model.ref = 0.1
        # fit = Fit(data, model, Cash(), NelderMead())

        # NOTE: We cannot use the Levenberg-Marquardt optimizer in Sherpa
        # because it relies on the fvec return value of the fit statistic (we
        # return None). Computing fvec is not straightforward: it is not just
        # the per-bin statistic. E.g. for a Cash fit the Sherpa stat computes
        # it according to cstat;
        # see https://github.com/sherpa/sherpa/blob/master/sherpa/include/sherpa/stats.hh#L122

        self._sherpa_fit = Fit(data,
                               SherpaModel(self),
                               SherpaStat(self),
                               NelderMead())
        fitresult = self._sherpa_fit.fit()
        log.debug(fitresult)
        self._make_fit_result()
Example #31
def setup(make_data_path):
    from sherpa.astro.io import read_pha
    from sherpa.astro.xspec import XSwabs, XSpowerlaw

    old_level = logger.getEffectiveLevel()
    logger.setLevel(logging.CRITICAL)

    pha = make_data_path("refake_0934_1_21_1e4.fak")

    simarf = make_data_path("aref_sample.fits")
    pcaarf = make_data_path("aref_Cedge.fits")

    data = read_pha(pha)
    data.ignore(None, 0.3)
    data.ignore(7.0, None)

    rsp = Response1D(data)
    abs1 = XSwabs('abs1')
    p1 = XSpowerlaw('p1')
    model = rsp(abs1 * p1)

    abs1.nh = 0.092886
    p1.phoindex = 0.994544
    p1.norm = 9.26369

    fit = Fit(data, model, CStat(), NelderMead(), Covariance())

    yield {'simarf': simarf,
           'pcaarf': pcaarf,
           'niter': 10,
           'fit': fit}

    # Reset the logger
    logger.setLevel(old_level)
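# Hedged usage sketch (not part of the fixture above): assuming setup() is
# registered as a pytest fixture (the decorator is not shown in the snippet),
# a test can request it by name and reuse the prepared Fit object. The test
# name and body below are illustrative.
def test_fit_from_setup(setup):
    fit = setup['fit']
    result = fit.fit()
    assert result.succeeded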
Example #32
def test_sherpa_crab_fit():
    from sherpa.models import NormGauss2D, PowLaw1D, TableModel, Const2D
    from sherpa.stats import Chi2ConstVar
    from sherpa.optmethods import LevMar
    from sherpa.fit import Fit
    from ..sherpa_ import CombinedModel3D

    filename = gammapy_extra.filename('experiments/sherpa_cube_analysis/counts.fits.gz')
    # Note: The cube is stored in an incorrect format
    counts = SkyCube.read(filename, format='fermi-counts')
    cube = counts.to_sherpa_data3d()

    # Set up exposure table model
    filename = gammapy_extra.filename('experiments/sherpa_cube_analysis/exposure.fits.gz')
    exposure_data = fits.getdata(filename)
    exposure = TableModel('exposure')
    exposure.load(None, exposure_data.ravel())

    # Freeze exposure amplitude
    exposure.ampl.freeze()

    # Setup combined spatial and spectral model
    spatial_model = NormGauss2D('spatial-model')
    spectral_model = PowLaw1D('spectral-model')
    source_model = CombinedModel3D(spatial_model=spatial_model, spectral_model=spectral_model)

    # Set starting values
    source_model.gamma = 2.2
    source_model.xpos = 83.6
    source_model.ypos = 22.01
    source_model.fwhm = 0.12
    source_model.ampl = 0.05

    model = 1E-9 * exposure * source_model  # 1E-9 flux factor

    # Fit
    fit = Fit(data=cube, model=model, stat=Chi2ConstVar(), method=LevMar())
    result = fit.fit()

    reference = [0.121556,
                 83.625627,
                 22.015564,
                 0.096903,
                 2.240989]

    assert_allclose(result.parvals, reference, rtol=1E-3)
Example #33
def test_sherpa_crab_fit():
    from sherpa.models import NormGauss2D, PowLaw1D, TableModel, Const2D
    from sherpa.stats import Chi2ConstVar
    from sherpa.optmethods import LevMar
    from sherpa.fit import Fit
    from ..sherpa_ import Data3D, CombinedModel3D

    filename = gammapy_extra.filename(
        'experiments/sherpa_cube_analysis/counts.fits.gz')
    counts = SkyCube.read(filename)
    cube = counts.to_sherpa_data3d()

    # Set up exposure table model
    filename = gammapy_extra.filename(
        'experiments/sherpa_cube_analysis/exposure.fits.gz')
    exposure_data = fits.getdata(filename)
    exposure = TableModel('exposure')
    exposure.load(None, exposure_data.ravel())

    # Freeze exposure amplitude
    exposure.ampl.freeze()

    # Setup combined spatial and spectral model
    spatial_model = NormGauss2D('spatial-model')
    spectral_model = PowLaw1D('spectral-model')
    source_model = CombinedModel3D(spatial_model=spatial_model,
                                   spectral_model=spectral_model)

    # Set starting values
    source_model.gamma = 2.2
    source_model.xpos = 83.6
    source_model.ypos = 22.01
    source_model.fwhm = 0.12
    source_model.ampl = 0.05

    model = 1E-9 * exposure * (source_model)  # 1E-9 flux factor

    # Fit
    fit = Fit(data=cube, model=model, stat=Chi2ConstVar(), method=LevMar())
    result = fit.fit()

    reference = (0.11925401159500593, 83.640630749333056, 22.020525848447541,
                 0.036353759774770608, 1.1900312815970555)

    assert_allclose(result.parvals, reference, rtol=1E-8)
Example #34
def test_sherpa_crab_fit():
    from sherpa.models import NormGauss2D, PowLaw1D, TableModel, Const2D
    from sherpa.stats import Chi2ConstVar
    from sherpa.optmethods import LevMar
    from sherpa.fit import Fit
    from ..sherpa_ import CombinedModel3D

    filename = gammapy_extra.filename(
        'experiments/sherpa_cube_analysis/counts.fits.gz')
    # Note: The cube is stored in an incorrect format
    counts = SkyCube.read(filename, format='fermi-counts')
    cube = counts.to_sherpa_data3d()

    # Set up exposure table model
    filename = gammapy_extra.filename(
        'experiments/sherpa_cube_analysis/exposure.fits.gz')
    exposure_data = fits.getdata(filename)
    exposure = TableModel('exposure')
    exposure.load(None, exposure_data.ravel())

    # Freeze exposure amplitude
    exposure.ampl.freeze()

    # Setup combined spatial and spectral model
    spatial_model = NormGauss2D('spatial-model')
    spectral_model = PowLaw1D('spectral-model')
    source_model = CombinedModel3D(spatial_model=spatial_model,
                                   spectral_model=spectral_model)

    # Set starting values
    source_model.gamma = 2.2
    source_model.xpos = 83.6
    source_model.ypos = 22.01
    source_model.fwhm = 0.12
    source_model.ampl = 0.05

    model = 1E-9 * exposure * source_model  # 1E-9 flux factor

    # Fit
    fit = Fit(data=cube, model=model, stat=Chi2ConstVar(), method=LevMar())
    result = fit.fit()

    reference = [0.121556, 83.625627, 22.015564, 0.096903, 2.240989]

    assert_allclose(result.parvals, reference, rtol=1E-5)
Example #35
def test_sherpa_crab_fit():
    from sherpa.models import NormGauss2D, PowLaw1D, TableModel, Const2D
    from sherpa.stats import Chi2ConstVar
    from sherpa.optmethods import LevMar
    from sherpa.fit import Fit
    from ..sherpa_ import Data3D, CombinedModel3D

    filename = gammapy_extra.filename('experiments/sherpa_cube_analysis/counts.fits.gz')
    counts = SkyCube.read(filename)
    cube = counts.to_sherpa_data3d()

    # Set up exposure table model
    filename = gammapy_extra.filename('experiments/sherpa_cube_analysis/exposure.fits.gz')
    exposure_data = fits.getdata(filename)
    exposure = TableModel('exposure')
    exposure.load(None, exposure_data.ravel())

    # Freeze exposure amplitude
    exposure.ampl.freeze()

    # Setup combined spatial and spectral model
    spatial_model = NormGauss2D('spatial-model')
    spectral_model = PowLaw1D('spectral-model')
    source_model = CombinedModel3D(spatial_model=spatial_model, spectral_model=spectral_model)

    # Set starting values
    source_model.gamma = 2.2
    source_model.xpos = 83.6
    source_model.ypos = 22.01
    source_model.fwhm = 0.12
    source_model.ampl = 0.05

    model = 1E-9 * exposure * (source_model) # 1E-9 flux factor

    # Fit
    fit = Fit(data=cube, model=model, stat=Chi2ConstVar(), method=LevMar())
    result = fit.fit()

    reference = (0.11925401159500593,
                 83.640630749333056,
                 22.020525848447541,
                 0.036353759774770608,
                 1.1900312815970555)

    assert_allclose(result.parvals, reference, rtol=1E-8)
Example #36
def test_regproj(old_numpy_printing, override_plot_backend):
    p = plot.RegionProjection()
    r = p._repr_html_()

    check_empty(r, 'RegionProjection', nsummary=13)

    x = np.arange(5, 8, 0.5)
    y = np.asarray([2, 3, 4, 5, 4, 3])
    dy = y / 2
    d = Data1D('n n', x, y, staterror=dy)

    m = Polynom1D()
    m.c1.thaw()

    fit = Fit(d, m, stat=Chi2())
    fr = fit.fit()
    assert fr.succeeded

    p.prepare(min=(-2, -1), max=(2, 2), nloop=(10, 20))
    p.calc(fit, m.c0, m.c1)

    r = p._repr_html_()
    assert r is not None

    if plot.backend.name == 'pylab':
        assert '<summary>RegionProjection</summary>' in r
        assert '<svg ' in r
        return

    print(r)
    assert '<summary>RegionProjection (13)</summary>' in r

    assert '<div class="dataname">parval0</div><div class="dataval">-0.5315772076542427</div>' in r
    assert '<div class="dataname">parval1</div><div class="dataval">0.5854611101216837</div>' in r
    assert '<div class="dataname">sigma</div><div class="dataval">(1, 2, 3)</div>' in r

    assert '<div class="dataname">y</div><div class="dataval">[ 306.854444  282.795953  259.744431  237.699877  216.662291  196.631674\n' in r

    assert '<div class="dataname">levels</div><div class="dataval">[  3.606863   7.491188  13.140272]</div>' in r
    assert '<div class="dataname">min</div><div class="dataval">[-2, -1]</div>' in r
    assert '<div class="dataname">max</div><div class="dataval">[2, 2]</div>' in r

    assert '<div class="dataname">nloop</div><div class="dataval">(10, 20)</div>' in r
Example #37
def test_leastsq_stat(hide_logging, reset_xspec, setup_group):
    fit = Fit(setup_group['data'], setup_group['model'], LeastSq(), LevMar())
    results = fit.fit()

    _fit_leastsq_results_bench = {
        'succeeded': 1,
        'numpoints': 143,
        'dof': 140,
        'istatval': 117067.64900554597,
        'statval': 4203.173180288109,
        'parvals': numpy.array(
            [1.808142494916457, 5.461611041944977, -1.907736527635154])
    }

    compare_results(_fit_leastsq_results_bench, results, tol=2e-4)
Example #38
def test_cstat_stat(hide_logging, reset_xspec, setup):
    fit = Fit(setup['data'], setup['model'], CStat(), NelderMead())
    results = fit.fit()

    _fit_cstat_results_bench = {
        'succeeded': 1,
        'numpoints': 460,
        'dof': 457,
        'istatval': 21647.62293983995,
        'statval': 472.6585691450068,
        'parvals': numpy.array(
            [1.75021021282262, 5.474614304244775, -1.9985761873334102])
    }

    compare_results(_fit_cstat_results_bench, results)
Example #39
def mwl_fit_low_level_calling_fermi():
    """Example how to do a Sherpa model fit,
    but use the Fermi ScienceTools to evaluate
    the likelihood for the Fermi dataset.
    """

    spec_model = LogParabola('spec_model')
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    model = spec_model

    data = FermiDataShim()
    stat = FermiStatShim()
    method = LevMar()
    fit = Fit(data=data, model=model, stat=stat, method=method)
    result = fit.fit()

    return dict(results=result, model=spec_model)
Example #40
def mwl_fit_low_level_calling_fermi():
    """Example how to do a Sherpa model fit,
    but use the Fermi ScienceTools to evaluate
    the likelihood for the Fermi dataset.
    """

    spec_model = LogParabola('spec_model')
    spec_model.c1 = 0.5
    spec_model.c2 = 0.2
    spec_model.ampl = 5e-11

    model = spec_model

    data = FermiDataShim()
    stat = FermiStatShim()
    method = LevMar()
    fit = Fit(data=data, model=model, stat=stat, method=method)
    result = fit.fit()

    # IPython.embed()
    return Bunch(results=result, model=spec_model)
Example #41
def test_wstat(hide_logging, reset_xspec, setup):

    fit = Fit(setup['data'], setup['model'], WStat(), LevMar())
    results = fit.fit()

    _fit_wstat_results_bench = {
        'succeeded': 1,
        'numpoints': 460,
        'dof': 457,
        'istatval': 21647.48285025895,
        'statval': 472.6585709918982,
        'parvals': numpy.array(
            [1.750204250228727, 5.47466040324842, -1.9983562007031974])
    }

    compare_results(_fit_wstat_results_bench, results, tol=2e-4)
Example #42
    def setUp(self):
        data = Data1D('fake', self._x, self._y, self._err)

        g1 = Gauss1D('g1')
        g1.fwhm.set(1.0, _tiny, _max, frozen=False)
        g1.pos.set(1.0, -_max, _max, frozen=False)
        g1.ampl.set(1.0, -_max, _max, frozen=False)
        p1 = PowLaw1D('p1')
        p1.gamma.set(1.0, -10, 10, frozen=False)
        p1.ampl.set(1.0, 0.0, _max, frozen=False)
        p1.ref.set(1.0, -_max, _max, frozen=True)      
        model = p1 + g1

        method = LevMar()
        method.config['maxfev'] = 10000
        method.config['ftol'] = float(_eps)
        method.config['epsfcn'] = float(_eps)
        method.config['gtol'] = float(_eps)
        method.config['xtol'] = float(_eps)
        method.config['factor'] = float(100)

        self.fit = Fit(data, model, Chi2DataVar(), method, Covariance())
        results = self.fit.fit()

        for key in ["succeeded", "numpoints", "nfev"]:
            assert self._fit_results_bench[key] == int(getattr(results, key))

        for key in ["rstat", "qval", "statval", "dof"]:
            assert numpy.allclose(float(self._fit_results_bench[key]),
                                  float(getattr(results, key)),
                                  1.e-7, 1.e-7)

        for key in ["parvals"]:
            try:
                assert numpy.allclose(self._fit_results_bench[key],
                                      getattr(results, key),
                                      1.e-4, 1.e-4)
            except AssertionError:
                print('parvals bench: ', self._fit_results_bench[key])
                print('parvals fit:   ', getattr(results, key))
                print('results', results)
                raise

        covresults = self.fit.est_errors()
        self.dof = results.dof
        self.mu = numpy.array(results.parvals)
        self.cov = numpy.array(covresults.extra_output)
        self.num = 10
Example #43
    def setUp(self):
        from sherpa.astro.io import read_pha
        from sherpa.astro.xspec import XSwabs, XSpowerlaw
        # self.startdir = os.getcwd()
        self.old_level = logger.getEffectiveLevel()
        logger.setLevel(logging.CRITICAL)

        pha = self.make_path("refake_0934_1_21_1e4.fak")
        # rmf = self.make_path("ccdid7_default.rmf")
        # arf = self.make_path("quiet_0934.arf")

        self.simarf = self.make_path("aref_sample.fits")
        self.pcaarf = self.make_path("aref_Cedge.fits")

        data = read_pha(pha)
        data.ignore(None, 0.3)
        data.ignore(7.0, None)

        rsp = Response1D(data)
        self.abs1 = XSwabs('abs1')
        self.p1 = XSpowerlaw('p1')
        model = rsp(self.abs1 * self.p1)

        self.fit = Fit(data, model, CStat(), NelderMead(), Covariance())
Example #44
 def test_wstat(self):
     fit = Fit(self.data, self.model, WStat(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_wstat_results_bench, results)
Example #45
 def test_mychi_nobkgdata_modelhasbkg(self):
     data = self.bkg
     fit = Fit(data, self.model, MyChiWithBkg(), LevMar())
     results = fit.fit()
     self.compare_results(self._fit_mychinobkg_results_bench, results, 1e-5)
Example #46
 def test_mychi_datahasbkg_modelhasnobkg(self):
     fit = Fit(self.data, self.model, MyChiNoBkg(), LevMar())
     results = fit.fit()
     self.compare_results(self._fit_mychi_results_bench, results)
Example #47
 def test_mycash_nobkgdata_modelhasbkg(self):
     data = self.bkg
     fit = Fit(data, self.model, MyCashWithBkg(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_mycashnobkg_results_bench, results)
Example #48
 def test_mycash_datahasbkg_modelhasnobkg(self):
     fit = Fit(self.data, self.model, MyCashNoBkg(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_mycash_results_bench, results)
Example #49
 def test_mycash_data_and_model_donothave_bkg(self):
     data = self.bkg
     fit = Fit(data, self.model, MyCashNoBkg(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_mycashnobkg_results_bench, results)
Example #50
class test_sim(SherpaTestCase):

    @unittest.skipIf(not has_fits_support(),
                     'need pycrates, pyfits')
    @unittest.skipIf(not has_package_from_list('sherpa.astro.xspec'),
                     "required sherpa.astro.xspec module missing")
    def setUp(self):
        try:
            from sherpa.astro.io import read_pha
            from sherpa.astro.xspec import XSwabs, XSpowerlaw
        except:
            return
        #self.startdir = os.getcwd()
        self.old_level = logger.getEffectiveLevel()
        logger.setLevel(logging.CRITICAL)

        datadir = SherpaTestCase.datadir
        if datadir is None:
            return

        pha = os.path.join(datadir, "refake_0934_1_21_1e4.fak")
        rmf = os.path.join(datadir, "ccdid7_default.rmf")
        arf = os.path.join(datadir, "quiet_0934.arf")
        
        self.simarf = os.path.join(datadir, "aref_sample.fits")
        self.pcaarf = os.path.join(datadir, "aref_Cedge.fits")

        data = read_pha(pha)
        data.ignore(None, 0.3)
        data.ignore(7.0, None)

        rsp = Response1D(data)
        self.abs1 = XSwabs('abs1')
        self.p1 = XSpowerlaw('p1')
        model = rsp(self.abs1 * self.p1)
        
        self.fit = Fit(data, model, CStat(), NelderMead(), Covariance())


    def tearDown(self):
        #os.chdir(self.startdir)
        if hasattr(self,'old_level'):
            logger.setLevel(self.old_level)

    @unittest.skipIf(not has_package_from_list('sherpa.astro.xspec'),
                     "required sherpa.astro.xspec module missing")
    @unittest.skipIf(test_data_missing(), "required test data missing")
    def test_pragbayes_simarf(self):
        datadir = SherpaTestCase.datadir
        if datadir is None:
            return

        mcmc = sim.MCMC()

        self.abs1.nh = 0.092886
        self.p1.phoindex = 0.994544
        self.p1.norm = 9.26369

        mcmc.set_sampler("PragBayes")
        mcmc.set_sampler_opt("simarf", self.simarf)
        mcmc.set_sampler_opt("p_M", 0.5)
        mcmc.set_sampler_opt("nsubiter", 7)

        covar_results = self.fit.est_errors()
        cov = covar_results.extra_output

        niter = 10
        stats, accept, params = mcmc.get_draws(self.fit, cov, niter=niter)
        # try:
        #     assert (covar_results.parmaxes < params.std(1)).all()
        # except AssertionError:
        #     print 'covar: ', str(covar_results.parmaxes)
        #     print 'param: ', str(params.std(1))
        #     raise

    @unittest.skipIf(not has_package_from_list('sherpa.astro.xspec'),
                     "required sherpa.astro.xspec module missing")
    @unittest.skipIf(test_data_missing(), "required test data missing")
    def test_pragbayes_pcaarf(self):
        datadir = SherpaTestCase.datadir
        if datadir is None:
            return

        mcmc = sim.MCMC()

        self.abs1.nh = 0.092886
        self.p1.phoindex = 0.994544
        self.p1.norm = 9.26369

        mcmc.set_sampler("pragBayes")
        mcmc.set_sampler_opt("simarf", self.pcaarf)
        mcmc.set_sampler_opt("p_M", 0.5)
        mcmc.set_sampler_opt("nsubiter", 5)

        covar_results = self.fit.est_errors()
        cov = covar_results.extra_output

        niter = 10
        stats, accept, params = mcmc.get_draws(self.fit, cov, niter=niter)
Example #51
 def test_mycash_data_and_model_have_bkg(self):
     fit = Fit(self.data, self.model, MyCashWithBkg(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_mycash_results_bench, results)
Example #52
 def test_cash_stat(self):
     fit = Fit(self.data, self.model, Cash(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_mycash_results_bench, results)
Example #53
 def test_leastsq_stat(self):
     fit = Fit(self.data, self.model, LeastSq(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_leastsq_results_bench, results)
Example #54
 def test_chi2gehrels_stat(self):
     fit = Fit(self.data, self.model, Chi2Gehrels(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_chi2gehrels_results_bench, results)
Example #55
 def test_chi2constvar_stat(self):
     fit = Fit(self.data, self.model, Chi2ConstVar(), NelderMead())
     results = fit.fit()
     self.compare_results(self._fit_chi2constvar_results_bench, results)
Example #56
 def test_mychi_data_and_model_donothave_bkg(self):
     data = self.bkg
     fit = Fit(data, self.model, MyChiNoBkg(), LevMar())
     results = fit.fit()
     self.compare_results(self._fit_mychinobkg_results_bench, results, 1e-5)
Example #57
 def test_mychi_data_and_model_have_bkg(self):
     fit = Fit(self.data, self.model, MyChiWithBkg(), LevMar())
     results = fit.fit()
     self.compare_results(self._fit_mychi_results_bench, results)
Example #58
class test_sim(SherpaTestCase):

    @requires_fits
    @requires_xspec
    def setUp(self):
        from sherpa.astro.io import read_pha
        from sherpa.astro.xspec import XSwabs, XSpowerlaw
        # self.startdir = os.getcwd()
        self.old_level = logger.getEffectiveLevel()
        logger.setLevel(logging.CRITICAL)

        pha = self.make_path("refake_0934_1_21_1e4.fak")
        # rmf = self.make_path("ccdid7_default.rmf")
        # arf = self.make_path("quiet_0934.arf")

        self.simarf = self.make_path("aref_sample.fits")
        self.pcaarf = self.make_path("aref_Cedge.fits")

        data = read_pha(pha)
        data.ignore(None, 0.3)
        data.ignore(7.0, None)

        rsp = Response1D(data)
        self.abs1 = XSwabs('abs1')
        self.p1 = XSpowerlaw('p1')
        model = rsp(self.abs1 * self.p1)

        self.fit = Fit(data, model, CStat(), NelderMead(), Covariance())

    def tearDown(self):
        # os.chdir(self.startdir)
        if hasattr(self, 'old_level'):
            logger.setLevel(self.old_level)

    @requires_xspec
    @requires_data
    def test_pragbayes_simarf(self):
        mcmc = sim.MCMC()

        self.abs1.nh = 0.092886
        self.p1.phoindex = 0.994544
        self.p1.norm = 9.26369

        mcmc.set_sampler("PragBayes")
        mcmc.set_sampler_opt("simarf", self.simarf)
        mcmc.set_sampler_opt("p_M", 0.5)
        mcmc.set_sampler_opt("nsubiter", 7)

        covar_results = self.fit.est_errors()
        cov = covar_results.extra_output

        niter = 10
        stats, accept, params = mcmc.get_draws(self.fit, cov, niter=niter)
        # try:
        #     assert (covar_results.parmaxes < params.std(1)).all()
        # except AssertionError:
        #     print 'covar: ', str(covar_results.parmaxes)
        #     print 'param: ', str(params.std(1))
        #     raise

    @requires_xspec
    @requires_data
    def test_pragbayes_pcaarf(self):
        mcmc = sim.MCMC()

        self.abs1.nh = 0.092886
        self.p1.phoindex = 0.994544
        self.p1.norm = 9.26369

        mcmc.set_sampler("pragBayes")
        mcmc.set_sampler_opt("simarf", self.pcaarf)
        mcmc.set_sampler_opt("p_M", 0.5)
        mcmc.set_sampler_opt("nsubiter", 5)

        covar_results = self.fit.est_errors()
        cov = covar_results.extra_output

        niter = 10
        stats, accept, params = mcmc.get_draws(self.fit, cov, niter=niter)
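# Hedged follow-up (illustrative only, not part of the tests above): the
# return values of get_draws() can be summarised directly. stats holds the
# statistic per iteration, accept is a 0/1 acceptance array, and params has
# one row per thawed parameter and one column per iteration.
#
#     acceptance_rate = accept[1:].mean()
#     param_means = params.mean(axis=1)
#     param_stds = params.std(axis=1)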