示例#1
0
def test_parallel_map_funcs2():
    """Check a simultaneous fit gives the same answer with 1 or 2 cores."""

    def tst(ncores, sg, stat, opt):
        # BUG FIX: numcores was hard-coded to 2, silently ignoring the
        # ncores argument that the test varies below (1 and 2).
        sd = DataSimulFit('sd', [d, d], numcores=ncores)
        f = Fit(sd, sg, stat, opt)
        result = f.fit()
        return result

    def cmp_results(result, tol=1.0e-3):
        assert result.succeeded
        parvals = (1.7555670572301785, 1.5092728216164186, 4.893136872267538)
        assert result.numpoints == 200

        # pass tol through to approx rather than using its (tighter) default
        assert result.parvals == pytest.approx(parvals, rel=tol)

    # Simulated noisy gaussian; fixed seed for reproducibility.
    numpy.random.seed(0)
    x = numpy.linspace(-5., 5., 100)
    ampl = 5
    pos = 1.5
    sigma = 0.75
    err = 0.25
    y = ampl * numpy.exp(-0.5 * (x - pos)**2 / sigma**2)
    y += numpy.random.normal(0., err, x.shape)
    d = Data1D('junk', x, y)
    g = Gauss1D()
    opt = LevMar()
    stat = LeastSq()
    sg = SimulFitModel('sg', [g, g])

    result = tst(1, sg, stat, opt)
    cmp_results(result)

    result = tst(2, sg, stat, opt)
    cmp_results(result)
示例#2
0
    def call(self, niter, seed):
        """Resample the data niter times and fit the model to each sample.

        Returns a dict mapping each thawed parameter's full name to the
        list of its fitted values (one entry per iteration).
        """

        # Map positional index -> parameter name for each thawed parameter,
        # and create an empty sample list per parameter.
        pars = {}
        pars_index = {}
        index = 0
        for par in self.model.pars:
            if par.frozen is False:
                name = '%s.%s' % (par.modelname, par.name)
                pars_index[index] = name
                pars[name] = []
                index += 1

        data = self.data
        y = data.y
        x = data.x
        if type(data) == Data1DAsymmetricErrs:
            # Asymmetric errors: sample within [y - elo, y + ehi].
            y_l = y - data.elo
            y_h = y + data.ehi
        elif isinstance(data, (Data1D,)):
            # NOTE(review): for plain Data1D these bounds are the statistical
            # errors themselves, not y +/- error -- confirm this is the
            # intended sampling interval.
            y_l = data.staterror
            y_h = data.staterror
        else:
            msg ="{0} {1}".format(ReSampleData.__name__, type(data))
            raise NotImplementedError(msg)

        numpy.random.seed(seed)
        for j in range(niter):
            ry = []
            for i in range(len(y_l)): 
                a = y_l[i]
                b = y_h[i]
                # Rejection-sample a replacement for y[i]: choose the low or
                # high side with equal probability (u), draw from a gaussian
                # centred on y[i] with the one-sided width for that side, and
                # reject draws on the wrong side of y[i].
                # NOTE(review): the r = -1 sentinel assumes -1 lies outside
                # [a, b]; if a <= -1 <= b the loop body never runs -- confirm.
                r = -1
                while r < a or r > b:
                    sigma = b - y[i]
                    u = numpy.random.random_sample()
                    if u < 0.5:
                        sigma=y[i]-a
                    r = numpy.random.normal(loc=y[i],scale=sigma,size=None)
                    if u < 0.5 and r > y[i]:
                        r = -1
                    if u > 0.5 and r < y[i]:
                        r = -1
                ry.append(r)

            # fit is performed for each simulated data point
            fit = Fit(Data1D('tmp', x, ry), self.model, LeastSq( ), LevMar())
            fit_result = fit.fit()

            # Record this iteration's best-fit value for every parameter.
            for index, val in enumerate(fit_result.parvals):
                name = pars_index[index]
                pars[name].append(val)

        # Report per-parameter average and spread, and return the samples.
        result = {}
        for index, name in pars_index.items():
            avg = numpy.average(pars[name])
            std = numpy.std(pars[name])
            print(name, ': avg =', avg, ', std =', std)
            result[name] = pars[name]

        return result
示例#3
0
def test_regrid_binaryop_1d(reset_seed):
    """issue #762, Cannot regrid a composite model (BinaryOpModel)"""

    np.random.seed(0)
    stat = LeastSq()
    method = LevMar()
    gauss = MyGauss()
    const = MyConst1D()
    combined = gauss + const

    # Simulated data: combined model plus gaussian noise.
    grid = np.linspace(-5., 5., 5)
    noise = 0.25
    signal = combined(grid) + np.random.normal(gauss.pos.val, noise, grid.shape)
    gauss.counter = 0
    const.counter = 0

    dset = Data1D('one', grid, signal)
    res = Fit(dset, combined, stat, method).fit()
    assert res.numpoints == grid.size
    assert res.statval < 1.0
    assert gauss.counter == const.counter
    assert (res.nfev + 4) * grid.size == gauss.counter

    # Repeat the fit with the model evaluated on a finer grid.
    gauss.counter = 0
    const.counter = 0
    fine_grid = np.linspace(-5., 5., 25)
    res = Fit(dset, combined.regrid(fine_grid), stat, method).fit()
    assert res.numpoints == grid.size
    assert res.statval < 1.0
    assert gauss.counter == const.counter
    assert (res.nfev + 4) * fine_grid.size == gauss.counter
示例#4
0
 def setUp(self):
     """Create the optimiser, statistic, error estimator and data paths."""
     self.est = Covariance()
     self.method = LevMar()
     self.stat = Chi2Gehrels()
     self.gro_fname = self.make_path('gro.txt')
     self.gro_delta_fname = self.make_path('gro_delta.txt')
示例#5
0
def mwl_fit_low_level():
    """Fit a multi-wavelength dataset with the low-level Sherpa API.

    Low-level = no session, classes.

    Example: http://python4astronomers.github.io/fitting/low-level.html
    """
    datasets = [FermiData().sherpa_data, IACTData().sherpa_data]

    # spec_model = PowLaw1D('spec_model')
    spectrum = LogParabola('spec_model')
    spectrum.c1 = 0.5
    spectrum.c2 = 0.2
    spectrum.ampl = 5e-11

    sim_data = DataSimulFit(name='global_data', datasets=datasets)
    # TODO: Figure out how to notice using the low-level API
    # data.notice(mins=1e-3, maxes=None, axislist=None)
    sim_model = SimulFitModel(name='global_model', parts=[spectrum, spectrum])
    fitter = Fit(data=sim_data, model=sim_model, stat=FermiStat(),
                 method=LevMar())
    result = fitter.fit()

    # IPython.embed()
    return Bunch(results=result, model=spectrum)
示例#6
0
def setup():
    """Fit the benchmark data, validate against the stored results, and
    return an object holding the fit products the tests need."""
    data = Data1D('fake', _x, _y, _err)

    g1 = Gauss1D('g1')
    g1.fwhm.set(1.0, _tiny, _max, frozen=False)
    g1.pos.set(1.0, -_max, _max, frozen=False)
    g1.ampl.set(1.0, -_max, _max, frozen=False)
    p1 = PowLaw1D('p1')
    p1.gamma.set(1.0, -10, 10, frozen=False)
    p1.ampl.set(1.0, 0.0, _max, frozen=False)
    p1.ref.set(1.0, -_max, _max, frozen=True)
    model = p1 + g1

    method = LevMar()
    method.config['maxfev'] = 10000
    method.config['ftol'] = float(_eps)
    method.config['epsfcn'] = float(_eps)
    method.config['gtol'] = float(_eps)
    method.config['xtol'] = float(_eps)
    method.config['factor'] = float(100)

    fit = Fit(data, model, Chi2DataVar(), method, Covariance())
    results = fit.fit()

    for key in ["succeeded", "numpoints", "nfev"]:
        assert _fit_results_bench[key] == int(getattr(results, key))

    for key in ["rstat", "qval", "statval", "dof"]:
        # used rel and abs tol of 1e-7 with numpy allclose
        assert float(getattr(results,
                             key)) == pytest.approx(_fit_results_bench[key])

    for key in ["parvals"]:
        try:
            # used rel and abs tol of 1e-4 with numpy allclose
            assert getattr(results,
                           key) == pytest.approx(_fit_results_bench[key])
        except AssertionError:
            print('parvals bench: ', _fit_results_bench[key])
            print('parvals fit:   ', getattr(results, key))
            print('results', results)
            raise

    # BUG FIX: the original assigned the attributes onto the namedtuple
    # *class* (shared, mutable state) and 'cov' was missing from the field
    # list; build a proper instance instead so each call returns an
    # independent result object with the same attributes.
    fields = [
        'data', 'model', 'method', 'fit', 'results', 'covresults', 'dof', 'mu',
        'num', 'cov'
    ]
    Results = namedtuple('Results', fields)

    covresults = fit.est_errors()
    return Results(data=data,
                   model=model,
                   method=method,
                   fit=fit,
                   results=results,
                   covresults=covresults,
                   dof=results.dof,
                   mu=numpy.array(results.parvals),
                   num=10,
                   cov=numpy.array(covresults.extra_output))
示例#7
0
 def test_wstat(self):
     """Fit with the WStat statistic and compare to the benchmark."""
     wfit = Fit(self.data, self.model, WStat(), LevMar())
     wresults = wfit.fit()
     # On a local linux machine I have to bump the tolerance to
     # 3e-4, but this isn't seen on Travis. The fit isn't
     # "great", so it may be that the results are sensitive to
     # numerical differences (e.g. as introduced with updated
     # compilers).
     # tol = 3e-4
     # TODO: investigate difference
     self.compare_results(self._fit_wstat_results_bench, wresults, tol=1e-6)
示例#8
0
 def test_gauss_gauss(self):
     """Simultaneously fit two gaussians to the d4/d5 datasets."""
     first = Gauss1D()
     second = Gauss1D()
     first.fwhm = 1.3
     first.pos = 1.5
     second.fwhm = 4.
     second.pos = -2.0
     sdata = DataSimulFit('d4d5', (self.d4, self.d5))
     smodel = SimulFitModel('g1g2', (first, second))
     result = Fit(sdata, smodel, method=LevMar(), stat=LeastSq()).fit()
     self.compare_results(self._fit_g2g2_bench, result)
示例#9
0
def test_data2d_int_eval_model_to_fit(array_sizes_fixture):
    """Regression test: fitting integrated 2D data must not fail."""
    from sherpa.fit import Fit
    from sherpa.optmethods import LevMar
    from sherpa.stats import Chi2
    from sherpa.models import Gauss2D

    x0, x1, dx, y = array_sizes_fixture
    yflat = y.flatten()
    lo0 = x0.flatten()
    lo1 = x1.flatten()
    data2 = Data2DInt('name', lo0, lo0 + dx, lo1, lo1 + dx, yflat,
                      staterror=numpy.sqrt(yflat))

    # Failed in Sherpa 4.11.0
    Fit(data2, Gauss2D(), Chi2(), LevMar()).fit()
示例#10
0
    def setUp(self):
        """Fit the benchmark data, check the stored results, and keep the
        quantities the test methods need on the instance."""
        data = Data1D('fake', self._x, self._y, self._err)

        g1 = Gauss1D('g1')
        g1.fwhm.set(1.0, _tiny, _max, frozen=False)
        g1.pos.set(1.0, -_max, _max, frozen=False)
        g1.ampl.set(1.0, -_max, _max, frozen=False)
        p1 = PowLaw1D('p1')
        p1.gamma.set(1.0, -10, 10, frozen=False)
        p1.ampl.set(1.0, 0.0, _max, frozen=False)
        p1.ref.set(1.0, -_max, _max, frozen=True)
        model = p1 + g1

        # Tighten the LevMar convergence settings for the benchmark fit.
        method = LevMar()
        for key, value in (('maxfev', 10000),
                           ('ftol', float(_eps)),
                           ('epsfcn', float(_eps)),
                           ('gtol', float(_eps)),
                           ('xtol', float(_eps)),
                           ('factor', float(100))):
            method.config[key] = value

        self.fit = Fit(data, model, Chi2DataVar(), method, Covariance())
        results = self.fit.fit()

        for key in ["succeeded", "numpoints", "nfev"]:
            assert self._fit_results_bench[key] == int(getattr(results, key))

        for key in ["rstat", "qval", "statval", "dof"]:
            assert numpy.allclose(float(self._fit_results_bench[key]),
                                  float(getattr(results, key)),
                                  1.e-7, 1.e-7)

        try:
            assert numpy.allclose(self._fit_results_bench["parvals"],
                                  results.parvals,
                                  1.e-4, 1.e-4)
        except AssertionError:
            print('parvals bench: ', self._fit_results_bench["parvals"])
            print('parvals fit:   ', results.parvals)
            print('results', results)
            raise

        covresults = self.fit.est_errors()
        self.dof = results.dof
        self.mu = numpy.array(results.parvals)
        self.cov = numpy.array(covresults.extra_output)
        self.num = 10
示例#11
0
def test_sherpa_crab_fit():
    """Fit a 3D spatial/spectral model to the Crab cube and compare the
    best-fit parameters to reference values."""
    from sherpa.models import NormGauss2D, PowLaw1D, TableModel, Const2D
    from sherpa.stats import Chi2ConstVar
    from sherpa.optmethods import LevMar
    from sherpa.fit import Fit
    from ..sherpa_ import CombinedModel3D

    # Note: The cube is stored in incorrect format
    counts = SkyCube.read(
        gammapy_extra.filename(
            'experiments/sherpa_cube_analysis/counts.fits.gz'),
        format='fermi-counts')
    cube = counts.to_sherpa_data3d()

    # Exposure table model with a frozen amplitude.
    exposure_data = fits.getdata(gammapy_extra.filename(
        'experiments/sherpa_cube_analysis/exposure.fits.gz'))
    exposure = TableModel('exposure')
    exposure.load(None, exposure_data.ravel())
    exposure.ampl.freeze()

    # Combined spatial/spectral source model with starting values.
    source_model = CombinedModel3D(
        spatial_model=NormGauss2D('spatial-model'),
        spectral_model=PowLaw1D('spectral-model'))
    source_model.gamma = 2.2
    source_model.xpos = 83.6
    source_model.ypos = 22.01
    source_model.fwhm = 0.12
    source_model.ampl = 0.05

    model = 1E-9 * exposure * source_model  # 1E-9 flux factor

    result = Fit(data=cube, model=model, stat=Chi2ConstVar(),
                 method=LevMar()).fit()

    reference = [0.121556, 83.625627, 22.015564, 0.096903, 2.240989]
    assert_allclose(result.parvals, reference, rtol=1E-5)
示例#12
0
def test_sherpa_crab_fit():
    """Fit a 3D (NormGauss2D x PowLaw1D) model to the Crab counts cube
    and check the best-fit parameters against stored reference values."""
    from sherpa.models import NormGauss2D, PowLaw1D, TableModel, Const2D
    from sherpa.stats import Chi2ConstVar
    from sherpa.optmethods import LevMar
    from sherpa.fit import Fit
    from ..sherpa_ import Data3D, CombinedModel3D

    filename = gammapy_extra.filename(
        'experiments/sherpa_cube_analysis/counts.fits.gz')
    counts = SkyCube.read(filename)
    cube = counts.to_sherpa_data3d()

    # Set up exposure table model
    filename = gammapy_extra.filename(
        'experiments/sherpa_cube_analysis/exposure.fits.gz')
    exposure_data = fits.getdata(filename)
    exposure = TableModel('exposure')
    exposure.load(None, exposure_data.ravel())

    # Freeze exposure amplitude
    exposure.ampl.freeze()

    # Setup combined spatial and spectral model
    spatial_model = NormGauss2D('spatial-model')
    spectral_model = PowLaw1D('spectral-model')
    source_model = CombinedModel3D(spatial_model=spatial_model,
                                   spectral_model=spectral_model)

    # Set starting values
    source_model.gamma = 2.2
    source_model.xpos = 83.6
    source_model.ypos = 22.01
    source_model.fwhm = 0.12
    source_model.ampl = 0.05

    model = 1E-9 * exposure * (source_model)  # 1E-9 flux factor

    # Fit
    fit = Fit(data=cube, model=model, stat=Chi2ConstVar(), method=LevMar())
    result = fit.fit()

    # Expected (fwhm, xpos, ypos, ampl, gamma) best-fit values.
    reference = (0.11925401159500593, 83.640630749333056, 22.020525848447541,
                 0.036353759774770608, 1.1900312815970555)

    # NOTE(review): rtol=1E-8 is a very tight tolerance for a fit result;
    # confirm it is stable across platforms/compilers.
    assert_allclose(result.parvals, reference, rtol=1E-8)
示例#13
0
def mwl_fit_low_level_calling_fermi():
    """Example how to do a Sherpa model fit,
    but use the Fermi ScienceTools to evaluate
    the likelihood for the Fermi dataset.
    """
    # Spectral model with starting values.
    model = LogParabola('spec_model')
    model.c1 = 0.5
    model.c2 = 0.2
    model.ampl = 5e-11

    # The shim objects route data access and statistic evaluation to the
    # Fermi ScienceTools.
    fit = Fit(data=FermiDataShim(), model=model, stat=FermiStatShim(),
              method=LevMar())
    result = fit.fit()

    return dict(results=result, model=model)
示例#14
0
def test_leastsq_stat(hide_logging, reset_xspec, setup_group):
    """Least-squares fit of the grouped data against the benchmark."""
    results = Fit(setup_group['data'], setup_group['model'],
                  LeastSq(), LevMar()).fit()

    bench = {
        'succeeded': 1,
        'numpoints': 143,
        'dof': 140,
        'istatval': 117067.64900554597,
        'statval': 4203.173180288109,
        'parvals': numpy.array([1.808142494916457,
                                5.461611041944977,
                                -1.907736527635154]),
    }

    compare_results(bench, results, tol=2e-4)
def test_fitresults_chisq():
    """Check the HTML rendering of a chi-square fit result."""
    data = Data1D('dx', [1, 2, 3], [4, 2, 2], [1.2, 1.4, 1.4])
    mdl = Const1D()
    mdl.c0 = 3
    res = fit.Fit(data, mdl, method=LevMar(), stat=Chi2()).fit()
    html = res._repr_html_()

    assert html is not None

    # Every expected fragment must appear in the rendered report.
    for fragment in [
            '<summary>Fit parameters</summary>',
            '<summary>Summary (10)</summary>',
            '<td>const1d.c0</td>',
            '<div class="dataname">Method</div><div class="dataval">{}</div>'.format(res.methodname),
            '<div class="dataname">Final statistic</div><div class="dataval">1.65289</div>',
            '<div class="dataname">Reduced statistic</div><div class="dataval">0.826446</div>',
            '<div class="dataname">Probability (Q-value)</div><div class="dataval">0.437602</div>',
            '<div class="dataname">Number of data points</div><div class="dataval">3</div>',
            '<div class="dataname">Degrees of freedom</div><div class="dataval">2</div>',
    ]:
        assert fragment in html
def test_fitresults_named():
    """Check the HTML rendering when a dataset identifier is set."""
    data = Data1D('dx', [1, 2, 3], [4, 2, 2])
    mdl = Const1D()
    mdl.c0 = 3
    res = fit.Fit(data, mdl, method=LevMar(), stat=LeastSq()).fit()
    res.datasets = [1]
    html = res._repr_html_()

    assert html is not None

    # Every expected fragment must appear in the rendered report.
    for fragment in [
            '<summary>Fit parameters</summary>',
            '<summary>Summary (9)',
            '<td>const1d.c0</td>',
            '<div class="dataname">Dataset</div><div class="dataval">1</div>',
            '<div class="dataname">Method</div><div class="dataval">{}</div>'.format(res.methodname),
            '<div class="dataname">Statistic</div><div class="dataval">leastsq</div>',
            '<div class="dataname">&#916; statistic</div><div class="dataval">0.333333</div>',
            '<div class="dataname">Number of data points</div><div class="dataval">3</div>',
            '<div class="dataname">Degrees of freedom</div><div class="dataval">2</div>',
    ]:
        assert fragment in html
示例#17
0
def test_wstat(hide_logging, reset_xspec, setup):
    """WStat fit of the benchmark data, compared to stored results."""
    results = Fit(setup['data'], setup['model'], WStat(), LevMar()).fit()

    bench = {
        'succeeded': 1,
        'numpoints': 460,
        'dof': 457,
        'istatval': 21647.48285025895,
        'statval': 472.6585709918982,
        'parvals': numpy.array([1.750204250228727,
                                5.47466040324842,
                                -1.9983562007031974]),
    }

    compare_results(bench, results, tol=2e-4)
示例#18
0
def test_mychi_bkg(stat, hide_logging, reset_xspec, setup_bkg_group):
    """Fit the background dataset with the user-supplied chi-square
    statistic and compare to the benchmark values."""
    results = Fit(setup_bkg_group['bkg'], setup_bkg_group['model'],
                  stat(), LevMar()).fit()

    bench = {
        'succeeded': 1,
        'numpoints': 70,
        'dof': 67,
        'istatval': 12368.806484278228,
        'statval': 799.9399745311307,
        'parvals': numpy.array([0.1904013138796835,
                                2.497496167887353,
                                2.111511871780941]),
    }

    compare_results(bench, results, tol=2e-6)
示例#19
0
def test_mychi_data(stat, hide_logging, reset_xspec, setup_group):
    """Fit the source dataset with the user-supplied chi-square
    statistic and compare to the benchmark values."""
    results = Fit(setup_group['data'], setup_group['model'],
                  stat(), LevMar()).fit()

    bench = {
        'succeeded': 1,
        'numpoints': 143,
        'dof': 140,
        'istatval': 117067.64900554594,
        'statval': 4211.349359724583,
        'parvals': numpy.array([1.8177747886737923,
                                5.448440759203273,
                                -1.8728780046411722]),
    }

    compare_results(bench, results, tol=2e-4)
示例#20
0
def test_chi2constvar_stat(hide_logging, reset_xspec, setup_group):
    """Chi2ConstVar fit of the grouped data against the benchmark."""
    results = Fit(setup_group['data'], setup_group['model'],
                  Chi2ConstVar(), LevMar()).fit()

    bench = {
        'succeeded': 1,
        'numpoints': 143,
        'dof': 140,
        'istatval': 3903.1647954751857,
        'statval': 140.1384389790626,
        'parvals': numpy.array([1.8081424949164122,
                                5.461611041944607,
                                -1.9077365276482876]),
    }

    compare_results(bench, results, tol=2e-4)
示例#21
0
def fit_asymmetric_err(bench, data):
    """Fit a power law to data with asymmetric errors, check the results
    against bench, and return the fitted model."""
    pl = PowLaw1D('p1')
    results = Fit(data, pl, Chi2Gehrels(), LevMar()).fit()
    compare(bench, results)
    return pl
示例#22
0
 def test_mychi_nobkgdata_modelhasbkg(self):
     """Statistic models a background but fits the background dataset."""
     results = Fit(self.bkg, self.model, MyChiWithBkg(), LevMar()).fit()
     self.compare_results(self._fit_mychinobkg_results_bench, results, 1e-5)
示例#23
0
 def test_mychi_datahasbkg_modelhasnobkg(self):
     """Data has a background but the statistic does not model one."""
     results = Fit(self.data, self.model, MyChiNoBkg(), LevMar()).fit()
     self.compare_results(self._fit_mychi_results_bench, results)
示例#24
0
 def test_mychi_data_and_model_donothave_bkg(self):
     """Fit the background dataset with a statistic that has no background."""
     results = Fit(self.bkg, self.model, MyChiNoBkg(), LevMar()).fit()
     self.compare_results(self._fit_mychinobkg_results_bench, results, 1e-5)
示例#25
0
 def test_mychi_data_and_model_have_bkg(self):
     """Fit the source dataset with a background-aware statistic."""
     results = Fit(self.data, self.model, MyChiWithBkg(), LevMar()).fit()
     self.compare_results(self._fit_mychi_results_bench, results)
示例#26
0
from collections import OrderedDict
import numpy as np
from astropy import units as u
from sherpa.models import ArithmeticModel
from sherpa.data import BaseData, Data
from sherpa.stats import Likelihood
from sherpa.optmethods import LevMar, NelderMead, MonCar, GridSearch

# Public API of this module.
__all__ = [
    'SherpaDataWrapper',
    'SherpaModelWrapper',
    'SherpaStatWrapper',
]

# Mapping of user-facing optimiser names to sherpa method instances.
SHERPA_OPTMETHODS = OrderedDict([
    ('levmar', LevMar()),
    ('simplex', NelderMead()),
    ('moncar', MonCar()),
    ('gridsearch', GridSearch()),
])


class SherpaDataWrapper(Data):
    """Adapter exposing an external dataset through sherpa's ``Data`` API."""

    def __init__(self, gp_data, name='GPData'):
        # sherpa does some magic here: it sets class attributes from constructor
        # arguments so `gp_data` will be available later on the instance.
        # NOTE(review): assumes gp_data has an `e_ref` array attribute --
        # confirm against the callers.
        self._data_dummy = np.empty_like(gp_data.e_ref)
        BaseData.__init__(self)

    def to_fit(self, staterr):
        # Sherpa expects (data, staterror, syserror); only a dummy data
        # array is supplied here, and no errors.
        return self._data_dummy, None, None
示例#27
0
def test_regrid_binaryop_1d():
    """issue #762, Cannot regrid a composite model (BinaryOpModel)"""
    from sherpa.stats import LeastSq
    from sherpa.fit import Fit
    from sherpa.optmethods import LevMar

    class MyConst1D(RegriddableModel1D):
        """Constant model that counts how many grid points it evaluates."""

        def __init__(self, name='myconst1d'):
            self.c0 = Parameter(name, 'c0', 3.1)
            self.counter = 0  # total number of x values passed to calc()
            ArithmeticModel.__init__(self, name, (self.c0, ))

        def calc(self, par, *args, **kwargs):
            x = args[0]
            self.counter += x.size
            return par[0]

    class MyGauss(RegriddableModel1D):
        """Gaussian model that counts how many grid points it evaluates."""

        def __init__(self, name='mygauss'):
            self.sigma = Parameter(name, 'sigma', 10, min=0, max=10)
            self.pos = Parameter(name, 'pos', 0, min=-10, max=10)
            self.ampl = Parameter(name, 'ampl', 5)
            self.counter = 0  # total number of x values passed to calc()
            ArithmeticModel.__init__(self, name,
                                     (self.sigma, self.pos, self.ampl))

        def calc(self, par, *args, **kwargs):
            sigma, pos, ampl = par[0], par[1], par[2]
            x = args[0]
            self.counter += x.size
            return ampl * np.exp(-0.5 * (args[0] - pos)**2 / sigma**2)

    # Simulated data: gaussian + constant plus noise (pos defaults to 0,
    # so the noise is zero-mean).
    np.random.seed(0)
    leastsq = LeastSq()
    levmar = LevMar()
    mygauss = MyGauss()
    myconst = MyConst1D()
    mymodel = mygauss + myconst
    x = np.linspace(-5., 5., 5)
    err = 0.25
    y = mymodel(x) + np.random.normal(mygauss.pos.val, err, x.shape)
    mygauss.counter = 0
    myconst.counter = 0
    data = Data1D('one', x, y)
    fit = Fit(data, mymodel, leastsq, levmar)
    result = fit.fit()
    assert result.numpoints == x.size
    assert result.statval < 1.0
    # Both components of the composite must see the same number of
    # evaluations: nfev + 4 passes over the data grid.
    assert mygauss.counter == myconst.counter
    assert (result.nfev + 4) * x.size == mygauss.counter

    # Repeat with the model regridded onto a finer grid: the fit still
    # uses the data's points, but each model evaluation uses x_regrid.
    mygauss.counter = 0
    myconst.counter = 0
    x_regrid = np.linspace(-5., 5., 25)
    mymodel_regrid = mymodel.regrid(x_regrid)
    fit = Fit(data, mymodel_regrid, leastsq, levmar)
    result = fit.fit()
    assert result.numpoints == x.size
    assert result.statval < 1.0
    assert mygauss.counter == myconst.counter
    assert (result.nfev + 4) * x_regrid.size == mygauss.counter
示例#28
0
 def test_chi2constvar_stat(self):
     """Chi2ConstVar fit compared against the stored benchmark."""
     results = Fit(self.data, self.model, Chi2ConstVar(), LevMar()).fit()
     self.compare_results(self._fit_chi2constvar_results_bench, results)
示例#29
0
File: __init__.py  Project: nplee/sherpa
    def call(self, niter, seed=None):
        """Resample the data and fit the model to each iteration.

        .. versionadded:: 4.12.2
           The samples and statistic keys were added to the return
           value, the parameter values are returned as NumPy arrays
           rather than as lists, and the seed parameter was made
           optional.

        Parameters
        ----------
        niter : int
            The number of iterations.
        seed : int or None, optional
            The seed value.

        Returns
        -------
        sampled : dict
           The keys are samples, which contains the resampled data
           used in the fits as a niter by ndata array, and the free
           parameters in the fit, containing a NumPy array containing
           the fit parameter for each iteration (of size niter).

        Notes
        -----
        The fit for each iteration uses the input values of the
        model parameters as the starting point. The parameters of
        the model are not changed by this method.

        """

        # Each fit is reset to this set of values as the starting point
        orig_pars = self.model.thawedpars

        # One sample array per thawed parameter, keyed by full name;
        # pars_index preserves the parameter ordering for reporting.
        pars = {}
        pars_index = []
        for par in self.model.pars:
            if par.frozen:
                continue

            name = par.fullname
            pars_index.append(name)
            pars[name] = numpy.zeros(niter)

        data = self.data
        y = data.y
        x = data.x
        if type(data) == Data1DAsymmetricErrs:
            y_l = y - data.elo
            y_h = y + data.ehi
        elif isinstance(data, (Data1D, )):
            y_l = data.staterror
            y_h = data.staterror
        else:
            msg = "{0} {1}".format(ReSampleData.__name__, type(data))
            raise NotImplementedError(msg)

        ny = len(y)

        fake_data = Data1D('tmp', x, numpy.zeros(ny))

        numpy.random.seed(seed)
        ry_all = numpy.zeros((niter, ny), dtype=y_l.dtype)
        stats = numpy.zeros(niter)
        for j in range(niter):
            ry = ry_all[j]
            for i in range(ny):
                a = y_l[i]
                b = y_h[i]
                r = None

                while r is None:

                    # Flip between low or hi
                    #  u = 0  pick low
                    #  u = 1  pick high
                    #
                    # Switching to randint rather than random_sample
                    # leads to different answers, so the tests fail,
                    # so leave as is.
                    #
                    # u = numpy.random.randint(low=0, high=2)
                    #
                    u = numpy.random.random_sample()
                    u = 0 if u < 0.5 else 1

                    # Rather than dropping this value, we could
                    # reflect it (ie multiply it by -1 if the sign
                    # is wrong). Would this affect the statistical
                    # properties?
                    #
                    dr = numpy.random.normal(loc=0, scale=1, size=None)
                    if u == 0:
                        if dr > 0:
                            continue

                        sigma = y[i] - a

                    else:
                        if dr < 0:
                            continue

                        sigma = b - y[i]

                    r = y[i] + dr * sigma

                ry[i] = r

            # fit is performed for each simulated data point, and we
            # always start at the original best-fit location to
            # start the fit (by making sure we always reset after a fit).
            #
            fake_data.y = ry
            fit = Fit(fake_data, self.model, LeastSq(), LevMar())
            try:
                fit_result = fit.fit()
            finally:
                self.model.thawedpars = orig_pars

            stats[j] = fit_result.statval
            for name, val in zip(fit_result.parnames, fit_result.parvals):
                pars[name][j] = val

        result = {'samples': ry_all, 'statistic': stats}
        for name in pars_index:
            avg = numpy.average(pars[name])
            std = numpy.std(pars[name])
            # BUG FIX: a duplicated std argument was passed to format()
            # (four arguments for three placeholders).
            info('{} : avg = {} , std = {}'.format(name, avg, std))
            result[name] = pars[name]

        return result
示例#30
0
 def test_leastsq_stat(self):
     """LeastSq fit compared against the stored benchmark."""
     results = Fit(self.data, self.model, LeastSq(), LevMar()).fit()
     self.compare_results(self._fit_leastsq_results_bench, results)