Example #1
def test_fitresults_multi(method):
    """Fit multiple datasets"""

    d1 = Data1D('dx', [1, 2, 3], [4, 2, 2])
    d2 = Data1D('dx', [4, 5, 6, 10], [4, 4, 2, 4])
    d = DataSimulFit('combined', (d1, d2))

    m1 = Const1D()
    m1.c0 = 3
    m = SimulFitModel('silly', (m1, m1))

    fr = fit.Fit(d, m, method=method(), stat=LeastSq()).fit()
    fr.datasets = ['ddx', 'ddy']
    r = fr._repr_html_()

    assert r is not None

    assert '<summary>Summary (9)</summary>' in r
    assert '<td>const1d.c0</td>' in r

    assert '<div class="dataname">Datasets</div><div class="dataval">ddx,ddy</div>' in r
    assert '<div class="dataname">Method</div><div class="dataval">{}</div>'.format(fr.methodname) in r
    assert '<div class="dataname">Statistic</div><div class="dataval">leastsq</div>' in r

    assert '<div class="dataname">&#916; statistic</div><div class="dataval">0.142857</div>' in r
    assert '<div class="dataname">Number of data points</div><div class="dataval">7</div>' in r
    assert '<div class="dataname">Degrees of freedom</div><div class="dataval">6</div>' in r
Example #2
    def setUp(self):
        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        x = numpy.linspace(1.0, 101., num=101)[0::2]
        y1 = [ 1., 5., 2., 4., 7.,11., 9., 8.,12.,18.,12.,11.,13.,12.,13.,
               13.,20.,23.,16.,20.,24.,17.,21.,26.,22.,24.,24.,21.,28.,
               28.,26.,25.,34.,26.,34.,33.,25.,38.,31.,43.,35.,42.,50.,
               41.,43.,47.,57.,53.,60.,46.,54.]
        y2 = [ 0., 7., 6., 3., 5., 5., 9.,11.,13., 8.,14.,13.,14.,18.,11.,
               15.,17.,26., 15.,19.,25.,30.,15.,29.,16.,25.,27.,29.,36.,
               41.,22.,27.,33.,32.,45.,37.,38.,38.,34.,52.,40.,41.,31.,
               47.,38.,52.,57.,33.,48.,53.,45.]
        y3 = [ 1., 2., 4., 2., 5., 8.,15.,10.,13.,10.,16.,10.,13.,12.,16.,
               17.,17.,20., 23.,16.,25.,22.,19.,31.,26.,24.,21.,29.,36.,
               30.,33.,30.,37.,27.,36.,32., 42.,44.,39.,30.,40.,33.,39.,
               49.,56.,47.,46.,35.,63.,40.,57.]
        self.d1 = Data1D('1', x, y1)
        self.d2 = Data1D('2', x, y2)
        self.d3 = Data1D('3', x, y3)

        x = numpy.linspace(-5., 5., 100)
        g1, g2 = Gauss1D(), Gauss1D()
        g1.fwhm = 1.14
        g1.pos = 1.2
        g2.fwhm = 4.13
        g2.pos = -1.3
        numpy.random.seed(0)
        y1 = g1(x) + numpy.random.normal(0.0, 0.05, x.shape)
        y2 = g2(x) + numpy.random.normal(0.0, 0.05, x.shape)
        self.d4 = Data1D('4', x, y1)
        self.d5 = Data1D('5', x, y2)
Example #3
def setUp3(hide_logging):

    x = numpy.linspace(1.0, 101., num=101)[0::2]
    y1 = [
        1., 5., 2., 4., 7., 11., 9., 8., 12., 18., 12., 11., 13., 12., 13.,
        13., 20., 23., 16., 20., 24., 17., 21., 26., 22., 24., 24., 21., 28.,
        28., 26., 25., 34., 26., 34., 33., 25., 38., 31., 43., 35., 42., 50.,
        41., 43., 47., 57., 53., 60., 46., 54.
    ]
    y2 = [
        0., 7., 6., 3., 5., 5., 9., 11., 13., 8., 14., 13., 14., 18., 11., 15.,
        17., 26., 15., 19., 25., 30., 15., 29., 16., 25., 27., 29., 36., 41.,
        22., 27., 33., 32., 45., 37., 38., 38., 34., 52., 40., 41., 31., 47.,
        38., 52., 57., 33., 48., 53., 45.
    ]
    y3 = [
        1., 2., 4., 2., 5., 8., 15., 10., 13., 10., 16., 10., 13., 12., 16.,
        17., 17., 20., 23., 16., 25., 22., 19., 31., 26., 24., 21., 29., 36.,
        30., 33., 30., 37., 27., 36., 32., 42., 44., 39., 30., 40., 33., 39.,
        49., 56., 47., 46., 35., 63., 40., 57.
    ]
    d1 = Data1D('1', x, y1)
    d2 = Data1D('2', x, y2)
    d3 = Data1D('3', x, y3)

    return d1, d2, d3
Example #4
def data_simul_fit():
    data_one = Data1D("data_one", X_ARRAY, Y_ARRAY, STATISTICAL_ERROR_ARRAY,
                      SYSTEMATIC_ERROR_ARRAY)
    data_two = Data1D("data_two", MULTIPLIER * X_ARRAY, MULTIPLIER * Y_ARRAY,
                      MULTIPLIER * STATISTICAL_ERROR_ARRAY,
                      MULTIPLIER * SYSTEMATIC_ERROR_ARRAY)
    return DataSimulFit(NAME, (data_one, data_two))
Example #5
def test_psf1d_kernel_data(caplog):
    """Access the kernel data: subkernel=True"""

    k = np.asarray([2, 5, 3])
    x = 5 + np.arange(k.size)
    d = Data1D('my-kernel', x, k)

    m = PSFModel(kernel=d)

    dfold = Data1D('fold', np.arange(10), np.zeros(10))

    with caplog.at_level(logging.INFO, logger='sherpa'):
        ans = m.get_kernel(dfold)

    assert len(caplog.records) == 1
    r = caplog.record_tuples[0]
    assert r[0] == 'sherpa.instrument'
    assert r[1] == logging.INFO
    assert r[2] == "PSF frac: 1.0"

    assert isinstance(ans, Data1D)
    assert ans.name == 'kernel'
    # integers, so treat as exact
    assert (ans.x == np.arange(5, 8)).all()
    assert ans.y == pytest.approx(k / k.sum())
    assert ans.staterror is None
    assert ans.syserror is None
Example #6
def test_psf1d_combined_v2():
    """See test_psf1d_step_v2"""

    smdl = StepLo1D()
    smdl.xcut = 100
    smdl.ampl = 10

    cmdl = Const1D()
    cmdl.c0 = -500

    imdl = smdl + cmdl

    gsmooth = Gauss1D()
    psf = PSFModel('psf', gsmooth)

    x = np.arange(0, 200, 0.5)
    d = Data1D('fake', x, x * 0)
    psf.fold(d)

    smoothed = psf(imdl)
    y = smoothed(x)

    # So the output is not easy to describe analytically, hence
    # we just check parts of it.
    #
    assert y[(x >= 19.5) & (x <= 100)] == pytest.approx([-490] * 162, abs=1e-4)
    assert y[x >= 119] == pytest.approx([-500] * 162, abs=1e-4)

    # check that the y values for x <= 19 are in ascending order
    y1 = y[x <= 19]
    assert (y1[1:] > y1[:-1]).all()
Example #7
def test_psf1d_combined():
    """This is based on
    sherpa.models.tests.test_regrid_unit.test_regrid1_works_with_convolution_style
    but I wanted to make sure we have an explicit check of the underlying
    code.
    """

    smdl = StepLo1D()
    smdl.xcut = 12.5
    smdl.ampl = 10

    cmdl = Const1D()
    cmdl.c0 = -500

    imdl = smdl + cmdl

    gsmooth = Gauss1D()
    gsmooth.fwhm = 3
    psf = PSFModel('psf', gsmooth)

    x = np.arange(5, 23, 3)
    d = Data1D('fake', x, x * 0)
    psf.fold(d)

    smoothed = psf(imdl)
    y = smoothed(x)

    assert y == pytest.approx([-490, -490, -490, -500, -500, -500], rel=7e-3)
Example #8
def test_psf1d_step_v2():
    """Trying to track down why we have seen different behavior in
    test_regrid_unit.py.
    """

    smdl = StepLo1D()
    smdl.xcut = 100
    smdl.ampl = 10

    gsmooth = Gauss1D()
    psf = PSFModel('psf', gsmooth)

    x = np.arange(0, 200, 0.5)
    d = Data1D('fake', x, x * 0)
    psf.fold(d)

    smoothed = psf(smdl)
    y = smoothed(x)

    # So the output is not easy to describe analytically, hence
    # we just check parts of it.
    #
    assert y[(x >= 19.5) & (x <= 100)] == pytest.approx([10] * 162, abs=1e-4)
    assert y[x >= 119] == pytest.approx([0] * 162, abs=1e-4)

    # check that the y values for x <= 19 are in ascending order
    y1 = y[x <= 19]
    assert (y1[1:] > y1[:-1]).all()
Example #9
def test_psf1d_kernel_model(caplog):
    """Access the kernel data: subkernel=True"""

    k = Box1D()
    k.xlow = 1
    k.xhi = 3
    m = PSFModel(kernel=k)

    dfold = Data1D('fold', np.arange(10), np.zeros(10))

    with caplog.at_level(logging.INFO, logger='sherpa'):
        ans = m.get_kernel(dfold)

    assert len(caplog.records) == 1
    r = caplog.record_tuples[0]
    assert r[0] == 'sherpa.instrument'
    assert r[1] == logging.INFO
    assert r[2] == "PSF frac: 1.0"

    assert isinstance(ans, Data1D)
    assert ans.name == 'kernel'

    # integers, so treat as exact
    assert (ans.x == np.arange(10)).all()

    # box1D between 1 and 3 inclusive
    y = np.asarray([0, 1, 1, 1, 0, 0, 0, 0, 0, 0])
    assert ans.y == pytest.approx(y / y.sum())

    assert ans.staterror is None
    assert ans.syserror is None
Example #10
def test_psf1d_model_show():
    """What happens when the kernel is a model?"""

    box1 = make_1d_model()
    m = PSFModel("pmodel1", box1)

    dfold = Data1D('fold', np.arange(10), np.zeros(10))
    m.fold(dfold)

    out = str(m).split("\n")
    assert len(out) == 8
    assert out[0] == "pmodel1"
    assert out[1] == "   Param        Type          Value          Min          Max      Units"
    assert out[2] == "   -----        ----          -----          ---          ---      -----"
    assert out[3] == "   pmodel1.kernel frozen         box1"
    assert out[4] == "   pmodel1.size frozen           10           10           10"
    assert out[5] == "   pmodel1.center frozen            5            5            5"
    assert out[6] == "   pmodel1.radial frozen            0            0            1           "
    assert out[7] == "   pmodel1.norm frozen            1            0            1           "
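The make_1d_model helper is not part of this excerpt. A plausible stand-in, inferred from the expected output above (the kernel row names a model called 'box1'), is sketched below; the xlow/xhi values are assumptions and the real helper may differ.

from sherpa.models.basic import Box1D

def make_1d_model():
    # hypothetical kernel model: a Box1D instance named 'box1'
    mdl = Box1D('box1')
    mdl.xlow = 1
    mdl.xhi = 3
    return mdl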
Example #11
def test_manual_setting_mask():
    d = Data1D(name='test', x=[1, 2, 3], y=[0, 0, 0])
    d.mask = True
    assert len(d.get_dep(filter=True)) == 3

    d.mask = False
    # This test looks like it does not do anything, but in fact "mask"
    # is a property with complex logic, so the fact that setting it to
    # False makes it False is non-trivial.
    # We do not test for
    # len(d.get_dep(filter=True)) == 0
    # because get_dep raises an error when no data is noticed, and the
    # point here is not to test get_dep but the fact that setting
    # the mask itself works.
    assert d.mask is False

    d.mask = [True, False, True]
    assert len(d.get_dep(filter=True)) == 2
    # arr.mask is numpy.ma.nomask, but obtained in a more natural way
    arr = numpy.ma.array([3, 4, 5])
    d.mask = arr.mask
    assert len(d.get_dep(filter=True)) == 3

    with pytest.raises(DataErr) as e:
        d.mask = None
    assert 'True, False, or a mask array' in str(e.value)
Example #12
def test_regrid_binaryop_1d(reset_seed):
    """issue #762, Cannot regrid a composite model (BinaryOpModel)"""

    np.random.seed(0)
    leastsq = LeastSq()
    levmar = LevMar()
    mygauss = MyGauss()
    myconst = MyConst1D()
    mymodel = mygauss + myconst
    x = np.linspace(-5., 5., 5)
    err = 0.25
    y = mymodel(x) + np.random.normal(mygauss.pos.val, err, x.shape)
    mygauss.counter = 0
    myconst.counter = 0
    data = Data1D('one', x, y)
    fit = Fit(data, mymodel, leastsq, levmar)
    result = fit.fit()
    assert result.numpoints == x.size
    assert result.statval < 1.0
    assert mygauss.counter == myconst.counter
    assert (result.nfev + 4) * x.size == mygauss.counter

    mygauss.counter = 0
    myconst.counter = 0
    x_regrid = np.linspace(-5., 5., 25)
    mymodel_regrid = mymodel.regrid(x_regrid)
    fit = Fit(data, mymodel_regrid, leastsq, levmar)
    result = fit.fit()
    assert result.numpoints == x.size
    assert result.statval < 1.0
    assert mygauss.counter == myconst.counter
    assert (result.nfev + 4) * x_regrid.size == mygauss.counter
Example #13
def test_errresults_limits_interval():
    """Missing an error limit"""
    d = Data1D('dx', [1, 2, 3], [4, 2, 2], [1.2, 0.9, 0.9])
    m = Const1D()
    m.c0 = 3
    f = fit.Fit(d, m, stat=Chi2())
    er = f.est_errors()

    # perhaps should just fake this instead?
    assert len(er.parmaxes) == 1
    er.parmaxes = ([0.1, 0.2], )

    r = er._repr_html_()

    assert r is not None

    print(r)

    assert '<summary>covariance 1&#963; (68.2689%) bounds</summary>' in r
    assert '<summary>Summary (2)' in r
    assert '<td>const1d.c0</td>' in r
    assert '<div class="dataname">Fitting Method</div><div class="dataval">levmar</div>' in r
    assert '<div class="dataname">Statistic</div><div class="dataval">chi2</div>' in r

    assert '<tr><td>const1d.c0</td><td>           3</td><td>   -0.562226</td><td>(1.000000e-01, 2.000000e-01)</td></tr>' in r
Example #14
def setup():
    data = Data1D('fake', _x, _y, _err)

    g1 = Gauss1D('g1')
    g1.fwhm.set(1.0, _tiny, _max, frozen=False)
    g1.pos.set(1.0, -_max, _max, frozen=False)
    g1.ampl.set(1.0, -_max, _max, frozen=False)
    p1 = PowLaw1D('p1')
    p1.gamma.set(1.0, -10, 10, frozen=False)
    p1.ampl.set(1.0, 0.0, _max, frozen=False)
    p1.ref.set(1.0, -_max, _max, frozen=True)
    model = p1 + g1

    method = LevMar()
    method.config['maxfev'] = 10000
    method.config['ftol'] = float(_eps)
    method.config['epsfcn'] = float(_eps)
    method.config['gtol'] = float(_eps)
    method.config['xtol'] = float(_eps)
    method.config['factor'] = float(100)

    fit = Fit(data, model, Chi2DataVar(), method, Covariance())
    results = fit.fit()

    for key in ["succeeded", "numpoints", "nfev"]:
        assert _fit_results_bench[key] == int(getattr(results, key))

    for key in ["rstat", "qval", "statval", "dof"]:
        # used rel and abs tol of 1e-7 with numpy allclose
        assert float(getattr(results, key)) == pytest.approx(_fit_results_bench[key])

    for key in ["parvals"]:
        try:
            # used rel and abs tol of 1e-4 with numpy allclose
            assert getattr(results, key) == pytest.approx(_fit_results_bench[key])
        except AssertionError:
            print('parvals bench: ', _fit_results_bench[key])
            print('parvals fit:   ', getattr(results, key))
            print('results', results)
            raise

    fields = [
        'data', 'model', 'method', 'fit', 'results', 'covresults', 'dof', 'mu',
        'num'
    ]
    out = namedtuple('Results', fields)

    out.data = data
    out.model = model
    out.method = method
    out.fit = fit
    out.results = results
    out.covresults = fit.est_errors()
    out.dof = results.dof
    out.mu = numpy.array(results.parvals)
    out.cov = numpy.array(out.covresults.extra_output)
    out.num = 10
    return out
Example #15
def test_errors_with_no_stat():
    """Check we get no errors when stat is None"""

    d = Data1D('x', numpy.asarray([2, 4, 10]), numpy.asarray([2, 4, 0]))
    dp = sherpaplot.DataPlot()
    dp.prepare(d, stat=None)
    assert dp.yerr is None
Example #16
def test_intproj(old_numpy_printing, override_plot_backend):
    p = plot.IntervalProjection()
    r = p._repr_html_()

    check_empty(r, 'IntervalProjection', nsummary=8)

    x = np.arange(5, 8, 0.5)
    y = np.asarray([2, 3, 4, 5, 4, 3])
    dy = y / 2
    d = Data1D('n n', x, y, staterror=dy)

    m = Const1D()

    fit = Fit(d, m, stat=Chi2())
    fr = fit.fit()
    assert fr.succeeded

    p.prepare(min=1, max=6, nloop=10)
    p.calc(fit, m.c0)

    r = p._repr_html_()
    assert r is not None

    if plot.backend.name == 'pylab':
        assert '<summary>IntervalProjection</summary>' in r
        assert '<svg ' in r
        return

    assert '<summary>IntervalProjection (8)</summary>' in r

    assert '<div class="dataname">x</div><div class="dataval">[ 1.        1.555556  2.111111  2.666667  3.222222  3.777778  4.333333  4.888889\n  5.444444  6.      ]</div>' in r
    assert '<div class="dataname">nloop</div><div class="dataval">10</div>' in r
Example #17
    def call(self, niter, seed):

        pars = {}
        pars_index = {}
        index = 0
        for par in self.model.pars:
            if par.frozen is False:
                name = '%s.%s' % (par.modelname, par.name)
                pars_index[index] = name
                pars[name] = []
                index += 1

        data = self.data
        y = data.y
        x = data.x
        if isinstance(data, Data1DAsymmetricErrs):
            y_l = y - data.elo
            y_h = y + data.ehi
        elif isinstance(data, Data1D):
            # symmetric errors: bound the draw by the data value plus/minus
            # the statistical error
            y_l = y - data.staterror
            y_h = y + data.staterror
        else:
            msg = "{0} {1}".format(ReSampleData.__name__, type(data))
            raise NotImplementedError(msg)

        numpy.random.seed(seed)
        for j in range(niter):
            ry = []
            for i in range(len(y_l)): 
                a = y_l[i]
                b = y_h[i]
                r = -1
                while r < a or r > b:
                    sigma = b - y[i]
                    u = numpy.random.random_sample()
                    if u < 0.5:
                        sigma = y[i] - a
                    r = numpy.random.normal(loc=y[i], scale=sigma, size=None)
                    if u < 0.5 and r > y[i]:
                        r = -1
                    if u > 0.5 and r < y[i]:
                        r = -1
                ry.append(r)

            # a fit is performed for each simulated dataset
            fit = Fit(Data1D('tmp', x, ry), self.model, LeastSq(), LevMar())
            fit_result = fit.fit()

            for index, val in enumerate(fit_result.parvals):
                name = pars_index[index]
                pars[name].append(val)

        result = {}
        for index, name in pars_index.items():
            avg = numpy.average(pars[name])
            std = numpy.std(pars[name])
            print(name, ': avg =', avg, ', std =', std)
            result[name] = pars[name]

        return result
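The inner while loop above is a rejection sampler over a split normal: the draw width depends on which side of y[i] the value falls, and draws outside [a, b] or on the wrong side are retried. A standalone restatement of that single-point draw, keeping the excerpt's variable names (a sketch for clarity, not the library's API):

import numpy

def draw_asymmetric(y_i, a, b):
    # draw one value in [a, b] around y_i with a side-dependent sigma
    r = a - 1.0  # start outside the accepted range
    while r < a or r > b:
        u = numpy.random.random_sample()
        # lower-side draws use the lower width, upper-side draws the upper width
        sigma = (y_i - a) if u < 0.5 else (b - y_i)
        r = numpy.random.normal(loc=y_i, scale=sigma)
        # reject a draw that landed on the side not selected by u
        if (u < 0.5 and r > y_i) or (u >= 0.5 and r < y_i):
            r = a - 1.0
    return r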
Example #18
def test_low_level_regrid1d_partial_overlap(requested):
    """What happens if there is partial overlap of the grid?

    The question becomes how do we evaluate the model
    "outside" the regrid range. There's at least two
    options:

      a) set to 0
      b) use the original grid

    This test is chosen so that it holds with both
    possibilities: the model evaluates to 0 outside
    of x=3.1 - 4.2, and the partial overlaps are
    carefully chosen to always include this full
    range.

    See https://github.com/sherpa/sherpa/issues/722
    """

    # The range over which we want the model evaluated
    xgrid = np.arange(2, 6, 0.1)
    d = Data1D('tst', xgrid, np.ones_like(xgrid))

    mdl = Box1D()
    mdl.xlow = 3.1
    mdl.xhi = 4.2
    mdl.ampl = 0.4

    yexpected = d.eval_model(mdl)
    assert yexpected.min() == pytest.approx(0.0)
    assert yexpected.max() == pytest.approx(0.4)

    ygot = d.eval_model(mdl.regrid(requested))
    assert ygot == pytest.approx(yexpected)
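The requested grids come from a parametrization that is not shown here. Grids consistent with the docstring's constraint would look something like the following (illustrative values only, not the ones the real test uses): each grid overlaps the data grid (2 to 6) only partially, yet always spans the 3.1-4.2 range where the box model is non-zero.

import numpy as np

PARTIAL_OVERLAP_GRIDS = [
    np.arange(1.0, 4.5, 0.1),   # starts below the data grid, still covers 3.1-4.2
    np.arange(3.0, 8.0, 0.1),   # ends above the data grid, still covers 3.1-4.2
]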
Example #19
def test_fit(override_plot_backend):
    p = plot.FitPlot()
    r = p._repr_html_()
    assert r is None  # note: always None

    x = np.arange(5, 8, 0.5)
    y = np.ones(x.size)
    d = Data1D('n n', x, y)

    m = Const1D()

    dplot = plot.DataPlot()
    dplot.prepare(d)

    mplot = plot.ModelPlot()
    mplot.prepare(d, m)

    p.prepare(dplot, mplot)
    r = p._repr_html_()

    # different to previous checks
    assert r is not None

    if plot.backend.name == 'pylab':
        assert '<summary>FitPlot</summary>' in r
        assert '<svg ' in r
        return

    assert '<summary>DataPlot (' in r
    assert '<summary>ModelPlot (' in r
    assert '<div class="dataval">n n</div>' in r
    assert '<div class="dataval">Model</div>' in r
Example #20
def test_parallel_map_funcs2():
    def tst(ncores, sg, stat, opt):
        sd = DataSimulFit('sd', [d, d], numcores=2)
        f = Fit(sd, sg, stat, opt)
        result = f.fit()
        return result

    def cmp_results(result, tol=1.0e-3):
        assert result.succeeded
        parvals = (1.7555670572301785, 1.5092728216164186, 4.893136872267538)
        assert result.numpoints == 200

        # use tol in approx?
        assert result.parvals == pytest.approx(parvals)

    numpy.random.seed(0)
    x = numpy.linspace(-5., 5., 100)
    ampl = 5
    pos = 1.5
    sigma = 0.75
    err = 0.25
    y = ampl * numpy.exp(-0.5 * (x - pos)**2 / sigma**2)
    y += numpy.random.normal(0., err, x.shape)
    d = Data1D('junk', x, y)
    g = Gauss1D()
    opt = LevMar()
    stat = LeastSq()
    sg = SimulFitModel('sg', [g, g])

    result = tst(1, sg, stat, opt)
    cmp_results(result)

    result = tst(2, sg, stat, opt)
    cmp_results(result)
Example #21
def test_runtime_interp():
    def tst_runtime_interp(model, requested, interp):
        regrid_model = mdl.regrid(requested, interp=interp)
        yregrid = regrid_model(xgrid)
        return yregrid

    xgrid = np.arange(2, 6, 0.1)
    requested = np.arange(2.5, 5.1, 0.075)
    mdl = Box1D()
    mdl.xlow = 3.1
    mdl.xhi = 4.2
    mdl.ampl = 0.4
    yregrid = tst_runtime_interp(mdl, requested, akima.akima)
    assert 4.4 == approx(yregrid.sum())
    yregrid = tst_runtime_interp(mdl, requested, linear_interp)
    assert 4.4 == approx(yregrid.sum())
    yregrid = tst_runtime_interp(mdl, requested, neville)
    assert -5.0e6 > yregrid.sum()

    d = Data1D('tst', xgrid, np.ones_like(xgrid))
    yexpected = d.eval_model(mdl)
    requested = np.arange(2.5, 7, 0.2)
    rmdl = mdl.regrid(requested)
    ygot = d.eval_model(rmdl)
    assert ygot == approx(yexpected)
Example #22
    def setUp(self):
        # defensive programming (one of the tests has been seen to fail
        # when the whole test suite is run without this)
        ui.clean()
        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)
        self.data = Data1D('tst', self._x, self._y, self._e)
        self.mdl = Polynom1D('mdl')
Example #23
def test_wrong_kwargs():
    xgrid = np.arange(2, 6, 0.1)
    d = Data1D('tst', xgrid, np.ones_like(xgrid))
    mdl = Box1D()
    requested = np.arange(1, 7, 0.1)
    with pytest.raises(TypeError) as excinfo:
        ygot = d.eval_model(mdl.regrid(requested, fubar='wrong_kwargs'))
    assert "unknown keyword argument: 'fubar'" in str(excinfo.value)
Example #24
def setUp(hide_logging):

    x = [-13, -5, -3, 2, 7, 12]
    y = np.asarray([102.3, 16.7, -0.6, -6.7, -9.9, 33.2])
    err = np.ones(6) * 5
    data = Data1D('tst', x, y, err)
    mdl = Polynom1D('mdl')
    return data, mdl
Example #25
def setUp2(hide_logging, reset_seed):

    x = numpy.linspace(-5., 5., 100)
    g1, g2 = Gauss1D(), Gauss1D()
    g1.fwhm = 1.14
    g1.pos = 1.2
    g2.fwhm = 4.13
    g2.pos = -1.3

    numpy.random.seed(0)
    y1 = g1(x) + numpy.random.normal(0.0, 0.05, x.shape)
    y2 = g2(x) + numpy.random.normal(0.0, 0.05, x.shape)

    d4 = Data1D('4', x, y1)
    d5 = Data1D('5', x, y2)

    return d4, d5
Example #26
def test_psf1d_show():
    """Test the __str__ method

    Loop through basic data and then add in the options
    (but not all possible combinations).
    """
    def check(x, n, ans):
        toks = x[n].split()
        assert toks == ans

    NAME = ['psfmodel']
    PARAMS = ['Param', 'Type', 'Value', 'Min', 'Max', 'Units']
    LINES = ['-----', '----', '-----', '---', '---', '-----']
    KERNEL = ['psfmodel.kernel', 'frozen', 'oned']
    SIZE = ['psfmodel.size', 'frozen', '7', '7', '7']
    CENTER = ['psfmodel.center', 'frozen', '3', '3', '3']
    RADIAL = ['psfmodel.radial', 'frozen', '0', '0', '1']
    NORM = ['psfmodel.norm', 'frozen', '1', '0', '1']

    m = PSFModel()

    # basic settings
    out = str(m).split('\n')

    assert len(out) == 5
    check(out, 0, NAME)
    check(out, 1, PARAMS)
    check(out, 2, LINES)
    check(out, 3, RADIAL)
    check(out, 4, NORM)

    m.kernel = make_1d_data()

    # before a fold you don't get the size and center parameters
    out = str(m).split('\n')

    assert len(out) == 6
    check(out, 0, NAME)
    check(out, 1, PARAMS)
    check(out, 2, LINES)
    check(out, 3, KERNEL)
    check(out, 4, RADIAL)
    check(out, 5, NORM)

    dfold = Data1D('fold', np.arange(10), np.zeros(10))
    m.fold(dfold)

    out = str(m).split('\n')

    assert len(out) == 8
    check(out, 0, NAME)
    check(out, 1, PARAMS)
    check(out, 2, LINES)
    check(out, 3, KERNEL)
    check(out, 4, SIZE)
    check(out, 5, CENTER)
    check(out, 6, RADIAL)
    check(out, 7, NORM)
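The make_1d_data helper is not included in this excerpt. A hypothetical stand-in, inferred from the expected output above (a Data1D kernel named 'oned' that reports size=7 and center=3 once folded); the y values are illustrative only.

import numpy as np
from sherpa.data import Data1D

def make_1d_data():
    # hypothetical 7-point kernel dataset named 'oned'
    x = np.arange(7)
    y = np.asarray([1, 2, 4, 8, 4, 2, 1])
    return Data1D('oned', x, y)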
Example #27
def test_psf1d_fold_no_kernel():
    """Error out if there's no kernel"""

    m = PSFModel('bob')
    dfold = Data1D('fold', np.arange(10), np.zeros(10))
    with pytest.raises(PSFErr) as exc:
        m.fold(dfold)

    assert "model 'bob' does not have an associated PSF function" == str(exc.value)
Example #28
def test_cache():
    """To make sure that the runtime fit(cache=???) works"""

    x = np.array([1.0, 2.0, 3.0])
    model = MyCacheTestModel()
    par = np.array([1.1, 2.0, 3.0])
    y = model.calc(par, x)

    data = Data1D('tmp', x, y)
    fit = Fit(data, model, LeastSq())
    fit.fit(cache=False)
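MyCacheTestModel is defined elsewhere in the test module. A minimal hypothetical three-parameter model (the test builds a par array of length 3) following Sherpa's ArithmeticModel pattern is sketched below; the real class may differ, in particular in how it exercises the cache.

import numpy as np
from sherpa.models.model import ArithmeticModel
from sherpa.models.parameter import Parameter

class MyCacheTestModel(ArithmeticModel):
    # hypothetical quadratic model with three parameters
    def __init__(self, name='mycachetest'):
        self.a = Parameter(name, 'a', 1)
        self.b = Parameter(name, 'b', 1)
        self.c = Parameter(name, 'c', 1)
        ArithmeticModel.__init__(self, name, (self.a, self.b, self.c))

    def calc(self, pars, x, *args, **kwargs):
        a, b, c = pars
        x = np.asarray(x)
        return a * x * x + b * x + c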
Example #29
def make_data(data_class):
    """Create a test data object of the given class.

    Using a string means it is easier to support the various PHA
    "types" - eg basic, grouping, grouping+quality.

    """

    x0 = np.asarray([1, 3, 7, 12])
    y = np.asarray([2, 3, 4, 5])
    if data_class == "1d":
        return Data1D('x1', x0, y)

    if data_class == "1dint":
        return Data1DInt('xint1', x0, np.asarray([3, 5, 8, 15]), y)

    chans = np.arange(1, 5)
    if data_class == "pha":
        return DataPHA('pha', chans, y)

    # We want to provide PHA tests that check out the grouping and
    # quality handling (but it is not worth trying all different
    # variants), so we have "grp" for grouping and no quality [*], and
    # "qual" for grouping and quality.
    #
    # [*] by which I mean we have not called ignore_bad, not that
    # there is no quality array.
    #
    grp = np.asarray([1, -1, 1, 1])
    qual = np.asarray([0, 0, 2, 0])
    pha = DataPHA('pha', chans, y, grouping=grp, quality=qual)
    if data_class == "grp":
        return pha

    if data_class == "qual":
        pha.ignore_bad()
        return pha

    x0 = np.asarray([1, 2, 3] * 2)
    x1 = np.asarray([1, 1, 1, 2, 2, 2])
    y = np.asarray([2, 3, 4, 5, 6, 7])
    if data_class == "2d":
        return Data2D('x2', x0, x1, y, shape=(2, 3))

    if data_class == "2dint":
        return Data2DInt('xint2', x0, x1, x0 + 1, x1 + 1, y, shape=(2, 3))

    if data_class == "img":
        return DataIMG('img', x0, x1, y, shape=(2, 3))

    if data_class == "imgint":
        return DataIMGInt('imgi', x0, x1, x0 + 1, x1 + 1, y, shape=(2, 3))

    assert False
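A hedged example of how a factory like make_data is usually consumed from pytest: the labels mirror the strings handled above, and the assertion is deliberately generic (the real tests check class-specific behaviour).

import pytest

@pytest.mark.parametrize("data_class",
                         ["1d", "1dint", "pha", "grp", "qual",
                          "2d", "2dint", "img", "imgint"])
def test_make_data_returns_dependent_axis(data_class):
    data = make_data(data_class)
    assert data.get_dep() is not None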
Example #30
def test_data(override_plot_backend):
    p = plot.DataPlot()
    r = p._repr_html_()
    check_full(r, 'DataPlot', 'None', 'None', nsummary=7)  # NOT empty

    x = np.arange(5, 8, 0.5)
    y = np.ones(x.size)
    dr = y / 0.1
    d = Data1D('n n', x, y, staterror=dr)
    p.prepare(d)
    r = p._repr_html_()
    check_full(r, 'DataPlot', 'y', 'n n', nsummary=7)