Example #1
    def setUp(self):
        self._old_logger_level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)

        x = numpy.linspace(1.0, 101., num=101)[0::2]
        y1 = [ 1., 5., 2., 4., 7.,11., 9., 8.,12.,18.,12.,11.,13.,12.,13.,
               13.,20.,23.,16.,20.,24.,17.,21.,26.,22.,24.,24.,21.,28.,
               28.,26.,25.,34.,26.,34.,33.,25.,38.,31.,43.,35.,42.,50.,
               41.,43.,47.,57.,53.,60.,46.,54.]
        y2 = [ 0., 7., 6., 3., 5., 5., 9.,11.,13., 8.,14.,13.,14.,18.,11.,
               15.,17.,26., 15.,19.,25.,30.,15.,29.,16.,25.,27.,29.,36.,
               41.,22.,27.,33.,32.,45.,37.,38.,38.,34.,52.,40.,41.,31.,
               47.,38.,52.,57.,33.,48.,53.,45.]
        y3 = [ 1., 2., 4., 2., 5., 8.,15.,10.,13.,10.,16.,10.,13.,12.,16.,
               17.,17.,20., 23.,16.,25.,22.,19.,31.,26.,24.,21.,29.,36.,
               30.,33.,30.,37.,27.,36.,32., 42.,44.,39.,30.,40.,33.,39.,
               49.,56.,47.,46.,35.,63.,40.,57.]
        self.d1 = Data1D('1', x, y1)
        self.d2 = Data1D('2', x, y2)
        self.d3 = Data1D('3', x, y3)

        x = numpy.linspace(-5., 5., 100)
        g1, g2 = Gauss1D(), Gauss1D()
        g1.fwhm = 1.14
        g1.pos = 1.2
        g2.fwhm = 4.13
        g2.pos = -1.3
        numpy.random.seed(0)
        y1 = g1(x) + numpy.random.normal(0.0, 0.05, x.shape)
        y2 = g2(x) + numpy.random.normal(0.0, 0.05, x.shape)
        self.d4 = Data1D('4', x, y1)
        self.d5 = Data1D('5', x, y2)
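The d4/d5 Gaussian datasets built here feed the simultaneous fits in Example #3 below. For orientation, the same Fit / LevMar / LeastSq combination works on a single dataset; the following is a minimal standalone sketch, not part of the original suite:

import numpy
from sherpa.data import Data1D
from sherpa.fit import Fit
from sherpa.models.basic import Gauss1D
from sherpa.optmethods import LevMar
from sherpa.stats import LeastSq

# build one noisy Gaussian dataset, mirroring the setUp above
x = numpy.linspace(-5., 5., 100)
truth = Gauss1D()
truth.fwhm, truth.pos = 1.14, 1.2
numpy.random.seed(0)
y = truth(x) + numpy.random.normal(0.0, 0.05, x.shape)

# fit it with a fresh Gauss1D using least squares and Levenberg-Marquardt
fit = Fit(Data1D('d', x, y), Gauss1D(), method=LevMar(), stat=LeastSq())
res = fit.fit()
assert res.succeeded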
Example #2
    def __init__(self, pos1, pos2):
        self.src1 = Gauss1D()
        self.src1.pos = pos1
        self.src1_pos = pos1
        self.src2 = Gauss1D()
        self.src2.pos = pos2
        self.src2_pos = pos2
        self.tst_pos(self.src1.pos, self.src1_pos)
        self.tst_pos(self.src2.pos, self.src2_pos)
        return
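The tst_pos helper called above is defined elsewhere in the test class; a minimal stand-in, assuming it simply compares the stored and assigned positions (hypothetical, the real method may do more):

    def tst_pos(self, actual, expected):
        # hypothetical helper; compare the stored and assigned positions
        assert actual == pytest.approx(expected)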
Example #3
    def test_gauss_gauss(self):
        g1, g2 = Gauss1D(), Gauss1D()
        g1.fwhm = 1.3
        g1.pos = 1.5
        g2.fwhm = 4.
        g2.pos = -2.0
        sdata = DataSimulFit('d4d5', (self.d4, self.d5))
        smodel = SimulFitModel('g1g2', (g1, g2))
        sfit = Fit(sdata, smodel, method=LevMar(), stat=LeastSq())
        result = sfit.fit()
        self.compare_results(self._fit_g2g2_bench, result)
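compare_results and the _fit_g2g2_bench benchmark are defined elsewhere; a plausible sketch in the spirit of cmp_results from Example #11 (the field names and tolerance are assumptions):

    def compare_results(self, bench, result, tol=1.0e-3):
        # hypothetical sketch; the real helper and benchmark hold more fields
        assert result.succeeded
        assert result.numpoints == bench['numpoints']
        assert result.parvals == pytest.approx(bench['parvals'], rel=tol)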
Example #4
def test_model_combined_samename():
    """We can show a binary op"""
    m1 = Gauss1D('name')
    m2 = Gauss1D('name')
    m = m1 + m2
    r = m._repr_html_()

    assert r is not None

    assert '<th class="model-odd" scope="rowgroup" rowspan=3>name</th>' in r
    assert '<th class="model-even" scope="rowgroup" rowspan=3>name</th>' in r
Example #5
def test_has_pha_response():
    """Check the examples from the docstring"""

    exposure = 200.1
    rdata = create_non_delta_rmf()
    specresp = create_non_delta_specresp()
    adata = create_arf(rdata.energ_lo,
                       rdata.energ_hi,
                       specresp,
                       exposure=exposure)

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha',
                  channel=channels,
                  counts=counts,
                  exposure=exposure)

    pha.set_arf(adata)
    pha.set_rmf(rdata)

    rsp = Response1D(pha)
    m1 = Gauss1D()
    m2 = PowLaw1D()

    assert not has_pha_response(m1)
    assert has_pha_response(rsp(m1))
    assert not has_pha_response(m1 + m2)
    assert has_pha_response(rsp(m1 + m2))
    assert has_pha_response(m1 + rsp(m2))

    # the response can appear on either side of the addition
    assert has_pha_response(rsp(m1) + m2)
    assert has_pha_response(rsp(m1) + rsp(m2))
Example #6
def test_sourceplot_channels(caplog, make_basic_datapha):
    """Although we ask for channels we get energy units"""

    data = make_basic_datapha
    data.units = "channel"

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 1
    lname, lvl, msg = caplog.record_tuples[0]
    assert lname == 'sherpa.astro.plot'
    assert lvl == logging.WARN
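    # The expected text (including 'unappropriate') is compared verbatim with
    # the warning emitted by sherpa.astro.plot, so the wording is left as emitted.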
    assert msg == 'Channel space is unappropriate for the PHA unfolded source model,\nusing energy.'

    check_sourceplot_energy(sp)
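The make_basic_datapha fixture is defined elsewhere; based on the standalone variants in Examples #8, #13 and #18, a plausible (purely illustrative) reconstruction is:

@pytest.fixture
def make_basic_datapha():
    # hypothetical reconstruction; the real fixture may set further attributes
    bins = np.arange(0.1, 10.1, 0.1)
    return DataPHA('', np.arange(10), np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())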
Example #7
def test_sourceplot_facn(factor, caplog, make_basic_datapha):
    """Change plot factor for test_sourceplot"""

    data = make_basic_datapha
    data.units = "energy"
    data.plot_fac = factor
    assert data.rate

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_energy(sp, factor=factor)
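The factor argument here (and in Example #9) is supplied by a pytest parametrization that is not shown; it presumably looks something like the following, with hypothetical values:

@pytest.mark.parametrize("factor", [0, 1, 2])
def test_sourceplot_facn(factor, caplog, make_basic_datapha):
    ...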
Example #8
def test_sourceplot_wavelength_counts(caplog):
    """test_sourceplot_wavelength but when rate=False is chosen"""

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('',
                   np.arange(10),
                   np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "wave"
    data.rate = False

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp)
Example #9
def test_sourceplot_wavelength_facn(factor, caplog):
    """Change plot factor for test_sourceplot_wavelength"""

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('',
                   np.arange(10),
                   np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "wavelength"
    data.plot_fac = factor
    assert data.rate

    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp, factor=factor)
Example #10
def test_psf1d_combined_v2():
    """See test_psf1d_step_v2"""

    smdl = StepLo1D()
    smdl.xcut = 100
    smdl.ampl = 10

    cmdl = Const1D()
    cmdl.c0 = -500

    imdl = smdl + cmdl

    gsmooth = Gauss1D()
    psf = PSFModel('psf', gsmooth)

    x = np.arange(0, 200, 0.5)
    d = Data1D('fake', x, x * 0)
    psf.fold(d)

    smoothed = psf(imdl)
    y = smoothed(x)

    # So the output is not easy to describe analytically, hence
    # we just check parts of it.
    #
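    # With x = np.arange(0, 200, 0.5), the mask (x >= 19.5) & (x <= 100) selects
    # indices 39..200 and x >= 119 selects indices 238..399: 162 points each.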
    assert y[(x >= 19.5) & (x <= 100)] == pytest.approx([-490] * 162, abs=1e-4)
    assert y[x >= 119] == pytest.approx([-500] * 162, abs=1e-4)

    # check that the x <= 19 values are in ascending order
    y1 = y[x <= 19]
    assert (y1[1:] > y1[:-1]).all()
Example #11
def test_parallel_map_funcs2():
    def tst(ncores, sg, stat, opt):
        sd = DataSimulFit('sd', [d, d], numcores=ncores)
        f = Fit(sd, sg, stat, opt)
        result = f.fit()
        return result

    def cmp_results(result, tol=1.0e-3):
        assert result.succeeded
        parvals = (1.7555670572301785, 1.5092728216164186, 4.893136872267538)
        assert result.numpoints == 200

        # use tol in approx?
        assert result.parvals == pytest.approx(parvals)

    numpy.random.seed(0)
    x = numpy.linspace(-5., 5., 100)
    ampl = 5
    pos = 1.5
    sigma = 0.75
    err = 0.25
    y = ampl * numpy.exp(-0.5 * (x - pos)**2 / sigma**2)
    y += numpy.random.normal(0., err, x.shape)
    d = Data1D('junk', x, y)
    g = Gauss1D()
    opt = LevMar()
    stat = LeastSq()
    sg = SimulFitModel('sg', [g, g])

    result = tst(1, sg, stat, opt)
    cmp_results(result)

    result = tst(2, sg, stat, opt)
    cmp_results(result)
Example #12
def test_convolve_combined_1d():
    """Try to replicate the logic of test_psf1d_step_v2
    from sherpa/tests/test_instrument.py
    """

    smdl = StepLo1D()
    smdl.xcut = 100
    smdl.ampl = 10

    gsmooth = Gauss1D()

    x = np.arange(0, 200, 0.5)

    data = smdl(x)
    kernel = gsmooth(x)
    kernel /= kernel.sum()

    tcd = _psf.tcdData()
    y = tcd.convolve(data, kernel, data.shape, kernel.shape, [0])

    # So the output is not easy to describe analytically, hence
    # we just check parts of it.
    #
    assert y[(x >= 19.5) & (x <= 100)] == pytest.approx([10] * 162, abs=1e-4)
    assert y[x >= 119] == pytest.approx([0] * 162, abs=1e-4)

    # check that the x <= 19 values are in ascending order
    y1 = y[x <= 19]
    assert (y1[1:] > y1[:-1]).all()

    # and now with the kernel internally stored
    y2 = tcd.convolve(data, kernel, data.shape, kernel.shape, [0])
    assert y2 == pytest.approx(y)
Example #13
def test_sourceplot_wavelength(caplog):
    """Check we get wavelength units"""

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('',
                   np.arange(10),
                   np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "wave"

    # Note that the model evaluation is done in Angstroms
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp)
Example #14
def test_psf1d_combined():
    """This is based on
    sherpa.models.tests.test_regrid_unit.test_regrid1_works_with_convolution_style
    but I wanted to make sure we have an explicit check of the underlying
    code.
    """

    smdl = StepLo1D()
    smdl.xcut = 12.5
    smdl.ampl = 10

    cmdl = Const1D()
    cmdl.c0 = -500

    imdl = smdl + cmdl

    gsmooth = Gauss1D()
    gsmooth.fwhm = 3
    psf = PSFModel('psf', gsmooth)

    x = np.arange(5, 23, 3)
    d = Data1D('fake', x, x * 0)
    psf.fold(d)

    smoothed = psf(imdl)
    y = smoothed(x)

    assert y == pytest.approx([-490, -490, -490, -500, -500, -500], rel=7e-3)
Example #15
def test_psf1d_step_v2():
    """Trying to track down why we have seen different behavior in
    test_regrid_unit.py.
    """

    smdl = StepLo1D()
    smdl.xcut = 100
    smdl.ampl = 10

    gsmooth = Gauss1D()
    psf = PSFModel('psf', gsmooth)

    x = np.arange(0, 200, 0.5)
    d = Data1D('fake', x, x * 0)
    psf.fold(d)

    smoothed = psf(smdl)
    y = smoothed(x)

    # So the output is not easy to describe analytically, hence
    # we just check parts of it.
    #
    assert y[(x >= 19.5) & (x <= 100)] == pytest.approx([10] * 162, abs=1e-4)
    assert y[x >= 119] == pytest.approx([0] * 162, abs=1e-4)

    # check that the x <= 19 values are in ascending order
    y1 = y[x <= 19]
    assert (y1[1:] > y1[:-1]).all()
Example #16
def test_sourceplot(caplog):

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('',
                   np.arange(10),
                   np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "energy"
    assert data.rate
    assert data.plot_fac == 0

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_energy(sp)
Example #17
def setUp2(hide_logging, reset_seed):

    x = numpy.linspace(-5., 5., 100)
    g1, g2 = Gauss1D(), Gauss1D()
    g1.fwhm = 1.14
    g1.pos = 1.2
    g2.fwhm = 4.13
    g2.pos = -1.3

    numpy.random.seed(0)
    y1 = g1(x) + numpy.random.normal(0.0, 0.05, x.shape)
    y2 = g2(x) + numpy.random.normal(0.0, 0.05, x.shape)

    d4 = Data1D('4', x, y1)
    d5 = Data1D('5', x, y2)

    return d4, d5
Example #18
def test_sourceplot():

    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('', np.arange(10), np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "energy"

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    sp.prepare(data, src)

    # add in several asserts to check that something has been
    # added to the object
    #
    assert sp.xlabel == 'Energy (keV)'

    # the following depends on the backend
    # assert sp.ylabel == 'f(E)  Photons/sec/cm$^2$/keV'

    assert sp.title == 'Source Model of '

    assert sp.xlo == pytest.approx(bins[:-1])
    assert sp.xhi == pytest.approx(bins[1:])

    # The check of the values is just to check that things are going
    # as expected, so the model values have been adjusted so that
    # an "integer" check can be used with enough precision to make
    # sure that the model is being evaluated correctly, but without
    # a very-high-precision check
    #
    yexp = np.asarray([9998, 9997, 9997, 9997, 9996, 9996, 9995, 9994,
                       9994, 9993, 9992, 9991, 9990, 9988, 9987, 9985,
                       9983, 9982, 9980, 9977, 9975, 9973, 9970, 9967,
                       9964, 9961, 9958, 9955, 9951, 9948, 9944, 9941,
                       9937, 9934, 9930, 9927, 9923, 9920, 9917, 9914,
                       9911, 9909, 9907, 9905, 9903, 9902, 9901, 9900,
                       9900, 9900, 9900, 9901, 9902, 9903, 9905, 9907,
                       9909, 9911, 9914, 9917, 9920, 9923, 9927, 9930,
                       9934, 9937, 9941, 9944, 9948, 9951, 9955, 9958,
                       9961, 9964, 9967, 9970, 9973, 9975, 9977, 9980,
                       9982, 9983, 9985, 9987, 9988, 9990, 9991, 9992,
                       9993, 9994, 9994, 9995, 9996, 9996, 9997, 9997,
                       9997, 9998, 9998])

    assert (sp.y.astype(int) == yexp).all()
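The check_sourceplot_energy helper used by the earlier examples is not shown; judging from the inline asserts above, a minimal hypothetical version could look like this (the real helper also checks the y values and the factor-dependent labels):

def check_sourceplot_energy(sp, factor=0):
    # hypothetical sketch based on Example #18; factor handling is omitted
    bins = np.arange(0.1, 10.1, 0.1)
    assert sp.xlabel == 'Energy (keV)'
    assert sp.xlo == pytest.approx(bins[:-1])
    assert sp.xhi == pytest.approx(bins[1:])
    assert np.all(sp.y > 0)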
Example #19
def test_regrid1d_wrapping_create_composite_instance():
    # This test depends on what we want the regridded model to look like, which is
    # somewhat arbitrary
    cmdl = Const1D()
    gmdl = Gauss1D()
    imdl = cmdl + gmdl
    rmdl = ModelDomainRegridder1D()
    mdl = rmdl.apply_to(imdl)
    assert isinstance(mdl, CompositeModel)
    assert len(mdl.parts) == 1
    assert mdl.parts[0] is imdl
Example #20
def test_model_linked():
    """Check linking of models"""
    m = Gauss1D('g1')
    c = Const1D('c1')
    m.fwhm = 8 * c.c0
    r = m._repr_html_()

    assert r is not None

    assert '<tr><th class="model-odd" scope="rowgroup" rowspan=3>g1</th><td>fwhm</td><td>linked</td><td>8.0</td><td colspan=2>&#8656; 8 * c1.c0</td><td></td></tr>' in r
    assert '<tr><td>pos</td><td><input disabled type="checkbox" checked></input></td><td>0.0</td><td>-MAX</td><td>MAX</td><td></td></tr>' in r
    assert '<tr><td>ampl</td><td><input disabled type="checkbox" checked></input></td><td>1.0</td><td>-MAX</td><td>MAX</td><td></td></tr>' in r
Example #21
def test_model():
    m = Gauss1D('ff')
    r = m._repr_html_()

    assert r is not None

    assert '<summary>Model</summary>' in r
    assert '<table class="model">' in r

    assert '<tr><th class="model-odd" scope="rowgroup" rowspan=3>ff</th><td>fwhm</td><td><input disabled type="checkbox" checked></input></td><td>10.0</td><td>TINY</td><td>MAX</td><td></td></tr>' in r
    assert '<tr><td>pos</td><td><input disabled type="checkbox" checked></input></td><td>0.0</td><td>-MAX</td><td>MAX</td><td></td></tr>' in r
    assert '<tr><td>ampl</td><td><input disabled type="checkbox" checked></input></td><td>1.0</td><td>-MAX</td><td>MAX</td><td></td></tr>' in r
Example #22
def test_psf1d_no_fold():
    """Error out if there's no kernel"""

    box = Box1D()
    psf = PSFModel('bob', box)

    cpt = Gauss1D()
    sm = psf(cpt)

    with pytest.raises(PSFErr) as exc:
        sm([1, 2, 3, 4, 5])

    assert "PSF model has not been folded" == str(exc.value)
Example #23
def test_model_combined():
    """We can show a binary op"""
    m1 = Gauss1D('g1')
    m2 = Const1D('c1')
    m = m1 + m2
    r = m._repr_html_()

    assert r is not None

    assert '<th class="model-odd" scope="rowgroup" rowspan=3>g1</th>' in r
    assert '<th class="model-even" scope="rowgroup" rowspan=1>c1</th>' in r

    assert '<tr><th class="model-odd" scope="rowgroup" rowspan=3>g1</th><td>fwhm</td><td><input disabled type="checkbox" checked></input></td><td>10.0</td><td>TINY</td><td>MAX</td><td></td></tr>' in r
    assert '<tr><td>pos</td><td><input disabled type="checkbox" checked></input></td><td>0.0</td><td>-MAX</td><td>MAX</td><td></td></tr>' in r
    assert '<tr><td>ampl</td><td><input disabled type="checkbox" checked></input></td><td>1.0</td><td>-MAX</td><td>MAX</td><td></td></tr>' in r
    assert '<tr class="block"><th class="model-even" scope="rowgroup" rowspan=1>c1</th><td>c0</td><td><input disabled type="checkbox" checked></input></td><td>1.0</td><td>-MAX</td><td>MAX</td><td></td></tr>' in r
Example #24
def setup_1d(request):
    """Create Gauss1D + Const1D components."""

    gmdl = Gauss1D()
    cmdl = Const1D()
    gmdl.pos = 5000
    gmdl.fwhm = 30
    gmdl.ampl = 20
    cmdl.c0 = 10

    return_composite = request.param

    if return_composite:
        return gmdl + cmdl
    else:
        return cmdl
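The request.param lookup implies the fixture is parametrized; the declaration is not shown, but it presumably resembles the following (ids hypothetical):

@pytest.fixture(params=[True, False], ids=["composite", "single"])
def setup_1d(request):
    ...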
Example #25
def test_psf1d_flat():
    """This is based on
    sherpa.models.tests.test_regrid_unit.test_regrid1_works_with_convolution_style
    but I wanted to make sure we have an explicit check of the underlying
    code.
    """

    cmdl = Const1D()
    cmdl.c0 = -500

    gsmooth = Gauss1D()
    gsmooth.fwhm = 3
    psf = PSFModel('psf', gsmooth)

    x = np.arange(5, 23, 3)
    d = Data1D('fake', x, x * 0)
    psf.fold(d)
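    # PSFModel normalises its kernel by default, so smoothing a constant
    # should return that constant, hence the flat -500 expectation below.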

    smoothed = psf(cmdl)
    y = smoothed(x)

    assert y == pytest.approx([-500] * 6)
Example #26
def test_regrid1d_wrapping_name():
    """Check the name field of a wrapped model.

    This is also checked in test_regrid1d_wrapping_str.
    """

    internal_model = Const1D('con') + Gauss1D('gau')
    imodel_name = internal_model.name

    # a test where the Regrid1D model is named is in
    # test_regrid1d_wrapping_str.
    rmdl = ModelDomainRegridder1D()
    mdl = rmdl.apply_to(internal_model)

    # TODO: It is not clear what the syntactic constraints on
    #       the name field are; if it is to create an evaluable
    #       model at the UI layer then the following is
    #       incorrect. There is "prior art" here with PSF, ARF,
    #       and RMF models to look at.
    #
    expected_name = 'regrid1d({})'.format(imodel_name)
    assert mdl.name == expected_name
Example #27
def test_sourceplot_wavelength(caplog, make_basic_datapha):
    """Check we get wavelength units"""

    data = make_basic_datapha
    data.units = "wave"

    # Note that the model evaluation is done in Angstroms
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp)
Example #28
def test_psf1d_step():
    """This is based on
    sherpa.models.tests.test_regrid_unit.test_regrid1_works_with_convolution_style
    but I wanted to make sure we have an explicit check of the underlying
    code.
    """

    smdl = StepLo1D()
    smdl.xcut = 12.5
    smdl.ampl = 10

    gsmooth = Gauss1D()
    gsmooth.fwhm = 3
    psf = PSFModel('psf', gsmooth)

    x = np.arange(5, 23, 3)
    d = Data1D('fake', x, x * 0)
    psf.fold(d)

    smoothed = psf(smdl)
    y = smoothed(x)

    assert y == pytest.approx([10.0, 10.0, 10.0, 0, 0, 0], abs=1e-4)
Example #29
def test_sourceplot_counts(caplog, make_basic_datapha):
    """test_sourceplot but when rate=False is chosen"""

    data = make_basic_datapha
    data.units = "energy"
    data.rate = False

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_energy(sp)
Example #30
def test_sourceplot_wavelength_counts(caplog, make_basic_datapha):
    """test_sourceplot_wavelength but when rate=False is chosen"""

    data = make_basic_datapha
    data.units = "wave"
    data.rate = False

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp)