def test_pha_get_xerr_all_bad_energy_group():
    """get_xerr handles all bad values [energy]

    The behavior with grouping is different, presumably because
    we assume we have grouping when we have a quality array.
    """
    # Three channels, all flagged as bad quality, with a trivial
    # one-channel-per-group grouping.
    pha = DataPHA('name', [1, 2, 3], [1, 1, 1],
                  grouping=[1, 1, 1],
                  quality=[2, 2, 2])

    # Delta-function RMF mapping the three channels onto three
    # contiguous energy bins.
    edges = np.asarray([3.0, 5., 8.0, 12.0])
    elo, ehi = edges[:-1], edges[1:]
    pha.set_rmf(create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi))
    pha.units = 'energy'

    # Before ignore_bad the half-widths of the energy bins come back.
    assert pha.get_xerr() == pytest.approx([2.0, 3.0, 4.0])
    assert pha.grouped

    pha.ignore_bad()
    # Should this error out or not?
    assert pha.get_filter() == ''
    # with pytest.raises(DataErr) as de:
    #     pha.get_filter()
    # assert str(de.value) == 'mask excludes all data'
    assert pha.get_xerr() == pytest.approx([])
def test_pha_get_filter_checks_ungrouped(chtype, expected, args):
    """Check we get the filter we expect

    chtype is channel, energy, or wavelength
    expected is the expected response
    args is a list of 3-tuples of (flag, loval, hival) where flag
    is True for notice and False for ignore; they define the
    filter to apply
    """
    nchan = 10
    pha = DataPHA('data',
                  np.arange(1, nchan + 1, dtype=int),
                  np.ones(nchan, dtype=int))

    # Use an ARF to create a channel to energy mapping
    # The 0.2-2.2 keV range maps to 5.636-61.992 Angstrom
    #
    egrid = 0.2 * np.arange(1, 12)
    pha.set_arf(DataARF('arf', egrid[:-1], egrid[1:], np.ones(nchan)))
    pha.units = chtype

    # Apply the filters in order; True selects notice, False ignore.
    for flag, lo, hi in args:
        (pha.notice if flag else pha.ignore)(lo, hi)

    assert pha.get_filter(format='%.1f') == expected
def test_sourceplot_wavelength_counts(caplog):
    """test_sourceplot_wavelength but when rate=False is chosen"""

    edges = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('', np.arange(10), np.ones(10),
                   bin_lo=edges[:-1].copy(),
                   bin_hi=edges[1:].copy())
    data.units = "wave"
    data.rate = False

    # use a model that is "okay" to use with keV bins
    #
    baseline = Const1D('bgnd')
    absorber = Gauss1D('abs1')
    src = 100 * baseline * (1 - absorber) * 10000

    baseline.c0 = 0.01
    absorber.pos = 5.0
    absorber.fwhm = 4.0
    absorber.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    # No warnings should have been logged for wavelength units.
    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp)
def test_sourceplot_wavelength_facn(factor, caplog):
    """Change plot factor for test_sourceplot_wavelength"""

    edges = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('', np.arange(10), np.ones(10),
                   bin_lo=edges[:-1].copy(),
                   bin_hi=edges[1:].copy())
    data.units = "wavelength"
    data.plot_fac = factor
    assert data.rate

    baseline = Const1D('bgnd')
    absorber = Gauss1D('abs1')
    src = 100 * baseline * (1 - absorber) * 10000

    baseline.c0 = 0.01
    absorber.pos = 5.0
    absorber.fwhm = 4.0
    absorber.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    # No warnings should have been logged for wavelength units.
    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp, factor=factor)
def test_sourceplot_wavelength(caplog):
    """Check we get wavelength units"""

    edges = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('', np.arange(10), np.ones(10),
                   bin_lo=edges[:-1].copy(),
                   bin_hi=edges[1:].copy())
    data.units = "wave"

    # Note that the model evaluation in done in Angstroms
    #
    baseline = Const1D('bgnd')
    absorber = Gauss1D('abs1')
    src = 100 * baseline * (1 - absorber) * 10000

    baseline.c0 = 0.01
    absorber.pos = 5.0
    absorber.fwhm = 4.0
    absorber.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    # No warnings should have been logged for wavelength units.
    assert len(caplog.records) == 0
    check_sourceplot_wavelength(sp)
def test_sourceplot_channels(caplog):
    """Although we ask for channels we get energy units"""

    edges = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('', np.arange(10), np.ones(10),
                   bin_lo=edges[:-1].copy(),
                   bin_hi=edges[1:].copy())
    data.units = "channel"

    # use a model that is "okay" to use with keV bins
    #
    baseline = Const1D('bgnd')
    absorber = Gauss1D('abs1')
    src = 100 * baseline * (1 - absorber) * 10000

    baseline.c0 = 0.01
    absorber.pos = 5.0
    absorber.fwhm = 4.0
    absorber.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    # A single warning is emitted explaining the fall-back to energy.
    assert len(caplog.records) == 1
    lname, lvl, msg = caplog.record_tuples[0]
    assert lname == 'sherpa.astro.plot'
    assert lvl == logging.WARN
    assert msg == 'Channel space is unappropriate for the PHA unfolded source model,\nusing energy.'

    check_sourceplot_energy(sp)
def test_sourceplot(caplog):
    """Basic SourcePlot check in energy units (no log output expected)."""

    edges = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('', np.arange(10), np.ones(10),
                   bin_lo=edges[:-1].copy(),
                   bin_hi=edges[1:].copy())
    data.units = "energy"
    assert data.rate
    assert data.plot_fac == 0

    # use a model that is "okay" to use with keV bins
    #
    baseline = Const1D('bgnd')
    absorber = Gauss1D('abs1')
    src = 100 * baseline * (1 - absorber) * 10000

    baseline.c0 = 0.01
    absorber.pos = 5.0
    absorber.fwhm = 4.0
    absorber.ampl = 0.1

    sp = SourcePlot()
    with caplog.at_level(logging.INFO, logger='sherpa'):
        sp.prepare(data, src)

    # Preparing an energy-units plot should be silent.
    assert len(caplog.records) == 0
    check_sourceplot_energy(sp)
# NOTE(review): this redefines test_sourceplot from earlier in the file, so
# only the later definition is collected by pytest — one of the two should
# be renamed; confirm which variant is intended to survive.
def test_sourceplot():
    """Check SourcePlot.prepare fills in labels, bins and model values.

    The model values have been adjusted so that an "integer" check can
    be used with enough precision to make sure that the model is being
    evaluated correctly, but without a very-high-precision check.
    """
    bins = np.arange(0.1, 10.1, 0.1)
    data = DataPHA('', np.arange(10), np.ones(10),
                   bin_lo=bins[:-1].copy(),
                   bin_hi=bins[1:].copy())
    data.units = "energy"

    # use a model that is "okay" to use with keV bins
    #
    m1 = Const1D('bgnd')
    m2 = Gauss1D('abs1')
    src = 100 * m1 * (1 - m2) * 10000

    m1.c0 = 0.01
    m2.pos = 5.0
    m2.fwhm = 4.0
    m2.ampl = 0.1

    sp = SourcePlot()
    sp.prepare(data, src)

    # add in several asserts to check that something has been
    # added to the object
    #
    # assert sp.xlabel == 'Energy (keV)'

    # the following depends on the backend
    # assert sp.ylabel == 'f(E) Photons/sec/cm$^2$/keV'

    assert sp.title == 'Source Model of '
    assert sp.xlo == pytest.approx(bins[:-1])
    assert sp.xhi == pytest.approx(bins[1:])

    yexp = np.asarray([9998, 9997, 9997, 9997, 9996, 9996, 9995, 9994,
                       9994, 9993, 9992, 9991, 9990, 9988, 9987, 9985,
                       9983, 9982, 9980, 9977, 9975, 9973, 9970, 9967,
                       9964, 9961, 9958, 9955, 9951, 9948, 9944, 9941,
                       9937, 9934, 9930, 9927, 9923, 9920, 9917, 9914,
                       9911, 9909, 9907, 9905, 9903, 9902, 9901, 9900,
                       9900, 9900, 9900, 9901, 9902, 9903, 9905, 9907,
                       9909, 9911, 9914, 9917, 9920, 9923, 9927, 9930,
                       9934, 9937, 9941, 9944, 9948, 9951, 9955, 9958,
                       9961, 9964, 9967, 9970, 9973, 9975, 9977, 9980,
                       9982, 9983, 9985, 9987, 9988, 9990, 9991, 9992,
                       9993, 9994, 9994, 9995, 9996, 9996, 9997, 9997,
                       9997, 9998, 9998])

    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int gives the same truncation behavior in astype.
    assert (sp.y.astype(int) == yexp).all()
def test_pha_get_xerr_all_bad_energy_no_group():
    """get_xerr handles all bad values [energy]

    It's not obvious what it is meant to be doing here.
    """
    # All channels flagged bad quality but no grouping array.
    pha = DataPHA('name', [1, 2, 3], [1, 1, 1],
                  quality=[2, 2, 2])

    # Delta-function RMF mapping the three channels onto three
    # contiguous energy bins.
    edges = np.asarray([3.0, 5., 8.0, 12.0])
    elo, ehi = edges[:-1], edges[1:]
    pha.set_rmf(create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi))
    pha.units = 'energy'

    assert pha.get_xerr() == pytest.approx([2.0, 3.0, 4.0])

    pha.ignore_bad()
    assert pha.get_filter() == ''

    # Unlike the grouped case, the x errors are unchanged.
    assert pha.get_xerr() == pytest.approx([2.0, 3.0, 4.0])
def setup_single_pha(stat, sys, background=True, areascal="none"):
    """Return a single data set and model.

    This is aimed at wstat calculation. The data set is grouped
    which is a bit against the ethos of WSTAT (fit all the
    channels).

    Parameters
    ----------
    stat, sys : bool
        Should statistical and systematic errors be explicitly set
        (True) or taken from the statistic (False)?
    background : bool
        Should a background data set be included (True) or not
        (False)? The background is *not* subtracted when True.
    areascal : {'none', 'scalar', 'array'}
        Is the AREASCAL set and, if so, to a scalar or array value?
        If background is True then it is also applied to the
        background data set.

    Returns
    -------
    data, model
        DataPHA and Model objects.
    """

    # For the array of areascals, ensure that areascal is not
    # constant within at least one group
    #
    areascals = {
        'source': {
            'none': None,
            'scalar': 1.0,
            'array': np.asarray([0.9, 0.9, 0.8, 0.9, 0.7],
                                dtype=np.float32)
        },
        'background': {
            'none': None,
            'scalar': 0.8,
            'array': np.asarray([1.2, 1.2, 1.2, 1.1, 1.4],
                                dtype=np.float32)
        }
    }

    # If used the same bins as setup_single_1dint then could
    # re-use the results, but the bins are different, and it
    # is useful for the Data1DInt case to test non-consecutive
    # histogram bins.
    #
    channels = np.arange(1, 6, dtype=np.int16)
    counts = np.asarray([10, 13, 9, 17, 21], dtype=np.int16)

    # Channel mid-points define the (delta) energy bin edges.
    elo = channels - 0.5
    ehi = channels + 0.5

    staterror = np.asarray([3.0, 4.0, 3.0, 4.0, 5.0]) if stat else None
    syserror = 0.2 * counts if sys else None

    grouping = np.asarray([1, -1, 1, -1, 1], dtype=np.int16)
    # quality = np.asarray([0, 0, 0, 0, 0], dtype=np.int16)
    quality = None

    # does not set areascal or header
    data = DataPHA(name='tstpha', channel=channels,
                   counts=counts, staterror=staterror,
                   syserror=syserror, grouping=grouping,
                   quality=quality, exposure=150.0,
                   backscal=0.01,
                   areascal=areascals['source'][areascal])

    data.set_rmf(create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi))
    data.units = 'energy'

    if background:
        bgcounts = np.asarray([2, 1, 0, 2, 2], dtype=np.int16)
        bgstaterror = (np.asarray([0.2, 0.4, 0.5, 0.3, 0.2])
                       if stat else None)
        bgsyserror = 0.3 * bgcounts if sys else None

        bgdata = DataPHA(name='bgpha', channel=channels,
                         counts=bgcounts, staterror=bgstaterror,
                         syserror=bgsyserror, grouping=None,
                         quality=None, exposure=550.0,
                         backscal=np.asarray([0.05, 0.06, 0.04,
                                              0.04, 0.07]),
                         areascal=areascals['background'][areascal])
        data.set_background(bgdata)

    # Trying a multi-component model, even though this actual
    # model is degenerate (cnst.c0 and poly.c0)
    cnst = Const1D('cnst')
    poly = Polynom1D('poly')
    cnst.c0 = 1.2
    poly.c0 = 7.9
    poly.c1 = 2.1
    poly.c1.frozen = False

    return data, cnst + poly