def test_default_background_issue_fit(clean_astro_ui):
    """Test issue #943 with fit

    See https://github.com/sherpa/sherpa/issues/943#issuecomment-696119982
    """
    ui.set_default_id('x')

    # Least-squares is used since the quality of the fit is not the point
    # of this test.
    ui.set_stat('leastsq')

    ui.load_arrays('x', [1, 2, 3, 4], [5, 4, 3, 4], ui.DataPHA)

    background = ui.DataPHA('bkg', np.asarray([1, 2, 3, 4]), [1, 1, 0, 1])
    arf = ui.create_arf(np.asarray([0.1, 0.2, 0.3, 0.4]),
                        np.asarray([0.2, 0.3, 0.4, 0.5]))
    ui.set_arf(arf)
    background.set_arf(arf)
    ui.set_bkg(background)

    # The background is a constant fitted to 1,1,0,1, which gives
    # 0.75 / 0.1 (the bin width is constant at 0.1). The source is
    # 5,4,3,4 minus that <0.75> background level [ignoring the
    # bin-width here], i.e. [4.25,3.25,2.25,3.25] -> 13 / 4 -> 3.25.
    #
    ui.set_source(ui.const1d.mdl1)
    ui.set_bkg_source(ui.const1d.mdl2)

    # Prior to #943 this would give a confusing error.
    #
    ui.fit()
    assert mdl1.c0.val == pytest.approx(3.25 / 0.1)
    assert mdl2.c0.val == pytest.approx(0.75 / 0.1)
def test_fake_pha_background_model(clean_astro_ui, reset_seed):
    """Check we can add a background component.

    See also test_fake_pha_basic.

    For simplicity we use perfect responses.
    """
    # Fixed seed so the "counts are larger than ..." checks below are
    # reproducible.
    np.random.seed(27347)

    # NOTE(review): `id` shadows the builtin; left as-is since the test
    # relies on passing it through the ui calls unchanged.
    id = 'qwerty'
    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)
    ui.set_backscal(id, 0.1)

    # Background dataset with its own exposure/backscal so the scaling
    # applied by fake_pha can be checked.
    bkg = ui.DataPHA('bkg', channels, bcounts, exposure=200, backscal=0.4)

    # Perfect (delta) responses over three contiguous energy bins.
    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 0
    # NOTE(review): this call re-uses the component name 'mdl', so
    # `bkgmdl` and `mdl` refer to the same component and `bkgmdl.c0 = 2`
    # overwrites the `mdl.c0 = 0` above. The "[200, 400, 400]" source
    # prediction comment further down is only consistent with that
    # shared component — confirm whether the name 'bkgmdl' was intended.
    bkgmdl = ui.create_model_component('const1d', 'mdl')
    bkgmdl.c0 = 2
    ui.set_source(id, mdl)
    ui.set_bkg(id, bkg)
    ui.set_bkg_source(id, bkgmdl)
    ui.set_arf(id, arf, bkg_id=1)
    ui.set_rmf(id, rmf, bkg_id=1)

    # bkg='model' asks fake_pha to include the background model in the
    # simulated counts.
    ui.fake_pha(id, arf, rmf, 1000.0, bkg='model')

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    # [200, 400, 400]
    # and the background signal is
    # [125, 125, 125]
    # so, even with randomly drawn values, the following
    # checks should be robust.
    #
    predicted_by_source = 1000 * mdl(elo, ehi)
    predicted_by_bkg = (1000 / 200) * (0.1 / 0.4) * bcounts
    assert (faked.counts > predicted_by_source).all()
    assert (faked.counts > predicted_by_bkg).all()
def test_get_syserror_missing(bid):
    """Does get_syserror error out?"""
    ui.load_arrays(1, [1, 2, 3], [5, 4, 3], ui.DataPHA)
    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(background)

    # Neither the source nor the background has systematic errors set.
    with pytest.raises(DataErr) as err:
        ui.get_syserror(bkg_id=bid)

    assert str(err.value) == "data set '1' does not specify systematic errors"
def test_set_filter_mismatch(f, bid):
    """Does set_filter error when there's a mis-match?
    """
    ui.load_arrays(1, [1, 2, 3], [5, 4, 3], ui.DataPHA)
    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(background)

    # The filter argument f has a different length to the data.
    with pytest.raises(DataErr) as err:
        ui.set_filter(f, bkg_id=bid)

    assert str(err.value) == 'size mismatch between 3 and 2'
def test_fix_background_id_error_checks2():
    """Check error handling of background id"""
    ui.load_arrays(2, [1, 2, 3], [5, 4, 3], ui.DataPHA)
    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(2, background)

    # A string that matches a reserved word is not a valid identifier.
    with pytest.raises(IdentifierErr) as err:
        ui.get_bkg_source(id=2, bkg_id='bkg')

    assert str(err.value) == "identifier 'bkg' is a reserved word"
def test_fix_background_id_error_checks1():
    """Check error handling of background id"""
    ui.load_arrays(2, [1, 2, 3], [5, 4, 3], ui.DataPHA)
    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(2, background)

    # A DataPHA object is not a valid identifier (must be int or str).
    with pytest.raises(ArgumentTypeErr) as err:
        ui.get_bkg_source(id=2, bkg_id=background)

    assert str(err.value) == 'identifiers must be integers or strings'
def test_save_xxx_nodata(func, etype, emsg):
    """Does save_xxx error out if there's no data to save? DataPHA
    """
    ui.load_arrays(1, [1, 2, 3], [5, 4, 3], ui.DataPHA)
    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(background)

    # The save routine under test (func) is expected to raise before
    # any file is created.
    with pytest.raises(etype) as err:
        func("temp-file-that-should-not-be-created")

    assert str(err.value) == emsg
def test_save_image_nodata():
    """Does save_image error out if there's no data to save? DataPHA

    Unlike the other calls, this requires a FITS library
    """
    ui.load_arrays(1, [1, 2, 3], [5, 4, 3], ui.DataPHA)
    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(background)

    # A PHA dataset is not an image, so save_image must refuse it.
    with pytest.raises(IOErr) as err:
        ui.save_image("temp-file-that-should-not-be-created")

    assert str(err.value) == "data set '' does not contain an image"
def test_bkg_id_get_bkg_source(clean_astro_ui):
    """Check the error message when the background model has
    not been set (issue #943)"""
    ui.set_default_id('x')
    ui.load_arrays('x', [1, 2, 3], [5, 4, 3], ui.DataPHA)

    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(background)

    # No background model has been set, so this must error out with a
    # message naming the default id.
    with pytest.raises(ModelErr) as err:
        ui.get_bkg_source()

    emsg = 'background model 1 for data set x has not been set'
    assert str(err.value) == emsg
def test_save_xxx_bkg_nodata(func, etype, emsg):
    """Does save_xxx error out if there's no data to save? DataPHA + bkg

    Note that save_image does not support a bkg_id parameter so is
    not tested here.
    """
    ui.load_arrays(1, [1, 2, 3], [5, 4, 3], ui.DataPHA)
    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(background)

    # The save routine under test (func) is expected to raise before
    # any file is created.
    with pytest.raises(etype) as err:
        func("temp-file-that-should-not-be-created", bkg_id=1)

    assert str(err.value) == emsg
def test_save_filter_ignored(bid):
    """Does save_filter error out if everything is masked?

    We should be able to write out the filter in this case,
    as it's easy (all False).
    """
    ui.load_arrays(1, [1, 2, 3], [5, 4, 3], ui.DataPHA)
    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(background)

    # Filter out every channel.
    ui.ignore(None, None)

    with pytest.raises(DataErr) as err:
        ui.save_filter("temp-file-that-should-not-be-created",
                       bkg_id=bid)

    assert str(err.value) == "mask excludes all data"
def test_set_filter_mismatch_with_filter(f, bid):
    """Does set_filter error when there's a mis-match after a filter?

    test_set_filter_mismatch checks when .mask is a scalar,
    so now check when it's a NumPy array.
    """
    ui.load_arrays(1, [1, 2, 3], [5, 4, 3], ui.DataPHA)
    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(background)

    # This turns the .mask attribute into an array rather than a scalar.
    ui.ignore(3, None)

    with pytest.raises(DataErr) as err:
        ui.set_filter(f, bkg_id=bid)

    assert str(err.value) == 'size mismatch between 3 and 2'
def test_delete_bkg_model_with_bkgid(id, clean_astro_ui):
    """Check we call delete_bkg_model with non-default bkg_id"""
    ui.load_arrays(id, [1, 2, 3], [5, 4, 3], ui.DataPHA)
    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    ui.set_bkg(id, background, bkg_id=2)

    ui.set_bkg_source(id, ui.const1d.bmdl, bkg_id=2)
    assert ui.list_model_components() == ['bmdl']
    assert ui.get_bkg_source(id, 2).name == 'const1d.bmdl'

    ui.delete_bkg_model(id, bkg_id=2)

    # Deleting the background model must not remove the component
    # itself from the session.
    assert ui.list_model_components() == ['bmdl']

    with pytest.raises(ModelErr) as err:
        ui.get_bkg_source(id, 2)

    emsg = 'background model 2 for data set {} has not been set'.format(id)
    assert str(err.value) == emsg
def test_show_bkg_model_issue943(clean_astro_ui):
    """Test issue #943

    We do not check that show_bkg_model is creating anything
    useful, just that it can be called.

    See https://github.com/sherpa/sherpa/issues/943#issuecomment-696119982
    """
    ui.set_default_id('x')
    ui.load_arrays('x', [1, 2, 3], [5, 4, 3], ui.DataPHA)

    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    arf = ui.create_arf(np.asarray([0.1, 0.2, 0.3]),
                        np.asarray([0.2, 0.3, 0.4]))
    background.set_arf(arf)
    ui.set_bkg(background)

    ui.set_bkg_source(ui.const1d.mdl2)

    # Prior to the #943 fix this call would fail; we only check it runs.
    ui.show_bkg_model()
def test_default_background_issue(clean_astro_ui):
    """Test issue #943"""
    ui.set_default_id('x')

    # Least-squares is used since the quality of the fit is not the
    # point of this test.
    ui.set_stat('leastsq')

    ui.load_arrays('x', [1, 2, 3], [5, 4, 3], ui.DataPHA)

    background = ui.DataPHA('bkg', np.asarray([1, 2, 3]), [1, 1, 0])
    arf = ui.create_arf(np.asarray([0.1, 0.2, 0.3]),
                        np.asarray([0.2, 0.3, 0.4]))
    background.set_arf(arf)
    ui.set_bkg(background)

    ui.set_bkg_source(ui.const1d.mdl2)

    # Ensure we can fit the background model. Prior to #943 being
    # fixed the fit_bkg call would error out.
    #
    ui.fit_bkg()
    assert mdl2.c0.val == pytest.approx(2 / 3 / 0.1)
def test_subtract_already_subtracted(idval):
    """Does subtract still work if the data is already subtracted?"""
    chans = [1, 2, 3]
    vals = [0, 4, 3]
    ui.load_arrays('bgnd', chans, vals, ui.DataPHA)
    background = ui.get_data('bgnd')

    # Exercise both the "default id" and "explicit id" call forms.
    ui.load_arrays(1 if idval is None else idval, chans, vals, ui.DataPHA)
    if idval is None:
        ui.set_bkg(background)
    else:
        ui.set_bkg(idval, background)

    data = ui.get_data(idval)
    assert not data.subtracted

    ui.subtract(idval)
    assert data.subtracted

    # A second subtract call must be a no-op, not a double subtraction.
    ui.subtract(idval)
    assert ui.get_dep(idval) == pytest.approx([0, 0, 0])
def test_fake_pha_basic(id, has_bkg, clean_astro_ui):
    """No background.

    See also test_fake_pha_add_background

    For simplicity we use perfect responses.

    A background dataset can be added, but it should
    not be used in the simulation.
    """
    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)

    if has_bkg:
        background = ui.DataPHA('bkg', channels, bcounts,
                                exposure=200, backscal=0.4)
        ui.set_bkg(id, background, bkg_id='faked-bkg')

    # Perfect (delta) responses over three contiguous energy bins.
    edges = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = edges[:-1]
    ehi = edges[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source(id, mdl)

    ui.fake_pha(id, arf, rmf, 1000.0)

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.get_arf().name == 'test-arf'
    assert faked.get_rmf().name == 'delta-rmf'

    if has_bkg and id is not None:
        # The background dataset must survive the simulation unchanged.
        assert faked.background_ids == ['faked-bkg']
        background = ui.get_bkg(id, 'faked-bkg')
        assert background.name == 'bkg'
        assert background.counts == pytest.approx(bcounts)
        assert background.exposure == pytest.approx(200)
    else:
        assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    # [200, 400, 400]
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert faked.counts.sum() > 200