def test_ARFModelPHA(self):
    """Fit an ARF-only PHA dataset (regression test for issue #699)."""
    from sherpa.astro import ui
    ui.load_pha(self.make_path("3c120_meg_1.pha"))

    # Drop the RMF so this is an ARF-only analysis, which is the
    # configuration that triggered the bug behind #699.
    ui.get_data().set_rmf(None)

    ui.group_counts(20)
    ui.notice(0.5, 6)
    ui.subtract()

    ui.set_model(ui.xsphabs.abs1 * (ui.xsapec.bubble + ui.powlaw1d.p1))
    ui.set_xsabund('angr')
    ui.set_xsxsect('vern')

    abs1.nh = 0.163
    abs1.nh.freeze()
    p1.ampl = 0.017
    p1.gamma = 1.9
    bubble.kt = 0.5
    bubble.norm = 4.2e-5

    ftol = 1.0e-2
    ui.set_method_opt('ftol', ftol)
    ui.fit()

    fit_info = ui.get_fit_results()
    assert fit_info.numpoints == self._fit_using_ARFModelPHA['numpoints']
    assert fit_info.dof == self._fit_using_ARFModelPHA['dof']
def test_plot_pvalue(make_data_path, clean_astro_ui, hide_log_output):
    """Run plot_pvalue on a PHA dataset and check the stored results."""
    infile = make_data_path("qso.pi")
    ui.load_pha(infile)

    ui.set_stat('cstat')
    ui.set_method("neldermead")

    ui.group_counts(10)
    ui.notice(0.3, 8)

    # The first set_model call creates the abs1 and p1 components,
    # which the second call then combines with a gaussian line.
    ui.set_model("xsphabs.abs1*xspowerlaw.p1")
    ui.set_model("abs1*(p1+gauss1d.g1)")

    # Start close to the best fit to save a small amount of time.
    abs1.nh = 0.05
    p1.phoindex = 1.28
    p1.norm = 2e-4
    g1.ampl = 1.8e-5

    g1.pos = 3.
    ui.freeze(g1.pos)
    g1.fwhm = 0.1
    ui.freeze(g1.fwhm)

    ui.fit()
    ui.plot_pvalue(p1, p1 + g1, num=100)

    res = ui.get_pvalue_results()
    assert res.null == pytest.approx(210.34566845619273)
    assert res.alt == pytest.approx(207.66618095925094)
    assert res.lr == pytest.approx(2.679487496941789)
def test_rsp(self):
    """Check p-value results for the qso.pi dataset."""
    infile = self.make_path("qso.pi")
    ui.load_pha(infile)

    ui.set_stat("chi2xspecvar")
    ui.set_method("neldermead")

    ui.group_counts(10)
    ui.notice(0.3, 8)

    # The first set_model call creates the abs1 and p1 components,
    # which the second call then combines with a gaussian line.
    ui.set_model("xsphabs.abs1*xspowerlaw.p1")
    ui.set_model("abs1*(p1+gauss1d.g1)")

    g1.pos = 3.
    ui.freeze(g1.pos)
    g1.fwhm = 0.1
    ui.freeze(g1.fwhm)

    # Switch statistic before fitting and evaluating the p-value.
    ui.set_stat('cstat')
    ui.fit()
    ui.plot_pvalue(p1, p1 + g1, num=100)

    res = ui.get_pvalue_results()
    want = [210.34566845619273, 207.66618095925094, 2.679487496941789]
    self.compare_results(want, [res.null, res.alt, res.lr])
def group_setup(make_data_path):
    """Load and group the 9774.pi dataset for the WSTAT tests."""
    ui.set_stat('wstat')

    pha = make_data_path('9774.pi')
    ui.load_pha(1, pha)
    ui.group_counts(1, 20)

    # Unlike the test_wstat_two_scalar case, the grouping is not
    # copied over to the background.
    # ui.set_grouping(1, bkg_id=1, val=ui.get_grouping(1))

    ui.set_source(1, ui.const1d.c1 * ui.powlaw1d.pl1)

    # These should be the same as test_wstat_two_scalar
    ui.set_par("pl1.gamma", 1.7)
    ui.set_par("pl1.ampl", 1.6e-4)
    ui.set_par("c1.c0", 45)
def setUp(self):
    """Load two PHA datasets and set up a shared WSTAT model."""
    self._old_logger_level = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)

    ui.set_stat('wstat')

    path1 = self.make_path('3c273.pi')
    path2 = self.make_path('9774.pi')
    ui.load_pha(1, path1)
    ui.load_pha(2, path2)

    # Since 9774.pi isn't grouped, group it. Note that this call
    # groups the background to 20 counts per bin; here we instead
    # want the background to use the same grouping scheme as the
    # source file (related to issue 227).
    ui.group_counts(2, 20)
    ui.set_grouping(2, bkg_id=1, val=ui.get_grouping(2))

    # There's no need for both datasets to share a model, but assume
    # the same source shape can be used with only a normalization
    # difference between them.
    ui.set_source(1, ui.powlaw1d.pl1)
    ui.set_source(2, ui.const1d.c2 * ui.get_source(1))

    # The power-law slope and normalization are intended to be "a
    # reasonable approximation" to the data, just to make sure the
    # statistic calculation doesn't blow up too much. Note: the model
    # values for 3c273 are slightly different to the single-PHA-file
    # case, so the stat results are slightly different too.
    ui.set_par("pl1.gamma", 1.7)
    ui.set_par("pl1.ampl", 1.6e-4)
    ui.set_par("c2.c0", 45)
def setUp(self):
    """Load and group the 9774.pi dataset for the WSTAT tests."""
    self._old_logger_level = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)

    ui.set_stat('wstat')

    pha = self.make_path('9774.pi')
    ui.load_pha(1, pha)
    ui.group_counts(1, 20)

    # Unlike the test_wstat_two_scalar case, the grouping is not
    # copied over to the background.
    # ui.set_grouping(1, bkg_id=1, val=ui.get_grouping(1))

    ui.set_source(1, ui.const1d.c1 * ui.powlaw1d.pl1)

    # These should be the same as test_wstat_two_scalar
    ui.set_par("pl1.gamma", 1.7)
    ui.set_par("pl1.ampl", 1.6e-4)
    ui.set_par("c1.c0", 45)
def test_bug38(self):
    """Regression test: regrouping a filtered dataset must not raise (bug #38)."""
    did = '3c273'
    ui.load_pha(did, self.pha3c273)
    ui.notice_id(did, 0.3, 2)
    ui.group_counts(did, 30)
    ui.group_counts(did, 15)
def test_more_ui_bug38(make_data_path):
    """Regression test: regrouping a filtered dataset must not raise (bug #38)."""
    did = '3c273'
    ui.load_pha(did, make_data_path('3c273.pi'))
    ui.notice_id(did, 0.3, 2)
    ui.group_counts(did, 30)
    ui.group_counts(did, 15)
def test_bug38(self):
    """Regression test: regrouping a filtered dataset must not raise (bug #38)."""
    did = "3c273"
    ui.load_pha(did, self.pha3c273)
    ui.notice_id(did, 0.3, 2)
    ui.group_counts(did, 30)
    ui.group_counts(did, 15)
def test_load_pha2_compare_meg_order1(make_data_path):
    """Do we read in the MEG +/-1 orders?"""

    # The MEG -1 order is dataset 9 and the MEG +1 order is
    # dataset 10.
    pha2file = make_data_path('3c120_pha2')

    ui.load_pha('meg_p1', make_data_path('3c120_meg_1.pha'))
    ui.load_pha('meg_m1', make_data_path('3c120_meg_-1.pha'))

    orig_ids = set(ui.list_data_ids())
    assert 'meg_p1' in orig_ids
    assert 'meg_m1' in orig_ids

    ui.load_pha(pha2file)
    for did, lbl in [(9, "-1"), (10, "1")]:
        stem = '3c120_meg_{}'.format(lbl)
        ui.load_arf(did, make_data_path(stem + '.arf'))
        ui.load_rmf(did, make_data_path(stem + '.rmf'))

    # Loading the pha2 file must not overwrite the existing datasets.
    expected_ids = orig_ids | set(range(1, 13))
    assert expected_ids == set(ui.list_data_ids())

    # Check that the same model gives the same statistic value; this
    # covers that the data and response are read in, that grouping
    # and filtering work, and that model evaluation is the same,
    # without having to check these steps individually.
    #
    # The model is not meant to be physically meaningful, just one
    # that reasonably represents the data and can be evaluated
    # without requiring XSPEC.
    pmdl = ui.create_model_component('powlaw1d', 'pmdl')
    pmdl.gamma = 0.318
    pmdl.ampl = 2.52e-3

    ncts = 20
    for did in [9, 10, "meg_m1", "meg_p1"]:
        ui.set_analysis(did, 'wave')
        ui.group_counts(did, ncts)
        ui.notice_id(did, 2, 12)
        ui.set_source(did, pmdl)

    ui.set_stat('chi2datavar')
    s9 = ui.calc_stat(9)
    s10 = ui.calc_stat(10)

    # Since these should be the same, use an equality test rather
    # than an approximate one. At least until it becomes a problem.
    assert s9 == ui.calc_stat('meg_m1')
    assert s10 == ui.calc_stat('meg_p1')

    # The reference values were calculated using CIAO 4.9, Linux64,
    # with Python 3.5.
    assert s9 == pytest.approx(1005.4378559390879)
    assert s10 == pytest.approx(1119.980439489647)
def test_plot_pvalue(make_data_path, clean_astro_ui, hide_logging):
    """Check plot_pvalue with PHA data."""

    ui.load_pha(make_data_path("qso.pi"))

    ui.set_stat('cstat')
    ui.set_method("neldermead")

    ui.group_counts(10)
    ui.notice(0.3, 8)

    ui.set_model("xsphabs.abs1*(xspowerlaw.p1 +gauss1d.g1)")

    # Start close to the best fit to save a small amount of time.
    abs1.nh = 0.05
    p1.phoindex = 1.28
    p1.norm = 2e-4
    g1.ampl = 1.8e-5

    g1.pos = 3.
    ui.freeze(g1.pos)
    g1.fwhm = 0.1
    ui.freeze(g1.fwhm)

    # Could we reduce the number of bins to save evaluation time?
    # We do want a non-default num value when checking the shapes
    # of the output attributes.
    ui.fit()
    ui.plot_pvalue(p1, p1 + g1, num=100, bins=20)

    res = ui.get_pvalue_results()
    assert res.null == pytest.approx(210.34566845619273)
    assert res.alt == pytest.approx(207.66618095925094)
    assert res.lr == pytest.approx(2.679487496941789)

    # Have we returned the correct info? Is it worth checking the
    # stored data (aka how randomised is this output)? For now just
    # test the shapes of the returned arrays.
    assert res.samples.shape == (100, 2)
    assert res.stats.shape == (100, 2)
    assert res.ratios.shape == (100, )

    # Now check the plot object itself.
    pplot = ui.get_pvalue_plot()
    assert pplot.lr == pytest.approx(2.679487496941789)

    assert pplot.xlabel == 'Likelihood Ratio'
    assert pplot.ylabel == 'Frequency'
    assert pplot.title == 'Likelihood Ratio Distribution'

    # It would be nice to check the values here.
    assert pplot.ratios.shape == (100, )
    assert pplot.xlo.shape == (21, )
    assert pplot.xhi.shape == (21, )
    assert pplot.y.shape == (21, )