def setup_two(make_data_path):
    from sherpa.astro.io import read_pha, read_arf, read_rmf
    from sherpa.astro import xspec

    abs1 = xspec.XSphabs('abs1')
    p1 = PowLaw1D('p1')
    model = abs1 * p1 * 1e-4

    pi2278 = make_data_path("pi2278.fits")
    pi2286 = make_data_path("pi2286.fits")
    data_pi2278 = read_pha(pi2278)
    data_pi2286 = read_pha(pi2286)

    data_pi2278.set_rmf(read_rmf(make_data_path('rmf2278.fits')))
    data_pi2278.set_arf(read_arf(make_data_path('arf2278.fits')))
    data_pi2286.set_rmf(read_rmf(make_data_path('rmf2286.fits')))
    data_pi2286.set_arf(read_arf(make_data_path('arf2286.fits')))

    rsp_pi2278 = Response1D(data_pi2278)
    rsp_pi2286 = Response1D(data_pi2286)

    return {'data_pi2278': data_pi2278,
            'data_pi2286': data_pi2286,
            'model_pi2278': rsp_pi2278(model),
            'model_pi2286': rsp_pi2286(model)}
def setup(make_data_path):
    from sherpa.astro.io import read_pha
    from sherpa.astro.xspec import XSwabs, XSpowerlaw

    old_level = logger.getEffectiveLevel()
    logger.setLevel(logging.CRITICAL)

    pha = make_data_path("refake_0934_1_21_1e4.fak")
    simarf = make_data_path("aref_sample.fits")
    pcaarf = make_data_path("aref_Cedge.fits")

    data = read_pha(pha)
    data.ignore(None, 0.3)
    data.ignore(7.0, None)

    rsp = Response1D(data)
    abs1 = XSwabs('abs1')
    p1 = XSpowerlaw('p1')
    model = rsp(abs1 * p1)

    abs1.nh = 0.092886
    p1.phoindex = 0.994544
    p1.norm = 9.26369

    fit = Fit(data, model, CStat(), NelderMead(), Covariance())

    yield {'simarf': simarf,
           'pcaarf': pcaarf,
           'niter': 10,
           'fit': fit}

    # Reset the logger
    logger.setLevel(old_level)
def setup_bkg(make_data_path):
    from sherpa.astro.io import read_pha, read_arf, read_rmf
    from sherpa.astro import xspec

    infile = make_data_path("9774_bg.pi")
    bkg = read_pha(infile)
    bkg.exposure = 1

    arf = read_arf(make_data_path('9774.arf'))
    rmf = read_rmf(make_data_path('9774.rmf'))
    bkg.set_arf(arf)
    bkg.set_rmf(rmf)

    bkg.set_analysis('energy')
    bkg.notice(0.5, 7.0)

    # We stay with a linear scale for the absorption model
    # here as the values don't seem to go below 0.1.
    #
    abs1 = xspec.XSwabs('abs1')
    p1 = PowLaw1D('p1')
    model = abs1 * p1

    p1.ampl = 1e-4

    rsp = Response1D(bkg)
    return {'bkg': bkg, 'model': rsp(model)}
def setUp(self):
    try:
        from sherpa.astro.io import read_pha
        from sherpa.astro.xspec import XSwabs, XSpowerlaw
    except ImportError:
        return

    # self.startdir = os.getcwd()
    self.old_level = logger.getEffectiveLevel()
    logger.setLevel(logging.CRITICAL)

    pha = self.make_path("refake_0934_1_21_1e4.fak")
    # rmf = self.make_path("ccdid7_default.rmf")
    # arf = self.make_path("quiet_0934.arf")
    self.simarf = self.make_path("aref_sample.fits")
    self.pcaarf = self.make_path("aref_Cedge.fits")

    data = read_pha(pha)
    data.ignore(None, 0.3)
    data.ignore(7.0, None)

    rsp = Response1D(data)
    self.abs1 = XSwabs('abs1')
    self.p1 = XSpowerlaw('p1')
    model = rsp(self.abs1 * self.p1)

    self.fit = Fit(data, model, CStat(), NelderMead(), Covariance())
def test_has_pha_response():
    """Check the examples from the docstring"""

    exposure = 200.1
    rdata = create_non_delta_rmf()
    specresp = create_non_delta_specresp()
    adata = create_arf(rdata.energ_lo,
                       rdata.energ_hi,
                       specresp,
                       exposure=exposure)

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure)
    pha.set_arf(adata)
    pha.set_rmf(rdata)

    rsp = Response1D(pha)
    m1 = Gauss1D()
    m2 = PowLaw1D()

    assert not has_pha_response(m1)
    assert has_pha_response(rsp(m1))

    assert not has_pha_response(m1 + m2)
    assert has_pha_response(rsp(m1 + m2))
    assert has_pha_response(m1 + rsp(m2))

    # reflexivity check
    assert has_pha_response(rsp(m1) + m2)
    assert has_pha_response(rsp(m1) + rsp(m2))
def test_rsp1d_empty():

    # As there's currently no explicit check for the input arg
    # being set, the result of sending None in should be an
    # AttributeError.
    #
    with pytest.raises(AttributeError):
        Response1D(None)
def fit(self):
    """Fit spectrum"""
    from sherpa.fit import Fit
    from sherpa.models import ArithmeticModel, SimulFitModel
    from sherpa.astro.instrument import Response1D
    from sherpa.data import DataSimulFit

    # Translate model to sherpa model if necessary
    if isinstance(self.model, models.SpectralModel):
        model = self.model.to_sherpa()
    else:
        model = self.model

    if not isinstance(model, ArithmeticModel):
        raise ValueError('Model not understood: {}'.format(model))

    # Make model amplitude O(1e0)
    val = model.ampl.val * self.FLUX_FACTOR ** (-1)
    model.ampl = val

    if self.fit_range is not None:
        log.info('Restricting fit range to {}'.format(self.fit_range))
        fitmin = self.fit_range[0].to('keV').value
        fitmax = self.fit_range[1].to('keV').value

    # Loop over observations
    pha = list()
    folded_model = list()
    nobs = len(self.obs_list)
    for ii in range(nobs):
        temp = self.obs_list[ii].to_sherpa()
        if self.fit_range is not None:
            temp.notice(fitmin, fitmax)
            if temp.get_background() is not None:
                temp.get_background().notice(fitmin, fitmax)
        temp.ignore_bad()
        if temp.get_background() is not None:
            temp.get_background().ignore_bad()
        pha.append(temp)

        # Forward folding
        resp = Response1D(pha[ii])
        folded_model.append(resp(model) * self.FLUX_FACTOR)

    data = DataSimulFit('simul fit data', pha)
    fitmodel = SimulFitModel('simul fit model', folded_model)
    log.debug(fitmodel)

    fit = Fit(data, fitmodel, self.statistic)
    fitresult = fit.fit()
    log.debug(fitresult)
    # The model instance passed to the Fit now holds the best fit values
    covar = fit.est_errors()
    log.debug(covar)

    for ii in range(nobs):
        efilter = pha[ii].get_filter()
        shmodel = fitmodel.parts[ii]
        self.result[ii].fit = _sherpa_to_fitresult(shmodel, covar,
                                                   efilter, fitresult)
def test_rsp_no_arf_matrix_call(analysis, phaexp):
    """Check out Response1D with matrix but no ARF

    analysis is the analysis setting
    phaexp determines whether the PHA has an exposure time
    """

    if phaexp:
        pha_exposure = 220.9
    else:
        pha_exposure = None

    if phaexp:
        exposure = pha_exposure
        mdl_label = '({} * flat)'.format(exposure)
    else:
        exposure = 1.0
        mdl_label = 'flat'

    rdata = create_non_delta_rmf()

    constant = 2.3
    mdl = Const1D('flat')
    mdl.c0 = constant

    # Turn off integration on this model, so that it is not integrated
    # across the bin width.
    #
    mdl.integrate = False

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=pha_exposure)
    pha.set_rmf(rdata)

    rsp = Response1D(pha)
    wrapped = rsp(mdl)

    assert isinstance(wrapped, ArithmeticModel)

    expname = 'apply_rmf({})'.format(mdl_label)
    assert wrapped.name == expname

    modvals = exposure * constant * np.ones(rdata.energ_lo.size)
    matrix = get_non_delta_matrix()
    expected = np.matmul(modvals, matrix)

    pha.set_analysis(analysis)
    out = wrapped([4, 5])
    assert_allclose(out, expected)
def test_rsp1d_matrix_pha_zero_energy_bin():
    """What happens when the first bin starts at 0, with replacement.

    Unlike test_rsp1d_delta_pha_zero_energy_bin this directly calls
    Response1D to create the model.
    """

    ethresh = 1.0e-5

    rdata = create_non_delta_rmf()

    # hack the first bin to have 0 energy
    rdata.energ_lo[0] = 0.0

    # PHA and ARF have different exposure times
    exposure_arf = 0.1
    exposure_pha = 2.4

    specresp = create_non_delta_specresp()

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        adata = create_arf(rdata.energ_lo,
                           rdata.energ_hi,
                           specresp,
                           exposure=exposure_arf,
                           ethresh=ethresh)

    validate_zero_replacement(ws, 'ARF', 'user-arf', ethresh)

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=exposure_pha)
    pha.set_rmf(rdata)
    pha.set_arf(adata)

    pha.set_analysis('energy')

    mdl = MyPowLaw1D()

    rsp = Response1D(pha)
    wrapped = rsp(mdl)

    # Evaluate the statistic / model. The value was calculated using
    # commit a65fb94004664eab219cc09652172ffe1dad80a6 on a linux
    # system (Ubuntu 17.04).
    #
    f = Fit(pha, wrapped)
    ans = f.calc_stat()
    assert ans == pytest.approx(37971.8716151947)
def test_rsp1d_pha_empty():

    # Create a PHA with no ARF or RMF
    channels = np.arange(1, 5, dtype=np.int16)
    counts = np.asarray([10, 5, 12, 7], dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=12.2)

    with pytest.raises(DataErr) as exc:
        Response1D(pha)

    emsg = 'No instrument response found for dataset test-pha'
    assert str(exc.value) == emsg
def test_rsp_norsp_error():
    """Check that an error is raised when creating a wrapped model
    for a PHA that has no associated response.
    """

    # rdata is only used to define the grids
    rdata = create_non_delta_rmf()

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts)

    with pytest.raises(DataErr) as exc:
        Response1D(pha)

    emsg = "No instrument response found for dataset test-pha"
    assert str(exc.value) == emsg
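# The two tests above show the failure mode when no response is attached.
# For contrast, the following is a minimal sketch of a successful
# Response1D construction. It assumes a recent Sherpa where create_arf and
# create_delta_rmf are exported from sherpa.astro.instrument; the grid,
# exposure, and names used here are illustrative, not taken from the tests.

import numpy as np

from sherpa.astro.data import DataPHA
from sherpa.astro.instrument import Response1D, create_arf, create_delta_rmf
from sherpa.models.basic import PowLaw1D

# A 10-bin energy grid with a "perfect" (diagonal) RMF and a flat ARF.
egrid = np.linspace(0.1, 1.1, 11)
elo, ehi = egrid[:-1], egrid[1:]
rmf = create_delta_rmf(elo, ehi, e_min=elo, e_max=ehi)
arf = create_arf(elo, ehi, specresp=np.ones(elo.size), exposure=100.0)

channels = np.arange(1, elo.size + 1, dtype=np.int16)
counts = np.ones(elo.size, dtype=np.int16)
pha = DataPHA('example-pha', channel=channels, counts=counts,
              exposure=100.0)
pha.set_arf(arf)
pha.set_rmf(rmf)

# With a response attached, Response1D no longer raises DataErr and the
# wrapped model folds the source model through exposure * ARF * RMF.
rsp = Response1D(pha)
folded = rsp(PowLaw1D('pl'))
print(folded.name)  # something like apply_rmf(apply_arf((100.0 * pl)))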
def setup(make_data_path):
    from sherpa.astro.io import read_pha
    from sherpa.astro import xspec

    infile = make_data_path("9774.pi")
    data = read_pha(infile)
    data.notice(0.3, 7.0)

    # Change the exposure time to make the fitted amplitude
    # > 1
    #
    data.exposure = 1

    # Use the wabs model because it is unlikely to change
    # (as scientifically it is no longer useful). The problem
    # with using something like the phabs model is that
    # changes to that model in XSPEC could change the results
    # here.
    #
    # We fit the log of the nH since this makes the numbers
    # a bit closer to O(1), and so checking should be easier.
    #
    abs1 = xspec.XSwabs('abs1')
    p1 = PowLaw1D('p1')
    factor = Const1D('factor')
    factor.integrate = False
    model = abs1 * p1 + 0 * factor

    factor.c0 = 0
    abs1.nh = 10**factor.c0

    # Ensure the nh limits are honoured by factor (upper limit only).
    # If you don't do this then the fit can fail because a value
    # outside the abs1.nh but within factor.c0 can be picked.
    #
    factor.c0.max = numpy.log10(abs1.nh.max)

    rsp = Response1D(data)

    return {'data': data, 'model': rsp(model)}
def test_rsp_normf_call(arfexp, phaexp):
    """Check out Response1D with no RMF.

    arfexp determines whether the ARF has an exposure time
    phaexp determines whether the PHA has an exposure time

    This only uses the channel setting
    """

    # Choose different exposure times for ARF and PHA to see which
    # gets picked up.
    #
    if arfexp:
        arf_exposure = 200.1
    else:
        arf_exposure = None

    if phaexp:
        pha_exposure = 220.9
    else:
        pha_exposure = None

    if phaexp:
        exposure = pha_exposure
        mdl_label = '({} * flat)'.format(exposure)
    elif arfexp:
        exposure = arf_exposure
        mdl_label = '({} * flat)'.format(exposure)
    else:
        exposure = 1.0
        mdl_label = 'flat'

    # rdata is only used to define the grids
    rdata = create_non_delta_rmf()
    specresp = create_non_delta_specresp()
    adata = create_arf(rdata.energ_lo,
                       rdata.energ_hi,
                       specresp,
                       exposure=arf_exposure)

    constant = 2.3
    mdl = Const1D('flat')
    mdl.c0 = constant

    # Turn off integration on this model, so that it is not integrated
    # across the bin width.
    #
    mdl.integrate = False

    nchans = rdata.e_min.size
    channels = np.arange(1, nchans + 1, dtype=np.int16)
    counts = np.ones(nchans, dtype=np.int16)
    pha = DataPHA('test-pha', channel=channels, counts=counts,
                  exposure=pha_exposure)
    pha.set_arf(adata)

    rsp = Response1D(pha)
    wrapped = rsp(mdl)

    assert isinstance(wrapped, ArithmeticModel)

    expname = 'apply_arf({})'.format(mdl_label)
    assert wrapped.name == expname

    expected = exposure * constant * specresp

    pha.set_analysis('channel')
    out = wrapped([4, 5])
    assert_allclose(out, expected)
def fit(self):
    """Fit spectrum"""
    from sherpa.fit import Fit
    from sherpa.models import ArithmeticModel, SimulFitModel
    from sherpa.astro.instrument import Response1D
    from sherpa.data import DataSimulFit

    # Reset results
    self._result = list()

    # Translate model to sherpa model if necessary
    if isinstance(self.model, models.SpectralModel):
        model = self.model.to_sherpa()
    else:
        model = self.model

    if not isinstance(model, ArithmeticModel):
        raise ValueError('Model not understood: {}'.format(model))

    # Make model amplitude O(1e0)
    val = model.ampl.val * self.FLUX_FACTOR**(-1)
    model.ampl = val

    if self.fit_range is not None:
        log.info('Restricting fit range to {}'.format(self.fit_range))
        fitmin = self.fit_range[0].to('keV').value
        fitmax = self.fit_range[1].to('keV').value

    # Loop over observations
    pha = list()
    folded_model = list()
    nobs = len(self.obs_list)
    for ii in range(nobs):
        temp = self.obs_list[ii].to_sherpa()
        if self.fit_range is not None:
            temp.notice(fitmin, fitmax)
            if temp.get_background() is not None:
                temp.get_background().notice(fitmin, fitmax)
        temp.ignore_bad()
        if temp.get_background() is not None:
            temp.get_background().ignore_bad()
        pha.append(temp)
        log.debug('Noticed channels obs {}: {}'.format(
            ii, temp.get_noticed_channels()))

        # Forward folding
        resp = Response1D(pha[ii])
        folded_model.append(resp(model) * self.FLUX_FACTOR)

    if (len(pha) == 1 and len(pha[0].get_noticed_channels()) == 1):
        raise ValueError('You are trying to fit one observation in only '
                         'one bin, error estimation will fail')

    data = DataSimulFit('simul fit data', pha)
    log.debug(data)
    fitmodel = SimulFitModel('simul fit model', folded_model)
    log.debug(fitmodel)

    fit = Fit(data, fitmodel, self.statistic)
    fitresult = fit.fit()
    log.debug(fitresult)
    # The model instance passed to the Fit now holds the best fit values
    covar = fit.est_errors()
    log.debug(covar)

    for ii in range(nobs):
        efilter = pha[ii].get_filter()
        # Skip observations not participating in the fit
        if efilter != '':
            shmodel = fitmodel.parts[ii]
            result = _sherpa_to_fitresult(shmodel, covar, efilter, fitresult)
            result.obs = self.obs_list[ii]
        else:
            result = None
        self._result.append(result)

    valid_result = np.nonzero(self.result)[0][0]
    global_result = copy.deepcopy(self.result[valid_result])
    global_result.npred = None
    global_result.obs = None
    all_fitranges = [_.fit_range for _ in self._result if _ is not None]
    fit_range_min = min([_[0] for _ in all_fitranges])
    fit_range_max = max([_[1] for _ in all_fitranges])
    global_result.fit_range = u.Quantity((fit_range_min, fit_range_max))
    self._global_result = global_result
report("mdl")

egrid = np.arange(0.1, 10, 0.01)
elo, ehi = egrid[:-1], egrid[1:]
emid = (elo + ehi) / 2

plt.clf()
plt.plot(emid, mdl(elo, ehi), label='Absorbed')
plt.plot(emid, pl(elo, ehi), ':', label='Unabsorbed')
plt.xscale('log')
plt.ylim(0, 0.01)
plt.legend()

savefig('pha_model_energy.png')

from sherpa.astro.instrument import Response1D

rsp = Response1D(pha)
full = rsp(mdl)

report("full")

dump("elo.size")
dump("full(elo, ehi).size")
dump("full([1, 2, 3]).size")
dump("np.all(full(elo, ehi) == full([1, 2, 3]))")

plt.clf()
plt.plot(pha.channel, full(pha.channel))
plt.xlabel('Channel')
plt.ylabel('Counts')

savefig('pha_fullmodel_manual.png')
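# As the dump calls above suggest, the response-wrapped model evaluates on
# the instrument grid regardless of the grid it is given, so it can be fit
# directly against the PHA counts. A minimal sketch, reusing the pha and
# full objects from the script above together with the Fit, CStat, and
# NelderMead classes used in the other snippets here (the statistic and
# optimiser choices are illustrative):

from sherpa.fit import Fit
from sherpa.optmethods import NelderMead
from sherpa.stats import CStat

# Compare the folded model to the data channel by channel and fit.
f = Fit(pha, full, CStat(), NelderMead())
res = f.fit()
print(res.format())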
def test_pragbayes_pcaarf_limits(sampler, setup, caplog, reset_seed):
    """Try and trigger limit issues."""

    from sherpa.astro.xspec import XSAdditiveModel, XSMultiplicativeModel, \
        XSwabs, XSpowerlaw

    # Set the seed for the RNG. The seed was adjusted to try and make
    # sure the coverage was "good" (i.e. hits parts of
    # sherpa/astro/sim/*bayes.py) whilst still passing the test and
    # reducing the runtime. This is not a guarantee that this is the
    # "fastest" seed, just that it's one of the better ones I've seen.
    #
    np.random.seed(0x723c)

    class HackAbs(XSwabs):
        """Restrict hard limits"""
        def __init__(self, name='wabs'):
            self.nH = Parameter(name, 'nH', 0.1, 0, 1, 0, 1,
                                '10^22 atoms / cm^2')
            XSMultiplicativeModel.__init__(self, name, (self.nH, ))

    class HackPowerLaw(XSpowerlaw):
        """Restrict hard limits"""
        def __init__(self, name='powerlaw'):
            self.PhoIndex = Parameter(name, 'PhoIndex', 1., 0.95, 1.05,
                                      0.95, 1.05)
            self.norm = Parameter(name, 'norm', 9.2, 8.8, 9.7, 8.8, 9.7)
            XSAdditiveModel.__init__(self, name, (self.PhoIndex, self.norm))

    fit = setup['fit']

    mcmc = sim.MCMC()
    mcmc.set_sampler(sampler)
    mcmc.set_sampler_opt("simarf", setup['pcaarf'])
    mcmc.set_sampler_opt("p_M", 0.5)
    mcmc.set_sampler_opt("nsubiter", 5)

    covar_results = fit.est_errors()
    cov = covar_results.extra_output

    # Restrict the parameter values to try and trigger some
    # invalid proposal steps. It's not obvious how the soft,
    # hard, and prior function values all interact.
    #
    myabs = HackAbs()
    mypl = HackPowerLaw()

    pvals = np.asarray(covar_results.parvals)
    pmins = np.asarray(covar_results.parmins)
    pmaxs = np.asarray(covar_results.parmaxes)

    # Make sure we add the response
    rsp = Response1D(fit.data)
    fit.model = rsp(myabs * mypl)

    fit.model.thawedpars = pvals
    fit.model.thawedparmins = pvals + 2 * pmins  # pmins are < 0
    fit.model.thawedparmaxes = pvals + 2 * pmaxs

    # weight values away from the best-fit (does this actually
    # help?)
    #
    for par in fit.model.pars:
        mcmc.set_prior(par, inverse2)

    niter = setup['niter']
    with caplog.at_level(logging.INFO, logger='sherpa'):
        # Do nothing with the warning at the moment, which could be
        # a RuntimeWarning about the covariance matrix not being
        # positive-semidefinite. This is just needed to make sure
        # we don't trigger the default warning check.
        #
        with pytest.warns(Warning):
            stats, accept, params = mcmc.get_draws(fit, cov, niter=niter)

    # This is a lower bound, in case there's any messages from
    # the sampling (either before or after displaying the
    # 'Using Priors' message).
    #
    nrecords = len(caplog.record_tuples)
    assert nrecords > 3

    i = 0
    while caplog.record_tuples[i][2] != 'Using Priors:':
        i += 1
        assert i < nrecords

    assert i < (nrecords - 3)

    assert caplog.record_tuples[i + 1][2].startswith(
        'wabs.nH: <function inverse2 at ')
    assert caplog.record_tuples[i + 2][2].startswith(
        'powerlaw.PhoIndex: <function inverse2 at ')
    assert caplog.record_tuples[i + 3][2].startswith(
        'powerlaw.norm: <function inverse2 at ')

    # It is not guaranteed what limits/checks we hit
    #
    have_hard_limit = False
    have_reject = False
    for loc, lvl, msg in caplog.record_tuples[i + 4:]:
        if msg.startswith('Draw rejected: parameter boundary exception'):
            have_reject = True
            assert lvl == logging.INFO

        elif msg.startswith('hard '):
            have_hard_limit = True
            assert lvl == logging.WARNING

    assert have_hard_limit
    assert have_reject