def test_calc_flux_pha_unabsorbed(make_data_path, clean_astro_ui): """Can we calculate an unabsorbed flux?""" # The idea is that with a model expression of # const1d.scale * powlaw1d.pl # when scale is not 1 (and not integrated) then we can # just look to see if the "absorbed" flux is scale * the # "unabsorbed" flux. # infile = make_data_path('3c273.pi') ui.load_pha(infile) scale = ui.create_model_component('const1d', 'scale') pl = ui.create_model_component('powlaw1d', 'pl') scale.c0 = 0.8 scale.integrate = False pl.gamma = 1.5 pl.ampl = 1e-4 ui.set_source(scale * pl) pflux_abs = ui.calc_photon_flux(0.5, 7) pflux_unabs = ui.calc_photon_flux(0.5, 7, model=pl) eflux_abs = ui.calc_energy_flux(0.5, 7) eflux_unabs = ui.calc_energy_flux(0.5, 7, model=pl) pflux_scale = pflux_abs / pflux_unabs eflux_scale = eflux_abs / eflux_unabs assert pflux_scale == pytest.approx(0.8) assert eflux_scale == pytest.approx(0.8)
def example_model():
    """Return a const1d source component named 'cpt' with c0 = 102."""
    ui.create_model_component('const1d', 'cpt')
    mdl = ui.get_model_component('cpt')
    mdl.c0 = 1.02e2
    return mdl
def example_bkg_model():
    """Return a powlaw1d component named 'bcpt' set up as a flat background."""
    ui.create_model_component('powlaw1d', 'bcpt')
    bmdl = ui.get_model_component('bcpt')
    # gamma = 0 gives a flat model, which makes the expected values
    # easy to evaluate by hand.
    bmdl.gamma = 0.0
    bmdl.ampl = 1e-1
    return bmdl
def test_fake_pha_background_model(clean_astro_ui, reset_seed):
    """Check we can add a background component.

    See also test_fake_pha_basic.

    For simplicity we use perfect responses.
    """

    np.random.seed(27347)

    id = 'qwerty'
    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)
    ui.set_backscal(id, 0.1)

    bkg = ui.DataPHA('bkg', channels, bcounts,
                     exposure=200, backscal=0.4)

    # Perfect (delta-function) response over three energy bins.
    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 0
    # BUG FIX: the background component used to be created with the
    # name 'mdl' as well, which replaced the source component of the
    # same name in the session. Give it its own name.
    bkgmdl = ui.create_model_component('const1d', 'bkgmdl')
    bkgmdl.c0 = 2
    ui.set_source(id, mdl)
    ui.set_bkg(id, bkg)
    ui.set_bkg_source(id, bkgmdl)
    ui.set_arf(id, arf, bkg_id=1)
    ui.set_rmf(id, rmf, bkg_id=1)

    ui.fake_pha(id, arf, rmf, 1000.0, bkg='model')

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    # and the background signal is
    #    [125, 125, 125]
    # so, even with randomly drawn values, the following
    # checks should be robust.
    #
    # NOTE(review): with mdl.c0 = 0 the evaluated source prediction
    # below is actually all zeros, so the first comparison is a weak
    # bound - confirm whether c0 was meant to be non-zero.
    #
    predicted_by_source = 1000 * mdl(elo, ehi)
    predicted_by_bkg = (1000 / 200) * (0.1 / 0.4) * bcounts
    assert (faked.counts > predicted_by_source).all()
    assert (faked.counts > predicted_by_bkg).all()
def test_calc_flux_pha_analysis(elo, ehi, setting, lo, hi, make_data_path, clean_astro_ui):
    """Do calc_photon/energy_flux return the expected results: fluxes + analysis setting

    Basic test for different analysis settings: the
    same range (modulo precision of conversion) gives the
    same results.
    """

    infile = make_data_path('3c273.pi')
    pl = ui.create_model_component('powlaw1d', 'pl')

    ui.load_pha(infile)
    ui.set_source(pl)

    # Fluxes with the default analysis setting ...
    pflux = ui.calc_photon_flux(elo, ehi)
    eflux = ui.calc_energy_flux(elo, ehi)

    # ... should match the fluxes measured after switching the
    # analysis units, where (lo, hi) is (elo, ehi) converted to
    # the new units.
    ui.set_analysis(setting)
    pflux2 = ui.calc_photon_flux(lo, hi)
    eflux2 = ui.calc_energy_flux(lo, hi)

    # use approx here since the bin edges are not guaranteed
    # to line up, and use a large tolerance.
    #
    assert pflux2 == pytest.approx(pflux, rel=1e-2)

    # Compare the energy fluxes in log space to side-step the
    # large dynamic range of the values.
    eflux = np.log10(eflux)
    eflux2 = np.log10(eflux2)
    assert eflux2 == pytest.approx(eflux, rel=1e-3)
def _test_can_evaluate_thcompc():
    """Does this redistribute some emission?

    It does not test the result is actually meaningful, but
    does check it's done something.

    NOTE(review): the leading underscore means pytest does not collect
    this test - presumably disabled deliberately; confirm before renaming.
    """

    ui.clean()
    ui.dataspace1d(0.1, 10, 0.01, id='unconv')
    ui.dataspace1d(0.1, 10, 0.01, id='conv')

    # Convolved version: the xsthcompc kernel applied to a gaussian.
    mconv = ui.create_model_component('xsthcompc', 'conv')
    ui.set_source('conv', mconv(ui.xsgaussian.m1))

    # Unconvolved version: the same gaussian on its own.
    m1 = ui.get_model_component('m1')
    ui.set_source('unconv', m1)
    m1.lineE = 5.0
    m1.Sigma = 1.0

    yunconv = ui.get_model_plot('unconv').y.copy()
    yconv = ui.get_model_plot('conv').y.copy()

    assert (yunconv > 0).any()
    assert (yconv > 0).any()

    # not guaranteed the peak will be reduced (depends on what
    # the convolution is doing), and I would hope that flux
    # is at best conserved (ie not created), and that we don't
    # have to worry about numerical artifacts here.
    #
    assert yunconv.max() > yconv.max()
    assert yunconv.sum() >= yconv.sum()
def setup_imgdata_model(): """Use a model for the PSF""" # Fake an image # x1, x0 = np.mgrid[0:8, 0:10] ymod = 10 + 100 / ((x0 - 5.5)**2 + (x1 - 3.5)**2) x0 = x0.flatten() x1 = x1.flatten() y = ymod.flatten() y = y.astype(int) # convert to integers ui.load_arrays(1, x0, x1, y, (8, 10), ui.DataIMG) pmodel = ui.create_model_component('gauss2d', 'pmodel') pmodel.xpos = 0 pmodel.ypos = 0 pmodel.fwhm = 3 ui.load_psf('psf', pmodel) psf.size = [10, 10] # not sure if this is useful but leave in psf.center = [5, 4] ui.set_psf(psf) setup_imgdata_source()
def test_calc_flux_pha_invalid_range(id, func, clean_astro_ui):
    """Ensure an error is raised if lo > hi"""

    # Minimal integrated 1D dataset: two bins, unit counts.
    x = np.arange(3, 6)
    y = np.ones(x.size - 1)

    ui.load_arrays(id, x[:-1], x[1:], y, ui.Data1DInt)
    mdl = ui.create_model_component('const1d', 'm')

    if id is None:
        ui.set_source(mdl)
    else:
        ui.set_source(id, mdl)

    # Note: really the error message should not include energy since in
    # this case (Data1DInt) there's no energy, and if this were
    # a DataPHA case the message says energy even if analysis=wave
    # or channel.
    #
    emsg = 'the energy range is not consistent, 12 !< 5'
    with pytest.raises(IOErr, match=emsg):
        if id is None:
            func(12, 5)
        else:
            func(12, 5, id=id)
def test_list_pileup_ids_multi(clean_astro_ui):
    """Setting a pileup model on several datasets reports all their ids."""

    jdp = ui.create_model_component('jdpileup', "jdp")

    for idval in [1, "2"]:
        ui.load_arrays(idval, [1, 2, 3], [1, 1, 1], ui.DataPHA)
        ui.set_pileup_model(idval, jdp)

    assert ui.list_pileup_model_ids() == [1, "2"]
def test_load_multi_arfsrmfs(make_data_path, clean_astro_ui):
    """Added in #728 to ensure cache parameter is sent along by
    MultiResponseSumModel (fix #717).

    This has since been simplified to switch from xsapec to
    powlaw1d as it drops the need for XSPEC and is a simpler
    model, so is less affected by changes in the model code.

    A fit of the Sherpa powerlaw-model to 3c273.pi with a
    single response in CIAO 4.11 (background subtracted,
    0.5-7 keV) returns gamma = 1.9298, ampl = 1.73862e-4
    so doubling the response should halve the amplitude but
    leave the gamma value the same when using two responses,
    as below. This is with chi2datavar.
    """

    pha_pi = make_data_path("3c273.pi")
    ui.load_pha(1, pha_pi)
    ui.load_pha(2, pha_pi)

    arf = make_data_path("3c273.arf")
    rmf = make_data_path("3c273.rmf")

    # Load the same response twice into each dataset, doubling the
    # effective area (hence halving the fitted amplitude).
    ui.load_multi_arfs(1, [arf, arf], [1, 2])
    ui.load_multi_arfs(2, [arf, arf], [1, 2])
    ui.load_multi_rmfs(1, [rmf, rmf], [1, 2])
    ui.load_multi_rmfs(2, [rmf, rmf], [1, 2])

    ui.notice(0.5, 7)
    ui.subtract(1)
    ui.subtract(2)

    # Both datasets share a single model component.
    src = ui.create_model_component('powlaw1d', 'src')
    ui.set_model(1, src)
    ui.set_model(2, src)

    # ensure the test is repeatable by running with a known
    # statistic and method
    #
    ui.set_method('levmar')
    ui.set_stat('chi2datavar')

    # Really what we care about for fixing #717 is that
    # fit does not error out, but it's useful to know that
    # the fit has changed the parameter values (which were
    # both 1 before the fit).
    #
    ui.fit()

    fr = ui.get_fit_results()
    assert fr.succeeded
    assert fr.datasets == (1, 2)

    assert src.gamma.val == pytest.approx(1.9298, rel=1.0e-4)
    assert src.ampl.val == pytest.approx(1.73862e-4 / 2, rel=1.0e-4)
def set_bkg_model(self, bkgmodel):
    """
    Create a background model for each dataset. A dataset is associated
    with a specific extraction annulus. Each observation gets its own
    xsconstant normalization component multiplying the shared background
    model expression.

    :param bkgmodel: string expression defining background model
    :rtype: None
    """
    self.bkgmodel = bkgmodel

    # One normalization component per observation id.
    bkg_norm = {}
    for obsid in self.obsids:
        bkg_norm_name = 'bkg_norm_%d' % obsid
        # FIX: the original used Python-2 print statements, which are a
        # SyntaxError under Python 3.
        print('Creating model component xsconstant.%s' % bkg_norm_name)
        SherpaUI.create_model_component('xsconstant', bkg_norm_name)
        # FIX: use the documented accessor instead of eval() to fetch
        # the newly-created component.
        bkg_norm[obsid] = SherpaUI.get_model_component(bkg_norm_name)

    for dataset in self.datasets:
        print('Setting bkg model for dataset %d to bkg_norm_%d'
              % (dataset['id'], dataset['obsid']))
        SherpaUI.set_bkg_model(dataset['id'],
                               bkg_norm[dataset['obsid']] * bkgmodel)
def check_eval_multi_rmf():
    """Test that the data is handled correctly

    For use by test_eval_multi_rmf: assumes the caller has already
    loaded a dataset with two offset RMFs attached.
    """

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 4
    ui.set_source(mdl)

    # The analysis setting appears to depend on how the
    # data is set up. This is just to check it is energy.
    #
    d = ui.get_data()
    assert d.units == 'energy'

    # Easiest way to evaluate the model is to grab the
    # data from plot_source / plot_model
    #
    # The source doesn't care about how the instrument is set
    # up.
    #
    splot = ui.get_source_plot()
    assert (splot.y == 4).all()

    # The model plot does care about the instrument. There should
    # be two equal responses, offset from each other. As this is
    # done for each energy they should "cancel out" (apart from
    # doubling the signal) apart from the start/end bins
    #
    # I haven't been able to convince myself I understand the handling
    # at the start/end of the RMF, so I am using this just as a
    # regression test.
    #
    mplot = ui.get_model_plot()

    assert mplot.y[11:968] == pytest.approx(8)

    # handle "overflow" bins
    assert mplot.y[0] == pytest.approx(84.6)
    assert mplot.y[-1] == pytest.approx(43.0)

    # handle bins below and above the offsets
    range_is_low = np.arange(1, 11)
    is_low = [4.2, 4., 4., 4., 4., 4., 4., 4., 5.2, 7.6]
    assert mplot.y[range_is_low] == pytest.approx(is_low)

    is_high = [6.8, 4.4, 4., 4., 4., 4., 4., 4., 4., 4.,
               4., 4., 4., 4., 4., 4., 4., 4., 4., 4.2]
    assert mplot.y[968:988] == pytest.approx(is_high)
def load_components(filename):
    """Load model components from a plain-text description file.

    Expected layout: the first line ends with the number of components;
    each component then has a header line "comptype compname numpars"
    followed by numpars lines of "parname parmin parval parmax".

    :param filename: path to the component description file
    :rtype: None (components are created in the Sherpa session)
    """
    with open(filename) as f:
        print('Loading ' + filename)
        numcomponents = f.readline().split(' ')[-1]
        for i in range(int(numcomponents)):
            comptype, compname, numpars = f.readline().split(' ')
            comp = shp.create_model_component(comptype, compname)
            for j in range(int(numpars)):
                parname, parmin, parval, parmax = f.readline().split(' ')
                for par in comp.pars:
                    if parname == par.name:
                        # FIX: the values read from the file are strings;
                        # convert to float before assigning to the
                        # parameter attributes. Widen the limits first
                        # (min, then max) so that setting val cannot fall
                        # outside the currently-allowed range.
                        par.min = float(parmin)
                        par.max = float(parmax)
                        par.val = float(parval)
            print(comp)
def test_create_thcompc():
    """Check an xsthcompc instance can be created with the expected defaults."""
    ui.clean()

    # mdl = ui.xsthcompc.conv
    conv = ui.create_model_component('xsthcompc', 'conv')

    assert isinstance(conv, XSConvolutionKernel)
    assert conv.type == 'xsthcompc'
    assert conv.name == 'xsthcompc.conv'

    pars = conv.pars
    assert len(pars) == 3
    assert pars[0].name == 'gamma_tau'
    assert pars[1].units == 'keV'
    assert pars[1].val == pytest.approx(50)
def test_fake_pha_basic_arfrmf_set_in_advance(clean_astro_ui, reset_seed):
    """Similar to test_fake_pha_basic but instead of passing in
    the RMF, we set it before. The result should be the same, so we
    don't have to go through all the parameterization of that test.
    """

    np.random.seed(20348)

    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)

    ui.load_arrays('id123', channels, counts, ui.DataPHA)
    ui.set_exposure('id123', 100)

    # Perfect (delta-function) response over three energy bins.
    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)
    # Attach the responses up front rather than passing them to fake_pha.
    ui.set_rmf('id123', rmf)
    ui.set_arf('id123', arf)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source('id123', mdl)

    # None/None means "use the already-attached responses".
    ui.fake_pha('id123', None, None, 1000.0)

    faked = ui.get_data('id123')
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert (faked.counts.sum() > 200) and (faked.counts.sum() < 3000)
    # This is more likely to fail by chance, but still very unlikely
    assert faked.counts[1] > faked.counts[0]
def check_eval_multi_arfrmf():
    """Test that the data is handled correctly

    For use by test_eval_multi_arfrmf: assumes the caller has already
    loaded a dataset with multiple ARF/RMF pairs attached.
    """

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 4
    ui.set_source(mdl)

    # The analysis setting appears to depend on how the
    # data is set up. This is just to check it is energy.
    #
    d = ui.get_data()
    assert d.units == 'energy'

    # Easiest way to evaluate the model is to grab the
    # data from plot_source / plot_model
    #
    # The source doesn't care about how the instrument is set
    # up.
    #
    splot = ui.get_source_plot()
    assert (splot.y == 4).all()

    # Comparison to the "truth" is harder than the previous checks
    # so just hard-code it (regression values).
    #
    y = ui.get_model_plot().y
    assert y[0] == pytest.approx(93.06)
    assert y[1] == pytest.approx(4.62)
    assert y[2:479] == pytest.approx(4.4)
    assert y[479] == pytest.approx(3.2)
    assert y[480] == pytest.approx(0.8)
    assert y[481:498] == pytest.approx(0.4)
    assert y[498] == pytest.approx(1.24)
    assert y[499] == pytest.approx(2.92)
    assert y[500:570] == pytest.approx(3.2)
    assert y[570] == pytest.approx(3.08)
    assert y[571] == pytest.approx(2.84)
    assert y[572:589] == pytest.approx(2.8)
    assert y[589] == pytest.approx(2.92)
    assert y[590] == pytest.approx(3.16)
    assert y[591:968] == pytest.approx(3.2)
    assert y[968] == pytest.approx(3.08)
    assert y[969] == pytest.approx(2.84)
    assert y[970:987] == pytest.approx(2.8)
    assert y[987] == pytest.approx(2.94)
    assert y[988] == pytest.approx(30.1)
def test_fake_pha_no_data(id, clean_astro_ui, reset_seed):
    """What happens if there is no data loaded at the id?
    """

    np.random.seed(21347)

    # Perfect (delta-function) response over three energy bins.
    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source(id, mdl)

    # fake_pha should create the dataset itself.
    ui.fake_pha(id, arf, rmf, 1000.0)

    # We don't really check anything sensible with the counts.
    # It is unlikely the simulated counts will be <= 1.
    #
    # For reference the predicted source signal is
    #    [200, 400, 400]
    #
    channels = np.arange(1, 4)
    counts = [1, 1, 1]

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.get_arf().name == 'test-arf'
    assert faked.get_rmf().name == 'delta-rmf'

    assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert (faked.counts.sum() > 200) and (faked.counts.sum() < 3000)
    # This is more likely to fail by chance, but still very unlikely
    assert faked.counts[1] > faked.counts[0]
def _create_src_model_components(self): """ Create source model components for each shell corresponding to the source model expression. """ # Find the generic components in source model expression RE_model = re.compile(r'\b \w+ \b', re.VERBOSE) for match in RE_model.finditer(self.srcmodel): model_type = match.group() self.srcmodel_comps.append(dict(type=model_type, start=match.start(), end=match.end())) # For each shell create the corresponding model components so they can # be used later to create composite source models for each dataset for shell in range(self.nshell): for srcmodel_comp in self.srcmodel_comps: model_comp = {} model_comp['type'] = srcmodel_comp['type'] model_comp['name'] = '%s_%d' % (model_comp['type'], shell) model_comp['shell'] = shell SherpaUI.create_model_component(model_comp['type'], model_comp['name']) model_comp['object'] = eval(model_comp['name']) # Work-around in lieu of accessor self.model_comps.append(model_comp)
def test_create_zkerrbb():
    """Check an xszkerrbb instance can be created with the expected defaults."""
    ui.clean()

    # mdl = ui.xszkerrbb.zb
    zb = ui.create_model_component('xszkerrbb', 'zb')

    assert isinstance(zb, XSAdditiveModel)
    assert zb.type == 'xszkerrbb'
    assert zb.name == 'xszkerrbb.zb'

    pars = zb.pars
    assert len(pars) == 10
    assert pars[0].name == 'eta'
    assert pars[2].units == 'degree'
    assert pars[3].frozen
    assert pars[5].val == pytest.approx(0.01)
    assert pars[8].alwaysfrozen
    assert pars[9].name == 'norm'
def check_eval_multi_arf():
    """Test that the data is handled correctly

    For use by test_eval_multi_arf: assumes the caller has already
    loaded a dataset with two ARFs attached (see expected_arf2).
    """

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 4
    ui.set_source(mdl)

    # The analysis setting appears to depend on how the
    # data is set up. This is just to check it is energy.
    #
    d = ui.get_data()
    assert d.units == 'energy'

    # Easiest way to evaluate the model is to grab the
    # data from plot_source / plot_model
    #
    # The source doesn't care about how the instrument is set
    # up.
    #
    splot = ui.get_source_plot()
    assert (splot.y == 4).all()

    # The model plot does care about the instrument
    #
    yarf1, yarf2 = expected_arf2()
    expected = (yarf1 + yarf2) * 4

    mplot = ui.get_model_plot()
    assert mplot.y == pytest.approx(expected)

    # spot checks (just to validate the expected arf code)
    #
    # ymid represents the region where ARF1 is 0
    #
    yfirst = 4 * 1.1
    ylast = 4 * (0.1 + 0.7)
    ymid = 4 * 0.7

    assert mplot.y[0] == pytest.approx(yfirst)
    assert mplot.y[-1] == pytest.approx(ylast)
    assert mplot.y[600] == pytest.approx(ymid)
def test_create_agnslim():
    """Check an xsagnslim instance can be created with the expected defaults."""
    ui.clean()

    # Why do I have to use create_model_component? Is it because
    # we have only registered xsagnslim into the global namespace
    # and not ui?
    #
    # mdl = ui.xsagnslim.foo
    #
    foo = ui.create_model_component('xsagnslim', 'foo')

    assert isinstance(foo, XSAdditiveModel)
    assert foo.type == 'xsagnslim'
    assert foo.name == 'xsagnslim.foo'

    pars = foo.pars
    assert len(pars) == 15
    assert pars[0].name == 'mass'
    assert pars[1].units == 'Mpc'
    assert pars[3].frozen
    assert pars[4].val == pytest.approx(0.5)
    assert pars[14].name == 'norm'
def test_can_evaluate_additive_models(mname):
    """Does evaluating the named additive model create some emission?

    It does not test the result is actually meaningful, and relies
    on the slightly-more-involved tests in test_xspeclmodels.py for
    the model evaluation.
    """
    ui.clean()

    addmdl = ui.create_model_component(mname, 'm1')

    # test out a combined model; not really needed but this is
    # closer to how people will be using it.
    #
    ui.dataspace1d(0.1, 10, 0.01)
    ui.set_source(ui.xsphabs.m2 * addmdl)

    # rely on test_xspeclmodels.py for a more-complete test of
    # the model calling
    yvals = ui.get_model_plot().y.copy()

    # Assume there is some emission
    assert (yvals > 0).any()
def test_calc_flux_density_pha(id, energy, make_data_path, clean_astro_ui):
    """Do calc_photon/energy_flux return the expected results: densities

    The answer should be the same when lo is set and hi
    is None or vice versa. The flux densities are going to be
    in units of <value>/cm^2/s/keV {value=photon, erg}

    Note: this tests the "edge" condition when lo=hi; this
    is not documented, but left in as a check (and perhaps
    it should be documented).
    """

    infile = make_data_path('3c273.pi')

    # The 3c273 RMF was generated over the range 0.1 to 11 keV
    # (inclusive). By picking gamma = 1 the photon flux
    # density is ampl / e and the energy flux is scale * ampl.
    # However, this is the exact calculation, but the one done by
    # Sherpa involves calculating the model over a bin and then
    # dividing by that bin width, which is different enough to
    # the analytic formula that we use this approach here when
    # calculating the expected values. The bin width is 0.01 keV,
    # with bins starting at 0.1.
    #
    ampl = 1e-4

    # flux densities: exact
    # pflux_exp = ampl / energy
    # eflux_exp = 1.602e-9 * ampl

    # Note that you can calculate an answer at the left edge of the grid, but
    # not at the right (this is either a < vs <= comparison, or numeric
    # issues with the maximum grid value).
    #
    # Note that the RMF emin is just above 0.1 keV, ie
    # 0.10000000149011612, which is why an energy of 0.1 gives
    # an answer of 0. Now, sao_fcmp(0.1, 0.10000000149011612, sherpa.utils.eps)
    # returns 0, so we could consider these two values equal, but
    # that would complicate the flux calculation so is (currently) not
    # done.
    #
    de = 0.01
    if energy <= 0.1 or energy >= 11:
        pflux_exp = 0.0
        eflux_exp = 0.0
    else:
        # assuming a bin centered on the energy; the actual grid
        # is not this, but this should be close
        hwidth = de / 2
        pflux_exp = ampl * (np.log(energy + hwidth) - np.log(energy - hwidth)) / de
        eflux_exp = 1.602e-9 * energy * pflux_exp

    pl = ui.create_model_component('powlaw1d', 'pl')
    pl.ampl = ampl
    pl.gamma = 1

    if id is None:
        ui.load_pha(infile)
        ui.set_source(pl)
    else:
        ui.load_pha(id, infile)
        ui.set_source(id, pl)

    # Use a subset of the data range (to check that the calc routines
    # ignores them, ie uses the full 0.1 to 11 keV range.
    #
    ui.ignore(None, 0.5)
    ui.ignore(7, None)

    # Do not use named arguments, but assume positional arguments
    if id is None:
        pflux1 = ui.calc_photon_flux(energy)
        pflux2 = ui.calc_photon_flux(None, energy)
        pflux3 = ui.calc_photon_flux(energy, energy)

        eflux1 = ui.calc_energy_flux(energy)
        eflux2 = ui.calc_energy_flux(None, energy)
        eflux3 = ui.calc_energy_flux(energy, energy)
    else:
        pflux1 = ui.calc_photon_flux(energy, None, id)
        pflux2 = ui.calc_photon_flux(None, energy, id)
        pflux3 = ui.calc_photon_flux(energy, energy, id)

        eflux1 = ui.calc_energy_flux(energy, None, id)
        eflux2 = ui.calc_energy_flux(None, energy, id)
        eflux3 = ui.calc_energy_flux(energy, energy, id)

    # Compare the energy fluxes in log space.
    # NOTE(review): when eflux_exp is 0 (energy outside the grid) these
    # take log10(0) -> -inf; presumably both sides end up -inf so the
    # comparison still passes - confirm.
    eflux1 = np.log10(eflux1)
    eflux2 = np.log10(eflux2)
    eflux3 = np.log10(eflux3)

    # Use equality here since the numbers should be the same
    assert pflux1 == pflux2
    assert pflux1 == pflux3
    assert eflux1 == eflux2
    assert eflux1 == eflux3

    # Note the "large" tolerance here
    eflux_exp = np.log10(eflux_exp)
    assert pflux1 == pytest.approx(pflux_exp, rel=5e-2)
    assert eflux1 == pytest.approx(eflux_exp, rel=1e-3)
# Update the residual map: data minus the current model prediction.
resid.data = sh.get_data_image().y - sh.get_model_image().y
resid_smooth = resid.smooth(width=3)
resid_smooth.plot();

# ### Iteratively find and fit additional sources
# Instantiate additional Gaussian components, and use them to iteratively fit sources, repeating the steps performed above for component g0. (The residuals map is shown after each additional source included in the model.) This takes some time...

# In[ ]:

# initialize components with fixed, zero amplitude
for i in range(1, 10):
    model = sh.create_model_component("gauss2d", "g" + str(i))
    model.ampl = 0
    sh.freeze(model)

# NOTE(review): g1..g9 are created above but only g0..g2 enter the fit -
# presumably spare components; confirm this is intentional.
gs = [g0, g1, g2]
sh.set_full_model(bkg + psf(g0 + g1 + g2) * expo)

# In[ ]:

# For each spare component: seed it at the residual peak, then fit the
# shape/amplitude first and the position second, freezing it afterwards.
get_ipython().run_cell_magic('time', '', 'for i in range(1, len(gs)):\n    yp, xp = np.unravel_index(\n        np.nanargmax(resid_smooth.data), resid_smooth.data.shape\n    )\n    ampl = resid_smooth.get_by_pix((xp, yp))[0]\n    gs[i].xpos, gs[i].ypos = xp, yp\n    gs[i].fwhm = 10\n    gs[i].ampl = ampl\n\n    sh.thaw(gs[i].fwhm)\n    sh.thaw(gs[i].ampl)\n    sh.fit()\n\n    sh.thaw(gs[i].xpos)\n    sh.thaw(gs[i].ypos)\n    sh.fit()\n    sh.freeze(gs[i])\n\n    resid.data = sh.get_data_image().y - sh.get_model_image().y\n    resid_smooth = resid.smooth(width=6)')

# In[ ]:
def test_psf_pars_are_frozen(self):
    """Loading a model as a PSF should freeze all of its parameters."""
    ui.create_model_component("beta2d", "p1")
    mdl = ui.get_model_component("p1")
    ui.load_psf('psf', mdl)
    self.assertEqual([], mdl.thawedpars)
def acis_bkg_model(detnam, root='bkg_', as_str=False):
    """Empirically derived background model for the ACIS detector, based on
    fitting a broken powerlaw plus 6 gaussians to ACIS background data.

    These models *require* that the corresponding ARF be set to a constant
    value of 100 and that the RMF be the correct RMF for the source and
    detector.  The model is only calibrated between 0.5 and 9 keV.

    The following code is an example::

      from acis_bkg_model import acis_bkg_model

      load_pha(1, 'acisf04938_000N002_r0043_pha3.fits')
      arf = get_arf()
      rmf = get_rmf()

      # Load the background ARF/RMF.  This must be done in addition
      # to load_pha, otherwise the source and background arfs are
      # always identical.
      load_bkg_arf(1, arf.name)
      load_bkg_rmf(1, rmf.name)
      bkg_arf = get_bkg_arf(1)
      bkg_rmf = get_bkg_rmf(1)

      # Stub the bkg_arf to be a constant.  This is required for use
      # of the acis_bkg_model models.
      bkg_arf.specresp = bkg_arf.specresp * 0 + 100.

      # Set scaling between background and source apertures
      # Roughly equivalent to
      #   bkg_scale = get_exposure() * get_backscal() / (get_exposure(bkg_id=1) * get_backscal(bkg_id=1))
      bkg_scale = get_data(1).sum_background_data(lambda x,y: 1)

      # Set source and background models.  This source is on ACIS-I CCDID = 2 (acis2i).
      bkg_model = const1d.c1 * acis_bkg_model('acis2i')
      set_full_model(rsp(powlaw1d.pow1) + bkg_scale * bkg_rsp(bkg_model))
      set_bkg_full_model(bkg_rmf(bkg(arf(bkg_model))))

      set_full_model(powlaw1d.pow1)
      set_bkg_full_model(bkg_rmf(bkg_arf( const1d.c1 * acis_bkg_model('acis2i'))))

      fit()  # or fit_bkg() to only fit the background

    :param detnam: detector name 'acis<CCD_ID><aimpoint det: i or s>'
    :returns: sherpa model for background
    """
    from sherpa.astro import ui

    # pars is the module-level calibration table, keyed by detector name.
    global pars

    comps = (('powlaw1d', 'pow1'),
             ('powlaw1d', 'pow2'),
             ('gauss1d', 'g1'),
             ('gauss1d', 'g2'),
             ('gauss1d', 'g3'),
             ('gauss1d', 'g4'),
             ('gauss1d', 'g5'),
             ('gauss1d', 'g6'))

    # Create every component (prefixed by root) and freeze it: the
    # calibrated values are applied below, not fitted.
    model_comps = dict()
    for mtype, name in comps:
        ui.create_model_component(mtype, root + name)
        model_comp = model_comps[name] = ui.get_model_component(root + name)
        if mtype == 'gauss1d':
            model_comp.ampl.min = 0.0
        ui.freeze(model_comp)
        model_comp.integrate = True

    # Apply the per-detector calibration ('<comp>.<par>' -> value).
    if detnam in pars:
        for parname, parval in pars[detnam].items():
            name, attr = parname.split('.')
            setattr(model_comps[name], attr, parval)
    else:
        raise ValueError(
            'No background model available for "{0}".  Must be one of {1}'.
            format(detnam, sorted(pars.keys())))

    if as_str:
        out = ' + '.join([root + name for mtype, name in comps])
    else:
        mc = model_comps
        out = mc['pow1'] + mc['pow2'] + mc['g1'] + mc['g2'] + mc['g3'] + mc[
            'g4'] + mc['g5'] + mc['g6']

    return out
# Residual map: data minus current model, wrapped in a WCS-aware image.
data = sh.get_data_image().y - sh.get_model_image().y
resid = SkyImage(data=data, wcs=wcs)

resid_smo6 = resid.smooth(radius = 6)
resid_smo6.show(vmin = -0.5, vmax = 1)
resid_table.append(resid_smo6)

# ### Iteratively find and fit additional sources
# Instantiate additional Gaussian components, and use them to iteratively fit sources, repeating the steps performed above for component g0. (The residuals map is shown after each additional source included in the model.) This takes some time...

# In[6]:

for i in range(1,6):
    sh.create_model_component('gauss2d', 'g' + str(i))
gs = [g0, g1, g2, g3, g4, g5]
sh.set_full_model(bkg + psf(g0+g1+g2+g3+g4+g5) * expo)

for i in range(1, len(gs)) :
    gs[i].ampl = 0   # initialize components with fixed, zero amplitude
    sh.freeze(gs[i])

for i in range(1, len(gs)) :
    # Seed the next component at the brightest pixel of the residual map.
    maxcoord = resid_smo6.lookup_max()
    maxpix = resid_smo6.wcs_skycoord_to_pixel(maxcoord[0])
    gs[i].xpos = maxpix[0]
    gs[i].ypos = maxpix[1]
    gs[i].fwhm = 10
    # NOTE(review): this immediately overwrites the fwhm=10 just set with
    # the residual peak *value* - looks like it was meant to be
    # gs[i].ampl = maxcoord[1]; confirm before changing.
    gs[i].fwhm = maxcoord[1]
def test_load_pha2_compare_meg_order1(make_data_path): """Do we read in the MEG +/-1 orders?""" # The MEG -1 order is dataset 9 # The MEG +1 order is dataset 10 # pha2file = make_data_path('3c120_pha2') meg_p1file = make_data_path('3c120_meg_1.pha') meg_m1file = make_data_path('3c120_meg_-1.pha') ui.load_pha('meg_p1', meg_p1file) ui.load_pha('meg_m1', meg_m1file) orig_ids = set(ui.list_data_ids()) assert 'meg_p1' in orig_ids assert 'meg_m1' in orig_ids ui.load_pha(pha2file) for n, lbl in zip([9, 10], ["-1", "1"]): h = '3c120_meg_{}'.format(lbl) ui.load_arf(n, make_data_path(h + '.arf')) ui.load_rmf(n, make_data_path(h + '.rmf')) # check that loading the pha2 file doesn't overwrite existing # data new_ids = set(ui.list_data_ids()) for i in range(1, 13): orig_ids.add(i) assert orig_ids == new_ids # Check that the same model gives the same statistic # value; this should check that the data and response are # read in, that grouping and filtering work, and that # model evaluation is the same, without having to # check these steps individually. # # The model is not meant to be physically meaningful, # just one that reasonably represents the data and # can be evaluated without requiring XSPEC. # pmdl = ui.create_model_component('powlaw1d', 'pmdl') pmdl.gamma = 0.318 pmdl.ampl = 2.52e-3 ncts = 20 for i in [9, 10, "meg_m1", "meg_p1"]: ui.set_analysis(i, 'wave') ui.group_counts(i, ncts) ui.notice_id(i, 2, 12) ui.set_source(i, pmdl) ui.set_stat('chi2datavar') s9 = ui.calc_stat(9) s10 = ui.calc_stat(10) sm1 = ui.calc_stat('meg_m1') sp1 = ui.calc_stat('meg_p1') # Since these should be the same, we use an equality test # rather than approximation. At least until it becomes # a problem. # assert s9 == sm1 assert s10 == sp1 # The values were calculated using CIAO 4.9, Linux64, with # Python 3.5. # assert s9 == pytest.approx(1005.4378559390879) assert s10 == pytest.approx(1119.980439489647)
def acis_bkg_model(detnam, root='bkg_', as_str=False):
    """Empirically derived background model for the ACIS detector, based on
    fitting a broken powerlaw plus 6 gaussians to ACIS background data.

    These models *require* that the corresponding ARF be set to a constant
    value of 100 and that the RMF be the correct RMF for the source and
    detector.  The model is only calibrated between 0.5 and 9 keV.

    The following code is an example::

      from acis_bkg_model import acis_bkg_model

      load_pha(1, 'acisf04938_000N002_r0043_pha3.fits')
      arf = get_arf()
      rmf = get_rmf()

      # Load the background ARF/RMF.  This must be done in addition
      # to load_pha, otherwise the source and background arfs are
      # always identical.
      load_bkg_arf(1, arf.name)
      load_bkg_rmf(1, rmf.name)
      bkg_arf = get_bkg_arf(1)
      bkg_rmf = get_bkg_rmf(1)

      # Stub the bkg_arf to be a constant.  This is required for use
      # of the acis_bkg_model models.
      bkg_arf.specresp = bkg_arf.specresp * 0 + 100.

      # Set scaling between background and source apertures
      # Roughly equivalent to
      #   bkg_scale = get_exposure() * get_backscal() / (get_exposure(bkg_id=1) * get_backscal(bkg_id=1))
      bkg_scale = get_data(1).sum_background_data(lambda x,y: 1)

      # Set source and background models.  This source is on ACIS-I CCDID = 2 (acis2i).
      bkg_model = const1d.c1 * acis_bkg_model('acis2i')
      set_full_model(rsp(powlaw1d.pow1) + bkg_scale * bkg_rsp(bkg_model))
      set_bkg_full_model(bkg_rmf(bkg(arf(bkg_model))))

      set_full_model(powlaw1d.pow1)
      set_bkg_full_model(bkg_rmf(bkg_arf( const1d.c1 * acis_bkg_model('acis2i'))))

      fit()  # or fit_bkg() to only fit the background

    :param detnam: detector name 'acis<CCD_ID><aimpoint det: i or s>'
    :returns: sherpa model for background
    """
    comps = (('powlaw1d', 'pow1'),
             ('powlaw1d', 'pow2'),
             ('gauss1d', 'g1'),
             ('gauss1d', 'g2'),
             ('gauss1d', 'g3'),
             ('gauss1d', 'g4'),
             ('gauss1d', 'g5'),
             ('gauss1d', 'g6'))

    # Create every component (prefixed by root) and freeze it: the
    # calibrated values are applied below, not fitted.
    model_comps = dict()
    for mtype, name in comps:
        ui.create_model_component(mtype, root + name)
        # FIX: fetch the component via the documented accessor rather
        # than eval(root + name), which depended on the name having been
        # injected into this scope and breaks for arbitrary `root`.
        model_comp = model_comps[name] = ui.get_model_component(root + name)
        if mtype == 'gauss1d':
            model_comp.ampl.min = 0.0
        ui.freeze(model_comp)
        model_comp.integrate = True

    # Apply the per-detector calibration ('<comp>.<par>' -> value).
    if detnam in pars:
        for parname, parval in pars[detnam].items():
            name, attr = parname.split('.')
            setattr(model_comps[name], attr, parval)
    else:
        raise ValueError('No background model available for "{0}".  Must be one of {1}'.format(
            detnam, sorted(pars.keys())))

    if as_str:
        out = ' + '.join([root + name for mtype, name in comps])
    else:
        mc = model_comps
        out = mc['pow1'] + mc['pow2'] + mc['g1'] + mc['g2'] + mc['g3'] + mc['g4'] + mc['g5'] + mc['g6']

    return out
def test_fake_pha_basic(id, has_bkg, clean_astro_ui):
    """No background.

    See also test_fake_pha_add_background

    For simplicity we use perfect responses.

    A background dataset can be added, but it should
    not be used in the simulation.
    """
    channels = np.arange(1, 4, dtype=np.int16)
    counts = np.ones(3, dtype=np.int16)
    bcounts = 100 * counts

    ui.load_arrays(id, channels, counts, ui.DataPHA)
    ui.set_exposure(id, 100)

    if has_bkg:
        bkg = ui.DataPHA('bkg', channels, bcounts,
                         exposure=200, backscal=0.4)
        ui.set_bkg(id, bkg, bkg_id='faked-bkg')

    # Perfect (delta-function) response over three energy bins.
    ebins = np.asarray([1.1, 1.2, 1.4, 1.6])
    elo = ebins[:-1]
    ehi = ebins[1:]
    arf = ui.create_arf(elo, ehi)
    rmf = ui.create_rmf(elo, ehi, e_min=elo, e_max=ehi)

    mdl = ui.create_model_component('const1d', 'mdl')
    mdl.c0 = 2
    ui.set_source(id, mdl)

    ui.fake_pha(id, arf, rmf, 1000.0)

    faked = ui.get_data(id)
    assert faked.exposure == pytest.approx(1000.0)
    assert (faked.channel == channels).all()

    assert faked.name == 'faked'
    assert faked.get_arf().name == 'test-arf'
    assert faked.get_rmf().name == 'delta-rmf'

    # The background data should be left untouched by the simulation.
    if has_bkg and id is not None:
        assert faked.background_ids == ['faked-bkg']
        bkg = ui.get_bkg(id, 'faked-bkg')
        assert bkg.name == 'bkg'
        assert bkg.counts == pytest.approx(bcounts)
        assert bkg.exposure == pytest.approx(200)
    else:
        assert faked.background_ids == []

    # check we've faked counts (the scaling is such that it is
    # very improbable that this condition will fail)
    assert (faked.counts > counts).all()

    # For reference the predicted source signal is
    #    [200, 400, 400]
    #
    # What we'd like to say is that the predicted counts are
    # similar, but this is not easy to do. What we can try
    # is summing the counts (to average over the randomness)
    # and then a simple check
    #
    assert faked.counts.sum() > 200
def test_746(make_data_path, clean_astro_ui):
    """Test https://github.com/sherpa/sherpa/issues/746

    Something in #444 (reverted in #759) caused:

      - the fit to fail (niter=2)
      - the line amplitude not to change significantly
      - the statistic reported by the fit to be different to
        that returned by calc_stat

    Something with how the cache code handles analysis=wave
    appears to be the problem. This test takes the line data
    from 746 and adds it into the existing PHA2 file we have
    (3c120) to replicate the problem. Fortunately this
    wavelength range contains essentially no counts, so we
    can just add in the counts to make a fake line and check
    we can fit it.
    """
    ui.load_pha(make_data_path('3c120_pha2.gz'))
    ui.load_arf(10, make_data_path('3c120_meg_1.arf.gz'))
    ui.load_rmf(10, make_data_path('3c120_meg_1.rmf.gz'))

    # Add in the line
    d10 = ui.get_data(10)
    # BUG FIX: np.int was a deprecated alias for the builtin int
    # (removed in NumPy 1.24), so dtype=np.int raises an
    # AttributeError on modern NumPy. Use the builtin instead.
    idx = np.arange(4068, 4075, dtype=int)
    d10.counts[idx] = [1, 1, 1, 2, 3, 1, 1]

    # group the data
    ui.group_width(id=10, num=2)

    ui.set_analysis('wave')
    ui.notice(21.4, 21.7)

    # internal check that getting the expected data
    expected = np.zeros(32)
    expected[[9, 10, 11, 12]] = [2, 3, 4, 1]
    expected[26] = 1  # this count is from the 3c120 data
    d = d10.get_dep(filter=True)
    assert d == pytest.approx(expected)

    line = ui.create_model_component('gauss1d', 'name')

    # treat the line as a delta function
    line.fwhm.val = 0.00001
    line.fwhm.frozen = True
    line.pos = 21.6
    line.pos.frozen = True

    line.ampl = 2

    ui.set_source(10, line)

    # the original fit used levmar, so use that here
    # (it looks like it also fails with simplex)
    ui.set_method('levmar')
    ui.set_stat('cstat')

    sinit = ui.calc_stat(10)
    ui.fit(10)
    fr = ui.get_fit_results()
    sfinal = ui.calc_stat(10)

    # Did the
    #  - fit improve
    #  - take more than two iterations
    #  - report the same statistic values as calc_stat
    #  - change the fit parameter
    #
    assert sfinal < sinit

    assert fr.succeeded
    assert fr.nfev > 2

    assert fr.istatval == sinit
    assert fr.statval == sfinal

    assert line.ampl.val > 100
    assert len(fr.parvals) == 1
    assert fr.parvals[0] == line.ampl.val

    # some simple checks to throw in because we can
    assert fr.parnames == ('name.ampl', )
    assert fr.datasets == (10,)
    assert fr.statname == 'cstat'
    assert fr.methodname == 'levmar'
    assert fr.itermethodname == 'none'
    assert fr.numpoints == 32
    assert fr.dof == 31

    # Now add in some "absolute" checks to act as regression tests.
    # If these fail then it doesn't necessarily mean something bad
    # has happened.
    #
    assert fr.nfev == 15
    assert sinit == pytest.approx(82.72457294394245)
    assert sfinal == pytest.approx(15.39963248224592)
    assert line.ampl.val == pytest.approx(113.95646989927054)
def test_psf_pars_are_frozen(clean_astro_ui):
    """bug #12503"""
    ui.create_model_component("beta2d", "p1")
    mdl = ui.get_model_component("p1")
    ui.load_psf('psf', mdl)
    # loading a model as a PSF should freeze all its parameters
    assert mdl.thawedpars == []
def test_calc_flux_pha(id, lo, hi, make_data_path, clean_astro_ui):
    """Do calc_photon/energy_flux return the expected results: fluxes?

    This skips those combinations where only one of lo or hi
    is None, since this is handle by test_calc_flux_density_pha.

    Fluxes are in units of <value>/cm^2/s  {value=photon, erg}

    The checks are made against ranges that are chosen to cover
    matching the grid, a subset of the grid (with and without
    matching the start/end), partial overlaps, and no overlap.
    """
    infile = make_data_path('3c273.pi')

    # The 3c273 RMF was generated over the range 0.1 to 11 keV
    # (inclusive). By picking gamma = 1 the photon-flux
    # integral is just ampl * (log(ehi) - log(elo))
    # and the energy-flux ampl is ampl * (ehi - elo) * scale
    # where scale converts 1 keV to erg.
    #
    ampl = 1e-4

    # Clamp the requested range to the RMF coverage (0.1-11 keV).
    loval = 0.1 if lo is None else max(lo, 0.1)
    hival = 11.0 if hi is None else min(hi, 11.0)

    # expected fluxes; special case the handling of there being no
    # overlap between the user grid and the data grid.
    #
    if lo is not None and (lo > 11.0 or hi < 0.1):
        pflux_exp = 0.0
        eflux_exp = 0.0
    else:
        pflux_exp = ampl * (np.log(hival) - np.log(loval))
        eflux_exp = 1.602e-9 * ampl * (hival - loval)

    pl = ui.create_model_component('powlaw1d', 'pl')
    pl.ampl = ampl
    pl.gamma = 1

    if id is None:
        ui.load_pha(infile)
        ui.set_source(pl)
    else:
        ui.load_pha(id, infile)
        ui.set_source(id, pl)

    # Use a subset of the data range (to check that the calc routines
    # ignores them, ie uses the full 0.1 to 11 keV range.
    #
    ui.ignore(None, 0.5)
    ui.ignore(7, None)

    # Do not use named arguments, but assume positional arguments
    args = (lo, hi) if id is None else (lo, hi, id)
    pflux = ui.calc_photon_flux(*args)
    eflux = ui.calc_energy_flux(*args)

    # Since the energy fluxes are ~1e-12 we want to rescale the
    # value before comparison. Here we use a log transform.
    #
    eflux_exp = np.log10(eflux_exp)
    eflux = np.log10(eflux)

    assert pflux == pytest.approx(pflux_exp, rel=1e-3)
    assert eflux == pytest.approx(eflux_exp, rel=1e-4)