Example 1
def test_bug_276(make_data_path):
    ui.load_pha(make_data_path('3c273.pi'))
    ui.set_model('polynom1d.p1')
    ui.fit()
    ui.covar()
    scal = ui.get_covar_results().parmaxes
    ui.sample_flux(ui.get_model_component('p1'), 0.5, 1, num=5, correlated=False, scales=scal)
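
The example above feeds the one-sigma widths returned by covar() directly into sample_flux() as sampling scales. A minimal hedged sketch of the same pattern, with a fallback for the case where the covariance method cannot bracket a parameter (None entries in parmaxes); the wrapper name is hypothetical:

import sherpa.astro.ui as ui

def sample_flux_with_covar_scales(comp, lo, hi, num=5):
    # Run the covariance error estimate and reuse its one-sigma widths as the
    # sampling scales for uncorrelated flux sampling.
    ui.covar()
    scales = ui.get_covar_results().parmaxes
    if any(s is None for s in scales):
        # Covariance failed for at least one parameter: let sample_flux
        # derive its own scales instead of passing None values through.
        return ui.sample_flux(comp, lo, hi, num=num, correlated=False)
    return ui.sample_flux(comp, lo, hi, num=num, correlated=False, scales=scales)
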
Example 2
def test_background():

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    kT_sim = 1.0
    Z_sim = 0.0
    norm_sim = 4.0e-2
    nH_sim = 0.04
    redshift = 0.01

    exp_time = (200., "ks")
    area = (1000., "cm**2")

    wcs = create_dummy_wcs()

    abs_model = WabsModel(nH_sim)

    events = EventList.create_empty_list(exp_time, area, wcs)

    spec_model = TableApecModel(0.05, 12.0, 5000, thermal_broad=False)
    spec = spec_model.return_spectrum(kT_sim, Z_sim, redshift, norm_sim)

    new_events = events.add_background(spec_model.ebins, spec, prng=prng,
                                       absorb_model=abs_model)

    new_events = ACIS_I(new_events, rebin=False, convolve_psf=False, prng=prng)

    new_events.write_spectrum("background_evt.pi", clobber=True)

    os.system("cp %s ." % new_events.parameters["ARF"])
    os.system("cp %s ." % new_events.parameters["RMF"])

    load_user_model(mymodel, "wapec")
    add_user_pars("wapec", ["nH", "kT", "metallicity", "redshift", "norm"],
                  [0.01, 4.0, 0.2, redshift, norm_sim*0.8],
                  parmins=[0.0, 0.1, 0.0, -20.0, 0.0],
                  parmaxs=[10.0, 20.0, 10.0, 20.0, 1.0e9],
                  parfrozen=[False, False, False, True, False])

    load_pha("background_evt.pi")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.5, 8.0:")
    set_model("wapec")
    fit()
    set_covar_opt("sigma", 1.6)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0]-nH_sim) < res.parmaxes[0]
    assert np.abs(res.parvals[1]-kT_sim) < res.parmaxes[1]
    assert np.abs(res.parvals[2]-Z_sim) < res.parmaxes[2]
    assert np.abs(res.parvals[3]-norm_sim) < res.parmaxes[3]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
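
These simulation tests repeat the same check for every parameter: the fitted value must lie within the covariance error of the simulated input. A hedged sketch of a hypothetical helper (not part of the original test suite) that expresses the pattern as a single loop:

def assert_within_errors(res, truths):
    # res is the object returned by get_covar_results(); truths holds the
    # simulated input values in the same order as res.parvals.
    for val, err, truth in zip(res.parvals, res.parmaxes, truths):
        assert err is not None
        assert abs(val - truth) < err

With this, the four assertions above would collapse to assert_within_errors(res, [nH_sim, kT_sim, Z_sim, norm_sim]).
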
Example 3
def test_bug_276(make_data_path):
    ui.load_pha(make_data_path('3c273.pi'))
    ui.set_model('polynom1d.p1')
    ui.fit()
    ui.covar()
    scal = ui.get_covar_results().parmaxes
    ui.sample_flux(ui.get_model_component('p1'),
                   0.5,
                   1,
                   num=5,
                   correlated=False,
                   scales=scal)
Example 4
    def fit(self, do_covar=False, do_conf=False):
        """Perform fit using profile likelihood technique for background estimation and subtraction."""
        listnames = self.get_noticed_list()  # [ids.name for ids in self.listids[self.noticed_ids]]
        if len(listnames) > 0:
            wfit(listnames)
            print_fit()
            if do_covar is True:
                sau.covar(*listnames)
            if do_conf is True:
                sau.set_conf_opt('max_rstat', 10000)
                sau.conf(*listnames)
                print_conf()
        else:
            print("Empty noticed runs list. No fit")
Example 5
    def run_hspec_fit(self, model, thres_low, thres_high):
        """Run the gammapy.hspec fit

        Parameters
        ----------
        model : str
            Sherpa model
        thres_low : `~gammapy.spectrum.Energy`
            Lower threshold of the spectral fit
        thres_high : `~gammapy.spectrum.Energy`
            Upper threshold of the spectral fit
        """

        log.info("Starting HSPEC")
        import sherpa.astro.ui as sau
        from ..hspec import wstat
        from sherpa.models import PowLaw1D

        if model == 'PL':
            p1 = PowLaw1D('p1')
            p1.gamma = 2.2
            p1.ref = 1e9
            p1.ampl = 6e-19
        else:
            raise ValueError('Desired Model is not defined')

        thres = thres_low.to('keV').value
        emax = thres_high.to('keV').value

        sau.freeze(p1.ref)
        sau.set_conf_opt("max_rstat", 100)

        list_data = []
        for obs in self.observations:
            datid = obs.phafile.parts[-1][7:12]
            sau.load_data(datid, str(obs.phafile))
            sau.notice_id(datid, thres, emax)
            sau.set_source(datid, p1)
            list_data.append(datid)
        wstat.wfit(list_data)
        sau.covar()
        fit_val = sau.get_covar_results()
        fit_attrs = ('parnames', 'parvals', 'parmins', 'parmaxes')
        fit = dict((attr, getattr(fit_val, attr)) for attr in fit_attrs)
        fit = self.apply_containment(fit)
        sau.clean()
        self.fit = fit
Example 6
def get_fluxes(ids, z, fluxes, fitstats, nsims=10):
    shp.covar()
    dataScale = shp.get_covar_results().parmaxes

    if fitstats.rstat <= 3.0:
        if all(d is None for d in dataScale):
            fx = _calc_fluxes(ids, z=z, nsims=nsims)
            fluxes["fx"], fluxes["fx_obs"], fluxes["fx_int"] = fx[:, 0]

        else:
            fx = _calc_fluxes(ids, z=z, dataScale=dataScale, nsims=nsims)
            fluxes["fx"], fluxes["fx_ErrMin"], fluxes["fx_ErrMax"] = fx[0, :]
            fluxes["fx_obs"], fluxes["fx_obs_ErrMin"], fluxes[
                "fx_obs_ErrMax"] = fx[1, :]
            fluxes["fx_int"], fluxes["fx_int_ErrMin"], fluxes[
                "fx_int_ErrMax"] = fx[2, :]
    else:
        fx = _calc_fluxes(ids, z=z, nsims=nsims)
        fluxes["fx"], fluxes["fx_obs"], fluxes["fx_int"] = fx[:, 0]

    return fluxes
Example 7
def plaw_fit(alpha_sim):

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    nH_sim = 0.02
    norm_sim = 1.0e-4
    redshift = 0.01

    exp_time = 5.0e4
    area = 40000.0
    inst_name = "hdxi"

    spec = Spectrum.from_powerlaw(alpha_sim, redshift, norm_sim)
    spec.apply_foreground_absorption(nH_sim)
    e = spec.generate_energies(exp_time, area)

    pt_src = PointSourceModel(30.0, 45.0, e.size)

    write_photon_list("plaw_model",
                      "plaw_model",
                      e.flux,
                      pt_src.ra,
                      pt_src.dec,
                      e,
                      clobber=True)

    instrument_simulator("plaw_model_simput.fits",
                         "plaw_model_evt.fits",
                         exp_time,
                         inst_name, [30.0, 45.0],
                         astro_bkgnd=None,
                         instr_bkgnd_scale=0.0)

    inst = get_instrument_from_registry(inst_name)
    arf = AuxiliaryResponseFile(inst["arf"])
    rmf = RedistributionMatrixFile(inst["rmf"])
    os.system("cp %s ." % arf.filename)
    os.system("cp %s ." % rmf.filename)

    write_spectrum("plaw_model_evt.fits", "plaw_model_evt.pha", clobber=True)

    load_user_model(mymodel, "wplaw")
    add_user_pars("wplaw", ["nH", "norm", "redshift", "alpha"],
                  [0.01, norm_sim * 0.8, redshift, 0.9],
                  parmins=[0.0, 0.0, 0.0, 0.1],
                  parmaxs=[10.0, 1.0e9, 10.0, 10.0],
                  parfrozen=[False, False, True, False])

    load_pha("plaw_model_evt.pha")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.5, 9.0:")
    set_model("wplaw")
    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0] - nH_sim) < res.parmaxes[0]
    assert np.abs(res.parvals[1] - norm_sim) < res.parmaxes[1]
    assert np.abs(res.parvals[2] - alpha_sim) < res.parmaxes[2]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
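
Several of these tests call set_covar_opt("sigma", 1.645) before covar(), which rescales the reported parmins/parmaxes from one-sigma to 1.645-sigma bounds, roughly a 90% interval under the Gaussian approximation used by the covariance method. A quick standard-library check of that coverage:

from math import erf, sqrt

# Two-sided coverage of +/-1.645 sigma for a normal distribution (about 0.90).
print(erf(1.645 / sqrt(2)))
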
Example 8
def fit_sherpa(obsid_list, redshift, nH_Gal, energy, min_counts=25, kT_guess=3, Ab_guess=1, fix_nH_Gal=True, fix_abund=False, find_errors=True):

    spectra = []
    for obs in obsid_list:
        temp = glob.glob('xaf_*_' + obs + '.pi')  # get spectra of regions
        spectra.append(temp)  # spectra will be made of lists which have xaf_*_obs.pi filenames - access with spectra[i][j]
    spectra.sort()
    num_obs = len(obsid_list)
    num_reg = len(temp)
    filename = 'spectra_wabs_mekal.dat'
    results_file = open(filename, "w")
    results_file.write('# Fit results for wabs*mekal (zeros indicate that no fitting was performed)\n')
    results_file.write('# Reg_no.  kT  kT_loerr kT_hierr   Z    Z_loerr  Z_hierr  norm    norm_loerr norm_hierr nH_Gal  nH_loerr nH_hierr red_chisq total_counts num_bins\n')
    for i in range(num_reg):
        sherpa.clean()  # clean everything
        cnts = numpy.zeros(num_obs)  # make array of zeros with index length same as num_obs to store counts
        max_rate = numpy.zeros(num_obs)  # max count rate [counts/s/keV]
        data_set = 0  # data set number
        good_src_ids = numpy.zeros(num_obs, dtype=int) - 1

        for j in range(num_obs):
            sherpa.load_pha(data_set, spectra[j][i])  # load xaf_#_obs_####.pi and .arf and .rmf files.
            sherpa.ignore_id(data_set, 0.0, energy[0])
            sherpa.ignore_id(data_set, energy[1], None)
            cnts[j] = sherpa.calc_data_sum(energy[0], energy[1], data_set)
            cnt_rate = sherpa.get_rate(data_set, filter=True)
            if len(cnt_rate) == 0:
                max_rate[j] = 0.0  # when few counts (<50), get_rate can return zero-length array
            else:
                max_rate[j] = numpy.max(cnt_rate)
            sherpa.subtract(data_set)  # subtract background
            sherpa.set_source(data_set, sherpa.xswabs.abs1 * sherpa.xsmekal.plsm1)  # 1 temperature mekal model fit
            good_src_ids[j] = data_set
            data_set += 1  # same run for region but different obs

        # Filter out ignored obs
        good_src_ids_indx = numpy.where(good_src_ids >= 0)
        good_src_ids = good_src_ids[good_src_ids_indx]
        max_rate = max_rate[good_src_ids_indx]
        cnts = cnts[good_src_ids_indx]

        totcnts = numpy.sum(cnts)
        if totcnts >= min_counts:
            print('Fitting spectra in region: ' + str(i))
            abs1.nH = nH_Gal
            abs1.cache = 0
            if fix_nH_Gal:
                sherpa.freeze(abs1.nH)
            else:
                sherpa.thaw(abs1.nH)
            plsm1.kt = kT_guess
            sherpa.thaw(plsm1.kt)
            plsm1.Abundanc = Ab_guess
            if fix_abund:
                sherpa.freeze(plsm1.Abundanc)
            else:
                sherpa.thaw(plsm1.Abundanc)
            plsm1.redshift = redshift
            sherpa.freeze(plsm1.redshift)
            plsm1.cache = 0

            sherpa.fit()
            fit_result = sherpa.get_fit_results()
            red_chi2 = fit_result.rstat
            num_bins = fit_result.numpoints
            if fix_nH_Gal:
                nH = nH_Gal
                kT = fit_result.parvals[0]
                if fix_abund:
                    Z = Ab_guess
                    norm = fit_result.parvals[1]
                else:
                    Z = fit_result.parvals[1]
                    norm = fit_result.parvals[2]
            else:
                nH = fit_result.parvals[0]
                kT = fit_result.parvals[1]
                if fix_abund:
                    Z = Ab_guess
                    norm = fit_result.parvals[2]
                else:
                    Z = fit_result.parvals[2]
                    norm = fit_result.parvals[3]
            del fit_result

            if find_errors:
                sherpa.covar()
                covar_result = sherpa.get_covar_results()
                if fix_nH_Gal:
                    nH_loerr = 0.0
                    nH_hierr = 0.0
                    kT_loerr = covar_result.parmins[0]
                    kT_hierr = covar_result.parmaxes[0]
                    if fix_abund:
                        Z_loerr = 0.0
                        Z_hierr = 0.0
                        norm_loerr = covar_result.parmins[1]
                        norm_hierr = covar_result.parmaxes[1]
                    else:
                        Z_loerr = covar_result.parmins[1]
                        Z_hierr = covar_result.parmaxes[1]
                        norm_loerr = covar_result.parmins[2]
                        norm_hierr = covar_result.parmaxes[2]
                else:
                    nH_loerr = covar_result.parmins[0]
                    nH_hierr = covar_result.parmaxes[0]
                    kT_loerr = covar_result.parmins[1]
                    kT_hierr = covar_result.parmaxes[1]
                    if fix_abund:
                        Z_loerr = 0.0
                        Z_hierr = 0.0
                        norm_loerr = covar_result.parmins[2]
                        norm_hierr = covar_result.parmaxes[2]
                    else:
                        Z_loerr = covar_result.parmins[2]
                        Z_hierr = covar_result.parmaxes[2]
                        norm_loerr = covar_result.parmins[3]
                        norm_hierr = covar_result.parmaxes[3]
                del covar_result

                # Check for failed errors (= None) and set them to +/- best-fit value
                if not fix_nH_Gal:
                    if nH_loerr is None: nH_loerr = -nH  # 'is None' check (was '== None')
                    if nH_hierr is None: nH_hierr = nH
                if kT_loerr is None: kT_loerr = -kT
                if kT_hierr is None: kT_hierr = kT
                if not fix_abund:
                    if Z_loerr is None: Z_loerr = -Z
                    if Z_hierr is None: Z_hierr = Z
                if norm_loerr is None: norm_loerr = -norm
                if norm_hierr is None: norm_hierr = norm
            else:
                kT_loerr = 0.0
                Z_loerr = 0.0
                nH_loerr = 0.0
                norm_loerr = 0.0
                kT_hierr = 0.0
                Z_hierr = 0.0
                nH_hierr = 0.0
                norm_hierr = 0.0

        else:  # if total counts < min_counts, just write zeros
            print('\n Warning: no fit performed for region: ' + str(i))
            print('\n Spectra have insufficient counts after filtering or do not exist.')
            print('\n --> All parameters for this region set to 0.0.')
            kT = 0.0
            Z = 0.0
            nH = 0.0
            norm = 0.0
            kT_loerr = 0.0
            Z_loerr = 0.0
            nH_loerr = 0.0
            norm_loerr = 0.0
            kT_hierr = 0.0
            Z_hierr = 0.0
            nH_hierr = 0.0
            norm_hierr = 0.0
            red_chi2 = 0.0
            num_bins = 0

        reg_id = spectra[0][i].split('_')  # Splits string after every underscore so that region number can be accessed. reg_id[1] is accessed because that is the region number after 'xaf'
        results_file.write('%7r %7.4f %7.4f %7.4f %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %7.4f %8.1f %8r\n' % (int(reg_id[1]), kT, kT_loerr, kT_hierr, Z, Z_loerr, Z_hierr, norm, norm_loerr, norm_hierr, nH, nH_loerr, nH_hierr, red_chi2, totcnts, num_bins))  # Write all data to a file

    results_file.close()
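
The None handling above (replacing a failed covariance bound by plus or minus the best-fit value) could be factored into a small helper; a hedged sketch with a hypothetical name:

def covar_bounds(covar_result, index, best_val):
    # Return (lo, hi) covariance bounds for the parameter at `index`, falling
    # back to -best_val / +best_val when covar() reports None for that side.
    lo = covar_result.parmins[index]
    hi = covar_result.parmaxes[index]
    if lo is None:
        lo = -best_val
    if hi is None:
        hi = best_val
    return lo, hi
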
Example 9
filename = 'skymap_ex.fits'
nomposstr = '05h34m31.94s 22d00m52.2s'
header = fits.getheader(filename)
proj = wcs.Projection(header)
xc, yc = float(header['NAXIS1']) / 2., float(header['NAXIS2']) / 2.
ui.load_image(filename)
ui.notice2d('circle({0}, {1}, {2})'.format(xc, yc, float(header['NAXIS2']) / 4.))
ui.set_source(ui.gauss2d.g1 + ui.gauss2d.g2)
g1.xpos = xc
g1.ypos = yc
g2.fwhm = g1.fwhm = 3.
ui.link(g2.xpos, g1.xpos)
ui.link(g2.ypos, g1.ypos)
g2.ampl = 50.
g1.ampl = 50.
ui.guess()
ui.fit()
ui.image_fit()
ui.covar()
conf = ui.get_covar_results()
conf_dict = dict([(n,(v, l, h)) for n,v,l,h in
                   zip(conf.parnames, conf.parvals, conf.parmins, conf.parmaxes)])
x, y = proj.toworld((conf_dict['g1.xpos'][0], conf_dict['g1.ypos'][0]))
xmin, ymin = proj.toworld((conf_dict['g1.xpos'][0] + conf_dict['g1.xpos'][1],
                           conf_dict['g1.ypos'][0] + conf_dict['g1.ypos'][1]))
xmax, ymax = proj.toworld((conf_dict['g1.xpos'][0] + conf_dict['g1.xpos'][2],
                           conf_dict['g1.ypos'][0] + conf_dict['g1.ypos'][2]))
nompos = positions.str2pos(nomposstr, proj)    
print('{0} ({1}-{2}) vs {3}'.format(x, xmin, xmax, nompos[0][0][0]))
print('{0} ({1}-{2}) vs {3}'.format(y, ymin, ymax, nompos[0][0][1]))
Example 10
def do_beta_model(source, v_field, em_field):

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    ds = source.ds

    A = 3000.
    exp_time = 1.0e5
    redshift = 0.05
    nH_sim = 0.02

    apec_model = TableApecModel(0.1, 11.5, 20000, thermal_broad=False)
    abs_model = TBabsModel(nH_sim)

    sphere = ds.sphere("c", (0.5, "Mpc"))

    kT_sim = source.kT
    Z_sim = source.Z

    thermal_model = ThermalSourceModel(apec_model,
                                       Zmet=Z_sim,
                                       prng=source.prng)
    photons = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                          thermal_model)

    D_A = photons.parameters["FiducialAngularDiameterDistance"]

    norm_sim = sphere.quantities.total_quantity(em_field)
    norm_sim *= 1.0e-14 / (4 * np.pi * D_A * D_A * (1. + redshift) *
                           (1. + redshift))
    norm_sim = float(norm_sim.in_cgs())

    v1, v2 = sphere.quantities.weighted_variance(v_field, em_field)
    sigma_sim = float(v1.in_units("km/s"))
    mu_sim = -float(v2.in_units("km/s"))

    events = photons.project_photons("z",
                                     absorb_model=abs_model,
                                     prng=source.prng)
    events = ACIS_I(events, rebin=False, convolve_psf=False, prng=source.prng)

    events.write_spectrum("beta_model_evt.pi", clobber=True)

    os.system("cp %s ." % events.parameters["ARF"])
    os.system("cp %s ." % events.parameters["RMF"])

    load_user_model(mymodel, "tbapec")
    add_user_pars("tbapec", ["nH", "kT", "metallicity", "redshift", "norm"],
                  [0.01, 4.0, 0.2, redshift, norm_sim * 0.8],
                  parmins=[0.0, 0.1, 0.0, -20.0, 0.0],
                  parmaxs=[10.0, 20.0, 10.0, 20.0, 1.0e9],
                  parfrozen=[False, False, False, True, False])

    load_pha("beta_model_evt.pi")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.5, 8.0:")
    set_model("tbapec")
    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0] - nH_sim) < res.parmaxes[0]
    assert np.abs(res.parvals[1] - kT_sim) < res.parmaxes[1]
    assert np.abs(res.parvals[2] - Z_sim) < res.parmaxes[2]
    assert np.abs(res.parvals[3] - norm_sim) < res.parmaxes[3]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example 11
g0.xpos = maxpix[0]
g0.ypos = maxpix[1]
sh.freeze(g0.xpos, g0.ypos)
expo.ampl = 1e-9
sh.freeze(expo)
sh.thaw(g0.fwhm, g0.ampl)
g0.fwhm = 10
g0.ampl = maxcoord[1]
sh.fit()

# In[57]:

sh.thaw(g0.xpos, g0.ypos)
sh.fit()
sh.covar()
sh.freeze(g0)

data = sh.get_data_image().y - sh.get_model_image().y
resid = SkyImage(data=data, wcs=ref_image.wcs)

resid_smo6 = resid.smooth(radius=6)
resid_smo6.show(vmin=-0.5, vmax=1, add_cbar=True)
resid_table.append(resid_smo6)

# In[58]:

from astropy.stats import gaussian_fwhm_to_sigma
coord = resid.wcs_pixel_to_skycoord(g0.xpos.val, g0.ypos.val)
pix_scale = resid.wcs_pixel_scale()[0].deg
sigma = g0.fwhm.val * pix_scale * gaussian_fwhm_to_sigma
Example 12
def plaw_fit(alpha_sim):

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    bms = BetaModelSource()
    ds = bms.ds

    def _hard_emission(field, data):
        return YTQuantity(1.0e-18, "s**-1*keV**-1")*data["density"]*data["cell_volume"]/mp
    ds.add_field(("gas", "hard_emission"), function=_hard_emission, units="keV**-1*s**-1")

    nH_sim = 0.02
    abs_model = WabsModel(nH_sim)

    A = YTQuantity(2000., "cm**2")
    exp_time = YTQuantity(2.0e5, "s")
    redshift = 0.01

    sphere = ds.sphere("c", (100.,"kpc"))

    plaw_model = PowerLawSourceModel(1.0, 0.01, 11.0, "hard_emission", 
                                     alpha_sim, prng=prng)

    photons = PhotonList.from_data_source(sphere, redshift, A, exp_time,
                                          plaw_model)

    D_A = photons.parameters["FiducialAngularDiameterDistance"]
    dist_fac = 1.0/(4.*np.pi*D_A*D_A*(1.+redshift)**3).in_cgs()
    norm_sim = float((sphere["hard_emission"]).sum()*dist_fac.in_cgs())*(1.+redshift)

    events = photons.project_photons("z", absorb_model=abs_model,
                                     prng=bms.prng,
                                     no_shifting=True)
    events = ACIS_I(events, rebin=False, convolve_psf=False, prng=bms.prng)
    events.write_spectrum("plaw_model_evt.pi", clobber=True)

    os.system("cp %s ." % events.parameters["ARF"])
    os.system("cp %s ." % events.parameters["RMF"])

    load_user_model(mymodel, "wplaw")
    add_user_pars("wplaw", ["nH", "norm", "redshift", "alpha"],
                  [0.01, norm_sim*1.1, redshift, 0.9], 
                  parmins=[0.0, 0.0, 0.0, 0.1],
                  parmaxs=[10.0, 1.0e9, 10.0, 10.0],
                  parfrozen=[False, False, True, False])

    load_pha("plaw_model_evt.pi")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.6, 7.0:")
    set_model("wplaw")
    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0]-nH_sim) < res.parmaxes[0]
    assert np.abs(res.parvals[1]-norm_sim) < res.parmaxes[1]
    assert np.abs(res.parvals[2]-alpha_sim) < res.parmaxes[2]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example 13
    def simulate_null_images(img_file: str,
                             psf_file: str,
                             n_null_sims: int,
                             no_core: bool = False,
                             mcmciter: int = 5000,
                             **kwargs) -> None:
        """
        Simulates a specified number of baseline images for a given input observation and a psf file

        :param img_file: Path to the input image file
        :param psf_file: Path to the psf image file
        :param n_null_sims: Number of baseline replicates to be simulated
        :param no_core: If True, generate baseline replicates with a flat background only; by default a point source is included at the location of the core
        :param mcmciter: The number of MCMC samples to draw for simulating the baselines
        """
        print("Creating the null file")
        clean()
        set_stat("cstat")
        set_method("simplex")
        load_image(img_file)
        load_psf("mypsf", psf_file)
        set_psf(mypsf)

        if no_core:
            set_model(const2d.c0)
            set_par(c0.c0, min=0)
        else:
            set_model(gauss2d.q1 + const2d.c0)
            set_par(c0.c0, min=0)
            # set_par(q1.fwhm,max=0.5)
            guess(q1)
        fit()
        results = get_fit_results()
        save("core_source_fit.save", clobber=True)
        save_source("null_q1_c1.fits", clobber=True)
        covar()

        if no_core:
            for i in range(n_null_sims):
                fake()
                save_image("sim_null_{}.fits".format(i), clobber=True)
            clean()
            return

        normgauss1d.g1  # instantiate a 1D normalized Gaussian component named g1 (used below as a prior)
        g1.pos = q1.fwhm
        g1.fwhm = get_covar_results().parmaxes[0]

        # check if there is a valid upper bound.
        print(get_covar_results())
        if (get_covar_results().parmaxes[0] is None
                or get_covar_results().parmins[1] is None
                or get_covar_results().parmins[0] is None):
            for i in range(n_null_sims):
                fake()
                save_image("sim_null_{}.fits".format(i), clobber=True)
            clean()
            return
        # if not go for the regular
        set_prior(q1.fwhm, g1)
        set_sampler_opt("defaultprior", False)
        set_sampler_opt("priorshape", [True, False, False, False, False])
        set_sampler_opt("originalscale", [True, True, True, True, True])
        if mcmciter < n_null_sims * 100:
            mcmciter = n_null_sims * 100

        # the following code throws an error sometimes #bug
        try:
            stats, accept, params = get_draws(1, niter=mcmciter)
        except Exception:
            params = [np.repeat(q1.fwhm.val, mcmciter)]

        # print('Simulating the null files')
        for i in range(n_null_sims):
            set_par(q1.fwhm, params[0][(i + 1) * 100 - 1])
            fake()
            save_image("sim_null_{}.fits".format(i), clobber=True)
        save_all(outfile="lira_input_baseline_sim.log", clobber=True)
        clean()
Example 14
sau.set_stat('cstat')
# Ask for high-precision results
sau.set_method_opt('ftol', 1e-20)
sau.set_covar_opt('eps', 1e-20)

# Set start parameters close to simulation values to make the fit converge
sau.set_par('source.xpos', 101)
sau.set_par('source.ypos', 101)
sau.set_par('source.ampl', 1.1e3)
sau.set_par('source.fwhm', 10)
sau.set_par('background.c0', 1.1)

# Run fit and covariance estimation
# Results are automatically printed to the screen
sau.fit()
sau.covar()

# Sherpa uses fwhm instead of sigma as extension parameter ... need to convert
# http://cxc.harvard.edu/sherpa/ahelp/gauss2d.html
fwhm_to_sigma = 1. / np.sqrt(8 * np.log(2))
cov = sau.get_covar_results()
sigma = fwhm_to_sigma * cov.parvals[0]
sigma_err = fwhm_to_sigma * cov.parmaxes[0]
print('sigma: {0} +- {1}'.format(sigma, sigma_err))

# Compute correlation coefficient for sigma and norm
c = cov.extra_output
c_norm = c[3, 3]
c_sigma = fwhm_to_sigma ** 2 * c[0, 0]
c_norm_sigma = fwhm_to_sigma * c[0, 3]
corr_norm_sigma = c_norm_sigma / np.sqrt(c_norm * c_sigma)
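
The snippet above picks individual covariance terms out of cov.extra_output by index. A hedged sketch (not from the original) of building the full correlation matrix in one step, assuming extra_output holds the covariance matrix of all thawed parameters:

import numpy as np
import sherpa.astro.ui as sau

cov = sau.get_covar_results()                    # as above
cov_matrix = np.asarray(cov.extra_output)        # (npar, npar) covariance matrix
errs = np.sqrt(np.diag(cov_matrix))              # one-sigma error per parameter
corr_matrix = cov_matrix / np.outer(errs, errs)  # correlation coefficients in [-1, 1]
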
Example 15
def fit_sherpa(obsid_list,
               redshift,
               nH_Gal,
               energy,
               min_counts=25,
               kT_guess=3,
               Ab_guess=1,
               fix_nH_Gal=True,
               fix_abund=False,
               find_errors=True):

    spectra = []
    for obs in obsid_list:
        temp = glob.glob('xaf_*_' + obs + '.pi')  # get spectra of regions
        spectra.append(
            temp
        )  # spectra will be made of lists which have xaf_*_obs.pi filenames - access with spectra[i][j]
    spectra.sort()
    num_obs = len(obsid_list)
    num_reg = len(temp)
    filename = 'spectra_wabs_mekal.dat'
    results_file = open(filename, "w")
    results_file.write(
        '# Fit results for wabs*mekal (zeros indicate that no fitting was performed)\n'
    )
    results_file.write(
        '# Reg_no.  kT  kT_loerr kT_hierr   Z    Z_loerr  Z_hierr  norm    norm_loerr norm_hierr nH_Gal  nH_loerr nH_hierr red_chisq total_counts num_bins\n'
    )
    for i in range(num_reg):
        sherpa.clean()  # clean everything
        cnts = numpy.zeros(
            num_obs
        )  # make array of zeros with index length same as num_obs to store counts
        max_rate = numpy.zeros(num_obs)  # max count rate [counts/s/keV]
        data_set = 0  # data set number
        good_src_ids = numpy.zeros(num_obs, dtype=int) - 1

        for j in range(num_obs):
            sherpa.load_pha(data_set, spectra[j][i])  # load xaf_#_obs_####.pi and .arf and .rmf files.
            sherpa.ignore_id(data_set, 0.0, energy[0])
            sherpa.ignore_id(data_set, energy[1], None)
            cnts[j] = sherpa.calc_data_sum(energy[0], energy[1], data_set)
            cnt_rate = sherpa.get_rate(data_set, filter=True)
            if len(cnt_rate) == 0:
                max_rate[j] = 0.0  # when few counts (<50), get_rate can return zero-length array
            else:
                max_rate[j] = numpy.max(cnt_rate)
            sherpa.subtract(data_set)  # subtract background
            sherpa.set_source(data_set, sherpa.xswabs.abs1 * sherpa.xsmekal.plsm1)  # 1 temperature mekal model fit
            good_src_ids[j] = data_set
            data_set += 1  # same run for region but different obs

        # Filter out ignored obs
        good_src_ids_indx = numpy.where(good_src_ids >= 0)
        good_src_ids = good_src_ids[good_src_ids_indx]
        max_rate = max_rate[good_src_ids_indx]
        cnts = cnts[good_src_ids_indx]

        totcnts = numpy.sum(cnts)
        if totcnts >= min_counts:
            print('Fitting spectra in region: ' + str(i))
            abs1.nH = nH_Gal
            abs1.cache = 0
            if fix_nH_Gal:
                sherpa.freeze(abs1.nH)
            else:
                sherpa.thaw(abs1.nH)
            plsm1.kt = kT_guess
            sherpa.thaw(plsm1.kt)
            plsm1.Abundanc = Ab_guess
            if fix_abund:
                sherpa.freeze(plsm1.Abundanc)
            else:
                sherpa.thaw(plsm1.Abundanc)
            plsm1.redshift = redshift
            sherpa.freeze(plsm1.redshift)
            plsm1.cache = 0

            sherpa.fit()
            fit_result = sherpa.get_fit_results()
            red_chi2 = fit_result.rstat
            num_bins = fit_result.numpoints
            if fix_nH_Gal:
                nH = nH_Gal
                kT = fit_result.parvals[0]
                if fix_abund:
                    Z = Ab_guess
                    norm = fit_result.parvals[1]
                else:
                    Z = fit_result.parvals[1]
                    norm = fit_result.parvals[2]
            else:
                nH = fit_result.parvals[0]
                kT = fit_result.parvals[1]
                if fix_abund:
                    Z = Ab_guess
                    norm = fit_result.parvals[2]
                else:
                    Z = fit_result.parvals[2]
                    norm = fit_result.parvals[3]
            del fit_result

            if find_errors:
                sherpa.covar()
                covar_result = sherpa.get_covar_results()
                if fix_nH_Gal:
                    nH_loerr = 0.0
                    nH_hierr = 0.0
                    kT_loerr = covar_result.parmins[0]
                    kT_hierr = covar_result.parmaxes[0]
                    if fix_abund:
                        Z_loerr = 0.0
                        Z_hierr = 0.0
                        norm_loerr = covar_result.parmins[1]
                        norm_hierr = covar_result.parmaxes[1]
                    else:
                        Z_loerr = covar_result.parmins[1]
                        Z_hierr = covar_result.parmaxes[1]
                        norm_loerr = covar_result.parmins[2]
                        norm_hierr = covar_result.parmaxes[2]
                else:
                    nH_loerr = covar_result.parmins[0]
                    nH_hierr = covar_result.parmaxes[0]
                    kT_loerr = covar_result.parmins[1]
                    kT_hierr = covar_result.parmaxes[1]
                    if fix_abund:
                        Z_loerr = 0.0
                        Z_hierr = 0.0
                        norm_loerr = covar_result.parmins[2]
                        norm_hierr = covar_result.parmaxes[2]
                    else:
                        Z_loerr = covar_result.parmins[2]
                        Z_hierr = covar_result.parmaxes[2]
                        norm_loerr = covar_result.parmins[3]
                        norm_hierr = covar_result.parmaxes[3]
                del covar_result

                # Check for failed errors (= None) and set them to +/- best-fit value
                if not fix_nH_Gal:
                    if nH_loerr is None: nH_loerr = -nH  # 'is None' check (was '== None')
                    if nH_hierr is None: nH_hierr = nH
                if kT_loerr is None: kT_loerr = -kT
                if kT_hierr is None: kT_hierr = kT
                if not fix_abund:
                    if Z_loerr is None: Z_loerr = -Z
                    if Z_hierr is None: Z_hierr = Z
                if norm_loerr is None: norm_loerr = -norm
                if norm_hierr is None: norm_hierr = norm
            else:
                kT_loerr = 0.0
                Z_loerr = 0.0
                nH_loerr = 0.0
                norm_loerr = 0.0
                kT_hierr = 0.0
                Z_hierr = 0.0
                nH_hierr = 0.0
                norm_hierr = 0.0

        else:  # if total counts < min_counts, just write zeros
            print('\n Warning: no fit performed for region: ' + str(i))
            print(
                '\n Spectra have insufficient counts after filtering or do not exist.'
            )
            print('\n --> All parameters for this region set to 0.0.')
            kT = 0.0
            Z = 0.0
            nH = 0.0
            norm = 0.0
            kT_loerr = 0.0
            Z_loerr = 0.0
            nH_loerr = 0.0
            norm_loerr = 0.0
            kT_hierr = 0.0
            Z_hierr = 0.0
            nH_hierr = 0.0
            norm_hierr = 0.0
            red_chi2 = 0.0
            num_bins = 0

        reg_id = spectra[0][i].split('_')  # Splits string after every underscore so that region number can be accessed. reg_id[1] is accessed because that is the region number after 'xaf'
        results_file.write(
            '%7r %7.4f %7.4f %7.4f %7.4f %7.4f %7.4f %6.4e %6.4e %6.4e %7.4f %7.4f %7.4f %7.4f %8.1f %8r\n'
            % (int(reg_id[1]), kT, kT_loerr, kT_hierr, Z, Z_loerr, Z_hierr,
               norm, norm_loerr, norm_hierr, nH, nH_loerr, nH_hierr, red_chi2,
               totcnts, num_bins))  # Write all data to a file

    results_file.close()
Example 16
sau.set_stat('cstat')
# Ask for high-precision results
sau.set_method_opt('ftol', 1e-20)
sau.set_covar_opt('eps', 1e-20)

# Set start parameters close to simulation values to make the fit converge
sau.set_par('source.xpos', 101)
sau.set_par('source.ypos', 101)
sau.set_par('source.ampl', 1.1e3)
sau.set_par('source.fwhm', 10)
sau.set_par('background.c0', 1.1)

# Run fit and covariance estimation
# Results are automatically printed to the screen
sau.fit()
sau.covar()

# Sherpa uses fwhm instead of sigma as extension parameter ... need to convert
# http://cxc.harvard.edu/sherpa/ahelp/gauss2d.html
fwhm_to_sigma = 1. / np.sqrt(8 * np.log(2))
cov = sau.get_covar_results()
sigma = fwhm_to_sigma * cov.parvals[0]
sigma_err = fwhm_to_sigma * cov.parmaxes[0]
print('sigma: {0} +- {1}'.format(sigma, sigma_err))

# Compute correlation coefficient for sigma and norm
c = cov.extra_output
c_norm = c[3, 3]
c_sigma = fwhm_to_sigma**2 * c[0, 0]
c_norm_sigma = fwhm_to_sigma * c[0, 3]
corr_norm_sigma = c_norm_sigma / np.sqrt(c_norm * c_sigma)
Example 17
def test_beta_model():
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    r_c = 20.0
    beta = 1.0

    exp_time = Quantity(500.0, "ks")

    e = spec.generate_energies(exp_time, area, prng=prng)

    beta_src = BetaModel(ra0, dec0, r_c, beta, e.size, prng=prng)

    write_photon_list("beta",
                      "beta",
                      e.flux,
                      beta_src.ra,
                      beta_src.dec,
                      e,
                      overwrite=True)

    instrument_simulator("beta_simput.fits",
                         "beta_evt.fits",
                         exp_time,
                         "hdxi", [ra0, dec0],
                         ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         foreground=False,
                         prng=prng)

    inst = get_instrument_from_registry("hdxi")
    arf = AuxiliaryResponseFile(inst["arf"])
    cspec = ConvolvedSpectrum(spec, arf)
    ph_flux = cspec.get_flux_in_band(0.5, 7.0)[0].value
    S0 = 3.0 * ph_flux / (2.0 * np.pi * r_c * r_c)

    write_radial_profile("beta_evt.fits",
                         "beta_evt_profile.fits", [ra0, dec0],
                         0.0,
                         100.0,
                         200,
                         ctr_type="celestial",
                         emin=0.5,
                         emax=7.0,
                         overwrite=True)

    load_data(1, "beta_evt_profile.fits", 3,
              ["RMID", "SUR_BRI", "SUR_BRI_ERR"])
    set_stat("chi2")
    set_method("levmar")
    set_source("beta1d.src")
    src.beta = 1.0
    src.r0 = 10.0
    src.ampl = 0.8 * S0
    freeze(src.xpos)

    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0] - r_c) < res.parmaxes[0]
    assert np.abs(res.parvals[1] - beta) < res.parmaxes[1]
    assert np.abs(res.parvals[2] - S0) < res.parmaxes[2]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example 18
def test_beta_model_flux():
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    r_c = 20.0
    beta = 1.0

    prng = 34

    e = spec.generate_energies(exp_time, area, prng=prng)

    beta_src = BetaModel(ra0, dec0, r_c, beta, e.size, prng=prng)

    write_photon_list("beta",
                      "beta",
                      e.flux,
                      beta_src.ra,
                      beta_src.dec,
                      e,
                      overwrite=True)

    instrument_simulator("beta_simput.fits",
                         "beta_evt.fits",
                         exp_time,
                         "acisi_cy0", [ra0, dec0],
                         ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         foreground=False,
                         roll_angle=37.0,
                         prng=prng)

    ph_flux = spec.get_flux_in_band(0.5, 7.0)[0].value
    S0 = 3.0 * ph_flux / (2.0 * np.pi * r_c * r_c)

    wspec = spec.new_spec_from_band(0.5, 7.0)

    make_exposure_map("beta_evt.fits",
                      "beta_expmap.fits",
                      wspec.emid.value,
                      weights=wspec.flux.value,
                      overwrite=True)

    write_radial_profile("beta_evt.fits",
                         "beta_evt_profile.fits", [ra0, dec0],
                         0.0,
                         100.0,
                         200,
                         ctr_type="celestial",
                         emin=0.5,
                         emax=7.0,
                         expmap_file="beta_expmap.fits",
                         overwrite=True)

    load_data(1, "beta_evt_profile.fits", 3,
              ["RMID", "SUR_FLUX", "SUR_FLUX_ERR"])
    set_stat("chi2")
    set_method("levmar")
    set_source("beta1d.src")
    src.beta = 1.0
    src.r0 = 10.0
    src.ampl = 0.8 * S0
    freeze(src.xpos)

    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0] - r_c) < res.parmaxes[0]
    assert np.abs(res.parvals[1] - beta) < res.parmaxes[1]
    assert np.abs(res.parvals[2] - S0) < res.parmaxes[2]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example 19
filename = 'skymap_ex.fits'  # same input image as in Example 9
nomposstr = '05h34m31.94s 22d00m52.2s'
header = fits.getheader(filename)
proj = wcs.Projection(header)
xc, yc = float(header['NAXIS1']) / 2., float(header['NAXIS2']) / 2.
ui.load_image(filename)
ui.notice2d('circle({0}, {1}, {2})'.format(xc, yc,
                                           float(header['NAXIS2']) / 4.))
ui.set_source(ui.gauss2d.g1 + ui.gauss2d.g2)
g1.xpos = xc
g1.ypos = yc
g2.fwhm = g1.fwhm = 3.
ui.link(g2.xpos, g1.xpos)
ui.link(g2.ypos, g1.ypos)
g2.ampl = 50.
g1.ampl = 50.
ui.guess()
ui.fit()
ui.image_fit()
ui.covar()
conf = ui.get_covar_results()
conf_dict = dict([(n, (v, l, h)) for n, v, l, h in zip(
    conf.parnames, conf.parvals, conf.parmins, conf.parmaxes)])
x, y = proj.toworld((conf_dict['g1.xpos'][0], conf_dict['g1.ypos'][0]))
xmin, ymin = proj.toworld((conf_dict['g1.xpos'][0] + conf_dict['g1.xpos'][1],
                           conf_dict['g1.ypos'][0] + conf_dict['g1.ypos'][1]))
xmax, ymax = proj.toworld((conf_dict['g1.xpos'][0] + conf_dict['g1.xpos'][2],
                           conf_dict['g1.ypos'][0] + conf_dict['g1.ypos'][2]))
nompos = positions.str2pos(nomposstr, proj)
print('{0} ({1}-{2}) vs {3}'.format(x, xmin, xmax, nompos[0][0][0]))
print('{0} ({1}-{2}) vs {3}'.format(y, ymin, ymax, nompos[0][0][1]))
Example 20
def test_annulus():

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    r_in = 10.0
    r_out = 30.0

    e = spec.generate_energies(exp_time, area, prng=prng)

    ann_src = AnnulusModel(ra0, dec0, r_in, r_out, e.size, prng=prng)

    write_photon_list("ann",
                      "ann",
                      e.flux,
                      ann_src.ra,
                      ann_src.dec,
                      e,
                      overwrite=True)

    instrument_simulator("ann_simput.fits",
                         "ann_evt.fits",
                         exp_time,
                         "hdxi", [ra0, dec0],
                         ptsrc_bkgnd=False,
                         instr_bkgnd=False,
                         foreground=False,
                         prng=prng)

    inst = get_instrument_from_registry("hdxi")
    arf = AuxiliaryResponseFile(inst["arf"])
    cspec = ConvolvedSpectrum(spec, arf)
    ph_flux = cspec.get_flux_in_band(0.5, 7.0)[0].value
    S0 = ph_flux / (np.pi * (r_out**2 - r_in**2))

    write_radial_profile("ann_evt.fits",
                         "ann_evt_profile.fits", [ra0, dec0],
                         1.1 * r_in,
                         0.9 * r_out,
                         100,
                         ctr_type="celestial",
                         emin=0.5,
                         emax=7.0,
                         overwrite=True)

    load_data(1, "ann_evt_profile.fits", 3, ["RMID", "SUR_BRI", "SUR_BRI_ERR"])
    set_stat("chi2")
    set_method("levmar")
    set_source("const1d.src")
    src.c0 = 0.8 * S0

    fit()
    set_covar_opt("sigma", 1.645)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0] - S0) < res.parmaxes[0]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example 21
def test_point_source():

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    nH_sim = 0.02
    norm_sim = 1.0e-4
    alpha_sim = 0.95
    redshift = 0.02

    exp_time = (100., "ks")
    area = (3000., "cm**2")

    wcs = create_dummy_wcs()

    ebins = np.linspace(0.1, 11.5, 2001)
    emid = 0.5*(ebins[1:]+ebins[:-1])
    spec = norm_sim*(emid*(1.0+redshift))**(-alpha_sim)
    de = np.diff(ebins)[0]

    abs_model = TBabsModel(nH_sim)

    events = EventList.create_empty_list(exp_time, area, wcs)

    positions = [(30.01, 45.0)]

    new_events = events.add_point_sources(positions, ebins, spec, prng=prng,
                                          absorb_model=abs_model)

    new_events = ACIS_S(new_events, prng=prng)

    scalex = float(np.std(new_events['xpix'])*sigma_to_fwhm*new_events.parameters["dtheta"])
    scaley = float(np.std(new_events['ypix'])*sigma_to_fwhm*new_events.parameters["dtheta"])

    psf_scale = ACIS_S.psf_scale

    assert (scalex - psf_scale)/psf_scale < 0.01
    assert (scaley - psf_scale)/psf_scale < 0.01

    new_events.write_spectrum("point_source_evt.pi", clobber=True)

    os.system("cp %s ." % new_events.parameters["ARF"])
    os.system("cp %s ." % new_events.parameters["RMF"])

    load_user_model(mymodel, "tplaw")
    add_user_pars("tplaw", ["nH", "norm", "redshift", "alpha"],
                  [0.01, norm_sim*0.8, redshift, 0.9],
                  parmins=[0.0, 0.0, 0.0, 0.1],
                  parmaxs=[10.0, 1.0e9, 10.0, 10.0],
                  parfrozen=[False, False, True, False])

    load_pha("point_source_evt.pi")
    set_stat("cstat")
    set_method("simplex")
    ignore(":0.5, 9.0:")
    set_model("tplaw")
    fit()
    set_covar_opt("sigma", 1.6)
    covar()
    res = get_covar_results()

    assert np.abs(res.parvals[0]-nH_sim) < res.parmaxes[0]
    assert np.abs(res.parvals[1]-norm_sim/de) < res.parmaxes[1]
    assert np.abs(res.parvals[2]-alpha_sim) < res.parmaxes[2]

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
Example 22
import sherpa.astro.ui as ui
from spec import load_data, report_results

# Load data
x, y, y_err = load_data()
ui.load_arrays(1, x, y, y_err)

# Set up the model
ui.set_model(ui.powlaw1d.pl)
pl.gamma, pl.ampl = 2, 1e-12

# Perform fit
ui.fit()  # Compute best-fit parameters
ui.covar()  # Compute covariance matrix (i.e. errors)
# ui.conf() # Compute profile errors
# ui.show_all() # Print a very nice summary of your session to less

# Report results
fr = ui.get_fit_results()
cr = ui.get_covar_results()

package = "sherpa"
gamma, norm = fr.parvals
chi2 = fr.statval
gamma_err, norm_err = cr.parmaxes
cov = cr.extra_output[1, 0]
corr = cov / (norm_err * gamma_err)
report_results(package, norm, norm_err, gamma, gamma_err, chi2, cov, corr)
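
covar() reports symmetric errors from a quadratic approximation of the statistic surface. A hedged follow-on sketch, reusing the ui session and the cr object from the example above, that compares them against the (possibly asymmetric) profile errors from conf():

ui.conf()  # profile (confidence) errors; slower but more robust than covar()
pr = ui.get_conf_results()
for name, lo, hi, sym in zip(pr.parnames, pr.parmins, pr.parmaxes, cr.parmaxes):
    # lo is reported as a negative offset, hi as a positive one.
    print('{0}: conf {1}/+{2} vs covar +/-{3}'.format(name, lo, hi, sym))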