def spec_extract(observation, region_file, region_num, min_counts):
    infile = "{data_clean}[sky=region({region_file})]".format(
        data_clean=observation.cluster.acisI_clean_obs(observation.id),
        region_file=region_file
    )
    outroot = "{outdir}/acisI_region_{obsid}_{region_num}".format(
        outdir=observation.cluster.super_comp_dir,
        region_num=region_num,
        obsid=observation.id
    )

    print("Starting specextract on {}".format(infile))

    rt.specextract(infile=infile,
                   outroot=outroot,
                   weight='yes',
                   correct='no',
                   asp='@{}'.format(observation.pcad_asol),
                   combine='no',
                   mskfile=observation.acis_mask_sc,
                   # bkgfile=observation.back,
                   bkgresp="no",
                   badpixfile=observation.bad_pixel_file,
                   binspec=1,
                   clobber=True)

    io.append_to_file(observation.cluster.spec_lis(region_num),
                      "{}.pi\n".format(outroot))
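
# A minimal, standalone sketch of the bare CIAO call that spec_extract() wraps,
# assuming a CIAO environment with ciao_contrib installed. Every path below is
# a placeholder, not a file produced by this pipeline, and parameter names can
# differ between CIAO versions (e.g. correct vs. correctpsf).
import ciao_contrib.runtool as rt

rt.specextract.punlearn()
rt.specextract(infile="acisI_clean_1234.fits[sky=region(xaf_0.reg)]",
               outroot="combined/acisI_region_1234_0",
               weight='yes',
               correct='no',
               asp='@pcad_asol1.lis',
               combine='no',
               mskfile="acisf01234_msk1.fits",
               bkgresp='no',
               badpixfile="bpix1_new.fits",
               binspec=1,
               clobber=True)
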
def extract_multi(info):
    # Decode the underscore-delimited work-unit string into its fields
    info = info.split('_')
    obs = info[2]
    reg = info[0] + '_' + info[1] + '.reg'
    root = info[0] + '_' + info[1] + '_' + info[2]
    evt2_file = '_' + info[3] + '_' + info[4]
    wght = info[5]

    reg_total = len(glob.glob('xaf_*.reg'))
    reg_done = len(glob.glob('xaf_*_%s.pi' % str(obs)))
    print('Completed %s/%s regions' % (reg_done, reg_total))

    if not os.path.isfile(root + '.pi'):
        # evt file for obsid, filtered with the xaf region
        evt2_filter = '../reprojected_data/' + obs + evt2_file + '[sky=region(%s)]' % reg
        # reprojected evt file filtered with the common background region
        bkg_reg = '../reprojected_data/background.reg'
        bkg_filter = '../reprojected_data/' + obs + evt2_file + '[sky=region(%s)]' % bkg_reg
        asp_file = '../reprojected_data/' + obs + '.asol'
        bpix_file = '../reprojected_data/' + obs + '.bpix'
        msk_file = '../reprojected_data/' + obs + '.mask'

        os.system('punlearn specextract')  # Reset specextract parameters

        # Extract spectra
        specextract(infile=evt2_filter, outroot=root, bkgfile=bkg_filter,
                    asp=asp_file, badpixfile=bpix_file, mskfile=msk_file,
                    weight=wght)
    else:
        print('File %s.pi found' % root)
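
# Hypothetical work-unit string for extract_multi(); with the splitting above it
# decodes to region 'xaf_7.reg', obsid '1234', event-file suffix
# '_repro_evt2.fits' and weight 'yes'. The single-string signature suggests it
# is meant to be mapped over a worker pool, but that is an assumption.
extract_multi('xaf_7_1234_repro_evt2.fits_yes')
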
def getSpectrum(evt2, srcReg, nametag, rtag, hkfiles, obsID, dataPath, bkgReg=''):
    rt.specextract.punlearn()

    # Unpack the housekeeping files: aspect solution, aspect list, mask, bad pixels
    [asol1, asol1lis, msk1, bpix] = hkfiles

    inEvtFile = evt2 + "[sky=region(" + srcReg + ")]"
    root = dataPath + str(obsID) + "_" + nametag + "_" + rtag

    if bkgReg == '':
        bkg = ''
    else:
        bkg = evt2 + "[sky=region(" + bkgReg + ")]"

    rt.specextract(infile=inEvtFile,
                   outroot=root,
                   bkgfile=bkg,
                   asp=asol1,
                   mskfile=msk1,
                   badpixfile=bpix,
                   weight='no',
                   correctpsf='yes',
                   clobber='yes')
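
# Illustrative call to getSpectrum(), assuming the housekeeping files were
# collected in the order [asol1, asol1lis, msk1, bpix] that it unpacks; all
# file names here are placeholders.
hkfiles = ["pcadf01234_asol1.fits", "pcad_asol1.lis",
           "acisf01234_msk1.fits", "acisf01234_repro_bpix1.fits"]
getSpectrum("acisf01234_repro_evt2.fits", "src.reg", "core", "r1",
            hkfiles, 1234, "./spectra/", bkgReg="bkg.reg")
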
def extract_spec(obsid_list, regions, evt2_file, wght):
    for reg in regions:
        for obs in obsid_list:
            root = reg[:-4] + '_' + obs

            # Check if spectra have already been extracted for this region and obsid
            if not os.path.isfile(root + '.pi'):
                print('Extracting spectra for obsid %s/%s: Completed %s/%s regions...'
                      % (obsid_list.index(obs) + 1, len(obsid_list),
                         regions.index(reg) + 1, len(regions)))

                evt2 = glob.glob('../reprojected_data/' + str(obs) + evt2_file)  # evt file for obsid
                evt2_filter = evt2[0] + '[sky=region(%s)]' % reg  # add xaf.reg filter

                bkg_reg = glob.glob('../reprojected_data/background.reg')
                # reprojected evt file filtered with the background region
                bkg_filter = evt2[0] + '[sky=region(%s)]' % bkg_reg[0]

                asp_file = glob.glob('../reprojected_data/' + str(obs) + '.asol')
                asp_file = asp_file[0]  # Turn glob list into string
                bpix_file = glob.glob('../reprojected_data/' + str(obs) + '.bpix')
                bpix_file = bpix_file[0]
                msk_file = glob.glob('../reprojected_data/' + str(obs) + '.mask')
                msk_file = msk_file[0]

                os.system('punlearn specextract')  # Reset specextract

                # Extract spectra
                specextract(infile=evt2_filter, outroot=root, bkgfile=bkg_filter,
                            asp=asp_file, badpixfile=bpix_file, mskfile=msk_file,
                            weight=wght)
            else:
                print('Already extracted spectra for %s/%s obsids in %s/%s regions'
                      % (obsid_list.index(obs) + 1, len(obsid_list),
                         regions.index(reg) + 1, len(regions)))
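
# Hypothetical driver for extract_spec(), assuming the reprojected event files,
# aspect/bad-pixel/mask files, and background.reg live in ../reprojected_data/
# as the function expects; the obsids and event-file suffix are illustrative only.
import glob

obsid_list = ['1234', '5678']
regions = sorted(glob.glob('xaf_*.reg'))
extract_spec(obsid_list, regions, '_repro_evt2.fits', wght='yes')
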
def create_global_response_file_for(cluster, obsid, region_file):
    observation = cluster.observation(obsid)
    # min_counts = 525

    obs_analysis_dir = observation.analysis_directory
    global_response_dir = "{}/globalresponse/".format(obs_analysis_dir)
    io.make_directory(global_response_dir)

    clean = observation.clean
    back = observation.back

    pbk0 = io.get_filename_matching("{}/acis*pbk0*.fits".format(obs_analysis_dir))[0]
    bad_pixel_file = io.get_filename_matching("{}/bpix1_new.fits".format(obs_analysis_dir))[0]

    rt.ardlib.punlearn()
    rt.acis_set_ardlib(badpixfile=bad_pixel_file)

    mask_file = io.get_filename_matching("{}/*msk1.fits".format(obs_analysis_dir))[0]

    make_pcad_lis(cluster, obsid)

    infile = "{}[sky=region({})]".format(clean, region_file)
    outroot = "{}/acisI_region_0".format(global_response_dir)
    weight = True
    correct_psf = False
    pcad = "@{}/pcad_asol1.lis".format(obs_analysis_dir)
    combine = False
    bkg_file = ""
    bkg_resp = False
    group_type = "NUM_CTS"
    binspec = 1
    clobber = True

    rt.specextract(infile=infile,
                   outroot=outroot,
                   weight=weight,
                   correctpsf=correct_psf,
                   asp=pcad,
                   combine=combine,
                   mskfile=mask_file,
                   bkgfile=bkg_file,
                   bkgresp=bkg_resp,
                   badpixfile=bad_pixel_file,
                   grouptype=group_type,
                   binspec=binspec,
                   clobber=clobber)

    # Extract the background spectrum and attach it to the source PI file
    infile = "{}[sky=region({})][bin pi]".format(back, region_file)
    outfile = "{}/acisI_back_region_0.pi".format(global_response_dir)
    clobber = True

    rt.dmextract.punlearn()
    print("Running: dmextract infile={}, outfile={}, clobber={}".format(infile, outfile, clobber))
    rt.dmextract(infile=infile, outfile=outfile, clobber=clobber)

    rt.dmhedit.punlearn()
    infile = "{}/acisI_region_0.pi".format(global_response_dir)
    filelist = ""
    operation = "add"
    key = "BACKFILE"
    value = outfile

    rt.dmhedit(infile=infile, filelist=filelist, operation=operation, key=key, value=value)

    aux_response_file = '{global_response_directory}/acisI_region_0.arf'.format(
        global_response_directory=observation.global_response_directory)
    redist_matrix_file = '{global_response_directory}/acisI_region_0.rmf'.format(
        global_response_directory=observation.global_response_directory)

    io.copy(aux_response_file, observation.aux_response_file)
    io.copy(redist_matrix_file, observation.redistribution_matrix_file)
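
# The background-linking step of create_global_response_file_for() in isolation:
# dmextract builds a background PI spectrum and dmhedit points the source PI at
# it through the BACKFILE keyword. Assumes a CIAO environment with ciao_contrib
# installed; the file names are placeholders.
import ciao_contrib.runtool as rt

rt.dmextract.punlearn()
rt.dmextract(infile="back_1234.fits[sky=region(global.reg)][bin pi]",
             outfile="globalresponse/acisI_back_region_0.pi",
             clobber=True)

rt.dmhedit.punlearn()
rt.dmhedit(infile="globalresponse/acisI_region_0.pi",
           filelist="", operation="add", key="BACKFILE",
           value="globalresponse/acisI_back_region_0.pi")
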
def extract_and_fit_spectra(evt_file: str, img_file: str, reg_file: str,
                            bkg_file: str, extract_spectra: bool = True,
                            fit_spectra: bool = True, **kwargs) -> Optional[FluxObj]:
    """
    Extract the spectrum from a specified region and fit it

    :param evt_file: Path to the events file
    :param img_file: Path to the image file
    :param reg_file: Path to the region file that will be used to extract the spectrum
    :param bkg_file: Path to the background region file
    :param extract_spectra: Set this to True to extract the spectrum
    :param fit_spectra: Set this to True to fit and save the model spectrum
    ...
    :return: If the number of counts inside the region is greater than 40, an absorbed
        power law will be fit. If not, a representative flux and monoenergy will be
        chosen; the flux can be scaled interactively at run time.
    :rtype: Tuple[float, float] or None
    """
    dmstat.punlearn()
    dmstat(f"{img_file}[sky=region({reg_file})]", centroid=False)

    use_flux = False
    photon_flux = None
    monoenergy = None

    if float(dmstat.out_sum) < 40:
        print(f"The counts in the core ({dmstat.out_sum}) are too small for spectral fitting")
        print("Flux at a monoenergy will be used for the PSF simulation")
        use_flux = True

    # extract the spectrum
    if extract_spectra:
        specextract.punlearn()
        specextract(
            infile="{0}[sky=region({1})]".format(evt_file, reg_file),
            outroot="core_spectrum",
            bkgfile="{0}[sky=region({1})]".format(evt_file, bkg_file),
            clobber=True,
        )

    if fit_spectra and not use_flux:
        SpecUtils.prepare_spectra(**kwargs)
        return None

    # get the ra dec from sky coords
    ra_dec = CoordUtils.get_centroid(evt_file, reg_file)

    if use_flux:
        # calculate the monoenergy
        # Using case 2 in https://cxc.cfa.harvard.edu/ciao/why/monochromatic_energy.html
        dmtcalc.punlearn()
        dmtcalc(
            "core_spectrum.arf",
            "arf_weights",
            "mid_energy=(energ_lo+energ_hi)/2.0;weights=(mid_energy*specresp)",
            clobber=True,
        )

        dmstat.punlearn()
        dmstat("arf_weights[mid_energy=2.0:7.0][cols weights,specresp]", verbose="0")

        out_sum = dmstat.out_sum
        out_sum = [float(val) for val in out_sum.split(",")]
        monoenergy = out_sum[0] / out_sum[1]

        srcflux.punlearn()
        srcflux(
            evt_file,
            ",".join(ra_dec),
            "flux",
            bands=f"0.5:7.0:{monoenergy}",
            psfmethod="quick",
            verbose="0",
            clobber=True,
        )

        dmkeypar.punlearn()
        dmkeypar("flux_0.5-7.0.flux", "net_photflux_aper")
        photon_flux = dmkeypar.rval

        dmkeypar.punlearn()
        dmkeypar("flux_0.5-7.0.flux", "net_rate")
        print(f"Net count rate: {dmkeypar.rval}")
        print(f"Monoenergy: {monoenergy} keV, photon flux: {photon_flux} photons/s/cm^2")

        scale_fac: Union[str, float] = input("Scaling for photon flux (1): ")
        if scale_fac == "":
            scale_fac = 1
        photon_flux *= float(scale_fac)

        return FluxObj(photon_flux, monoenergy)

    return None
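
# Illustrative call, assuming the CIAO runtool wrappers (dmstat, specextract,
# dmtcalc, srcflux, dmkeypar) and the SpecUtils/CoordUtils/FluxObj helpers used
# above are importable; the file names are placeholders.
flux = extract_and_fit_spectra("acisf01234_repro_evt2.fits",
                               "broad_thresh.img",
                               "core.reg",
                               "bkg.reg",
                               extract_spectra=True,
                               fit_spectra=True)
if flux is not None:
    # Low-count case: a (photon flux, monoenergy) pair for the PSF simulation
    print(flux)
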