def get_saotrace_keywords(evt_file: str,
                          keys: Union[List, Tuple, np.ndarray, None] = None,
                          **kwargs) -> Dict:
    """
    Get keywords from the events file that are required for simulating a PSF
    using SAOTrace.

    :param evt_file: Path to the events file
    :param keys: The keys for which values need to be extracted. If None,
        ``"ROLL_PNT", "RA_PNT", "DEC_PNT", "ASOLFILE", "EXPOSURE"`` will be used
    ...
    :return: The dictionary containing key-value pairs
    :rtype: Dict
    """
    if keys is None:
        keys = ["ROLL_PNT", "RA_PNT", "DEC_PNT", "ASOLFILE", "EXPOSURE"]
    out = dict()
    for key in keys:
        dmkeypar.punlearn()
        dmkeypar(evt_file, key=key)
        out[key] = dmkeypar.value
    # Read TSTART from the first row of the GTI7 block
    dmlist.punlearn()
    data = dmlist(f"{evt_file}[GTI7][#row=1]", "data,raw")
    out["TSTART"] = data.splitlines()[1].split()[0]
    return out
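# Usage sketch (illustration only): assumes CIAO's Python environment and a
# placeholder events file name. The runtool imports are the tool wrappers the
# function above relies on.
from ciao_contrib.runtool import dmkeypar, dmlist  # CIAO parameter-tool wrappers

keywords = get_saotrace_keywords("acisf01234_repro_evt2.fits")  # hypothetical path
print(keywords["EXPOSURE"], keywords["TSTART"])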
def ccd_id_finder(obsid_list, ccd_id):
    for obs in obsid_list:
        # Chandra ObsIDs are zero-padded to five digits in the repro file names,
        # e.g. ObsID 1234 -> acisf01234_repro_evt2.fits; zfill handles all lengths
        padded = str(int(obs)).zfill(5)
        # Get value of keyword 'CCD_ID'
        ccd = dmkeypar("%s/repro/acisf%s_repro_evt2.fits" % (obs, padded),
                       keyword='CCD_ID', echo=True)
        ID = int(ccd)  # make the number an integer instead of a CIAO string
        ccd_id.append(ID)  # stick onto the list
    return ccd_id
def ccd_sort(cluster):
    print("Running ccd_sort on {}.".format(cluster.name))
    for observation in cluster.observations:
        print("Working on {}/{}".format(cluster.name, observation.id))
        analysis_path = observation.analysis_directory
        os.chdir(analysis_path)
        evt1_filename = io.get_path("{}/{}".format(
            analysis_path, io.get_filename_matching("acis*evt1.fits")[0]))
        evt2_filename = io.get_path("{}/{}".format(
            analysis_path, io.get_filename_matching("evt2.fits")[0]))
        detname = rt.dmkeypar(infile=evt1_filename, keyword="DETNAM", echo=True)
        print("evt1 : {}\nevt2 : {}\ndetname : {}".format(
            evt1_filename, evt2_filename, detname))
        assert not isinstance(detname, type(None)), "detname returned nothing!"
        detnums = [int(x) for x in detname.split('-')[-1]]
        for acis_id in detnums:
            print("{cluster}/{observation}: Making level 2 event file for ACIS Chip id: {acis_id}".format(
                cluster=cluster.name, observation=observation.id, acis_id=acis_id))
            rt.dmcopy(infile="{evt2_file}[ccd_id={acis_id}]".format(
                          evt2_file=evt2_filename, acis_id=acis_id),
                      outfile="acis_ccd{acis_id}.fits".format(acis_id=acis_id),
                      clobber=True)
        acisI_list = io.get_filename_matching("acis_ccd[0-3].fits")
        for i in range(len(acisI_list)):
            acisI_list[i] = io.get_path("{obs_analysis_dir}/{file}".format(
                obs_analysis_dir=observation.analysis_directory, file=acisI_list[i]))
        io.write_contents_to_file("\n".join(acisI_list), observation.ccd_merge_list,
                                  binary=False)
        merge_data_and_backgrounds(cluster, acisI_list)
    return
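# For reference, a standalone sketch of the DETNAM parsing done in ccd_sort:
# the keyword looks like "ACIS-0123" and the digits after the dash are the
# active CCD ids (the string below is an assumed example, not read from a file).
detname = "ACIS-0123"
detnums = [int(x) for x in detname.split('-')[-1]]
assert detnums == [0, 1, 2, 3]  # one level-2 event file is made per chip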
def get_keyword_value(filename, keyword):
    value = rt.dmkeypar(infile=filename, keyword=keyword, echo=True)
    # print("{infile}['{keyword}'] = {value}".format(
    #     infile=filename,
    #     keyword=keyword,
    #     value=value
    # ))
    return value
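# dmkeypar with echo=True hands back the keyword value as a string, so numeric
# keywords must be cast by the caller. Hypothetical calls with a placeholder
# file name:
exposure = float(get_keyword_value("acisf01234_repro_evt2.fits", "EXPOSURE"))
gainfile = get_keyword_value("acisf01234_repro_evt2.fits", "GAINFILE")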
def merge_data_and_backgrounds(cluster, acis_list):
    rt.dmmerge.punlearn()
    merged_file = "acisI.fits"
    # "[subspace -expno]" drops the exposure-number subspace so the files merge cleanly
    rt.dmmerge(infile="@acisI.lis[subspace -expno]",
               outfile=merged_file,
               clobber=True)
    # get_filename_matching returns a list of matches; use the first one
    detname = rt.dmkeypar(infile=io.get_filename_matching("acis*evt1.fits")[0],
                          keyword="DETNAM", echo=True)
    # acisI3 = detname.find("3")
    # acisS3 = detname.find("7")
    rt.dmlist.punlearn()
    rt.dmlist(infile=merged_file, opt="header")
    return None
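# The "@acisI.lis" argument is a CIAO stack: a plain-text file listing the
# per-CCD event files to merge, one per line. A sketch of writing such a list
# (the file names are assumed placeholders):
ccd_files = ["acis_ccd{}.fits".format(i) for i in range(4)]
with open("acisI.lis", "w") as f:
    f.write("\n".join(ccd_files))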
if args.evt_in == 'default':
    evt_infile = hg.fetch_file(pwd, pat='evt', prompt='yes')
else:
    evt_infile = args.evt_in
print("\nInput File: " + evt_infile)

#----Observation Information----
# obsid, target name, pointing, observation mode, read mode,
# data mode, detector names

#--get obsid, object name, dates, and modes--
# use the resolved evt_infile (args.evt_in may be the literal 'default')
obsid = chandra.get_obsid(evt_infile)
objectname = chandra.get_objectname(evt_infile)
start_date = crt.dmkeypar(evt_infile, "DATE-OBS", "echo+")
obs_pointing = chandra.get_pointing(evt_infile)
obs_mode = crt.dmkeypar(evt_infile, "OBS_MODE", "echo+")
data_mode = crt.dmkeypar(evt_infile, "DATAMODE", "echo+")
det_name = crt.dmkeypar(evt_infile, "DETNAM", "echo+")
common_name = ''
if det_name == 'ACIS-0123':
    common_name = '(ACIS-I)'
if det_name == 'ACIS-456789':
    common_name = '(ACIS-S)'
if det_name[:4] == 'ACIS':
    read_mode = crt.dmkeypar(evt_infile, "READMODE", "echo+")
#--Open Output Text File--
if os.path.isfile(args.outfile) and args.clobber == 'no':
    print(args.outfile + " exists and clobber=no. Quitting.")
    sys.exit()  # stop here so the loop below cannot run without an open file
else:
    outfile = args.outfile
    countfile = open(outfile, 'w')

#---------------------------------------
# Loop Over Observations
#---------------------------------------

for s in range(len(spectra)):
    # counts = crt.dmlist(args.specdir+spectra[s], "TOTCTS")
    counts = crt.dmkeypar(args.specdir + spectra[s], "TOTCTS", "echo")
    obsid = crt.dmkeypar(args.specdir + spectra[s], "OBS_ID", "echo")
    print(obsid, counts)
    countfile.write(obsid + '\t' + counts + '\n')

#--close file--
countfile.close()

#---------------------------------------
# Print out final status
#---------------------------------------
def lightcurves_with_exclusion(cluster):
    for observation in cluster.observations:
        # data_nosrc_hiEfilter = "{}/acisI_nosrc_fullE.fits".format(obs_analysis_dir)
        data_nosrc_hiEfilter = "{}/acisI_nosrc_hiEfilter.fits".format(
            observation.analysis_directory)
        print("Creating the image with sources removed")
        data = observation.acis_nosrc_filename
        image_nosrc = "{}/img_acisI_nosrc_fullE.fits".format(
            observation.analysis_directory)
        clobber = True  # needed in every branch below, so set it up front
        if io.file_exists(observation.exclude_file):
            print("Removing sources from event file to be used in lightcurve")
            infile = "{}[exclude sky=region({})]".format(
                data_nosrc_hiEfilter, observation.exclude)
            outfile = "{}/acisI_lcurve.fits".format(observation.analysis_directory)
            rt.dmcopy.punlearn()
            rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)
            data_lcurve = "{}/acisI_lcurve.fits".format(observation.analysis_directory)
        else:
            yes_or_no = io.check_yes_no(
                "Are there sources to be excluded from observation {} "
                "while making the lightcurve? ".format(observation.id))
            if yes_or_no:  # yes_or_no == True
                print("Create a region file with the region to be excluded "
                      "and save it as {}".format(observation.exclude_file))
            else:
                data_lcurve = data_nosrc_hiEfilter
        backbin = 259.28
        echo = True
        tstart = rt.dmkeypar(infile=data_nosrc_hiEfilter, keyword="TSTART", echo=echo)
        tstop = rt.dmkeypar(infile=data_nosrc_hiEfilter, keyword="TSTOP", echo=echo)
        print("Creating lightcurve from the events list with dmextract")
        infile = "{}[bin time={}:{}:{}]".format(data_lcurve, tstart, tstop, backbin)
        outfile = "{}/acisI_lcurve.lc".format(observation.analysis_directory)
        opt = "ltc1"
        rt.dmextract.punlearn()
        rt.dmextract(infile=infile, outfile=outfile, opt=opt, clobber=clobber)
        lcurve = outfile
        print("Cleaning the lightcurve by removing flares with deflare. "
              "Press enter to continue.")
        rt.deflare.punlearn()
        infile = lcurve
        outfile = "{}/acisI_gti.gti".format(observation.analysis_directory)
        method = "clean"
        save = "{}/acisI_lcurve".format(observation.analysis_directory)
        rt.deflare(infile=infile, outfile=outfile, method=method, save=save)
        gti = outfile
        print("Filtering the event list using GTI info just obtained.")
        infile = "{}[@{}]".format(data_nosrc_hiEfilter, gti)
        outfile = observation.clean
        rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)
        data_clean = outfile
        print("Don't forget to check the light curves!")
def generate_light_curve(observation):
    # filter out high energy background flares
    obsid_analysis_dir = observation.analysis_directory
    data = observation.acis_nosrc_filename
    background = observation.background_nosrc_filename

    infile = "{}[energy=9000:12000]".format(data)
    outfile = "{}/acisI_hiE.fits".format(obsid_analysis_dir)
    clobber = True
    rt.dmcopy.punlearn()
    rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)
    data_hiE = outfile

    infile = "{}[bin sky=8]".format(data_hiE)
    outfile = "{}/img_acisI_hiE.fits".format(obsid_analysis_dir)
    rt.dmcopy.punlearn()
    rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)

    backbin = 259.28
    echo = True
    tstart = rt.dmkeypar(infile=data_hiE, keyword="TSTART", echo=echo)
    tstop = rt.dmkeypar(infile=data_hiE, keyword="TSTOP", echo=echo)

    print("Creating a lightcurve from the high energy events list with dmextract")
    rt.dmextract.punlearn()
    infile = "{}[bin time={}:{}:{}]".format(data_hiE, tstart, tstop, backbin)
    outfile = "{}/acisI_lcurve_hiE.lc".format(obsid_analysis_dir)
    print('Running dmextract infile={} outfile={} opt=ltc1 clobber=True'.format(infile, outfile))
    rt.dmextract(infile=infile, outfile=outfile, opt='ltc1', clobber=True)
    lcurve_hiE = outfile

    print("cleaning the lightcurve for {}, press enter to continue.".format(observation.id))
    rt.deflare.punlearn()
    outfile = "{}/acisI_gti_hiE.gti".format(obsid_analysis_dir)
    method = "clean"
    save = "{}/acisI_lcurve_hiE".format(obsid_analysis_dir)
    rt.deflare(infile=lcurve_hiE, outfile=outfile, method=method, save=save)
    gti_hiE = outfile

    print("Filtering the event list using GTI info from high energy flares.")
    infile = "{}[@{}]".format(data, gti_hiE)
    outfile = "{}/acisI_nosrc_hiEfilter.fits".format(obsid_analysis_dir)
    print("running: dmcopy infile={} outfile={} clobber={}".format(infile, outfile, clobber))
    rt.dmcopy.punlearn()
    rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)
    data_nosrc_hiEfilter = outfile

    infile = "{}[bin sky=8]".format(data_nosrc_hiEfilter)
    outfile = "{}/img_acisI_nosrc_fullE.fits".format(obsid_analysis_dir)
    rt.dmcopy.punlearn()
    rt.dmcopy(infile=infile, outfile=outfile, clobber=clobber)
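# The dmextract input above uses CIAO Data Model binning syntax. A sketch of
# the filter string being built, with placeholder TSTART/TSTOP values:
tstart, tstop, backbin = 63234567.0, 63244567.0, 259.28  # illustrative times (s)
infile = "{}[bin time={}:{}:{}]".format("acisI_hiE.fits", tstart, tstop, backbin)
# -> "acisI_hiE.fits[bin time=63234567.0:63244567.0:259.28]"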
def ciao_back(cluster, overwrite=False):
    print("Running ciao_back on {}.".format(cluster.name))
    for observation in cluster.observations:
        pcad_file = make_pcad_lis(cluster, observation.id)
        backI_lis = []
        backS_lis = []
        analysis_path = observation.analysis_directory
        filelist = io.read_contents_of_file(observation.ccd_merge_list).split('\n')
        pcad = io.read_contents_of_file(pcad_file)
        for acis_file in filelist:
            rt.acis_bkgrnd_lookup.punlearn()
            print("Finding background for {}".format(acis_file))
            path_to_background = rt.acis_bkgrnd_lookup(infile=acis_file)
            print("Found background at {}".format(path_to_background))
            acis_id = int(acis_file.split('/')[-1].split('.')[-2][-1])
            assert isinstance(acis_id, int), "acis_id = {}".format(acis_id)
            assert not isinstance(path_to_background, type(None)), \
                "Cannot find background {}".format(acis_file)
            local_background_path = io.get_path("{}/back_ccd{}.fits".format(analysis_path, acis_id))
            try:
                if io.file_exists(local_background_path) and overwrite:
                    io.delete(local_background_path)
                io.copy(path_to_background, local_background_path)
            except OSError:
                print("Problem copying background file {}. Do you have the right permissions and a full CALDB?".format(
                    path_to_background))
                raise
            acis_gain = rt.dmkeypar(infile=acis_file, keyword="GAINFILE", echo=True)
            background_gain = rt.dmkeypar(infile=local_background_path, keyword="GAINFILE", echo=True)
            print("{}/{}/acis_ccd{}.fits gain: {}".format(cluster.name, observation.id, acis_id, acis_gain))
            print("{}/{}/back_ccd{}.fits gain: {}".format(cluster.name, observation.id, acis_id, background_gain))
            # Reprocess only when the gain files disagree (note the "not")
            if not dates_and_versions_match(acis_gain, background_gain):
                print("Date/version numbers don't match on the acis data and background. Reprocessing.")
                local_background_path = reprocess(cluster, observation.id, acis_gain, background_gain, acis_id)
            print("Reprojecting background")
            rt.reproject_events.punlearn()
            infile = local_background_path
            outfile = io.get_path("{local_path}/back_reproj_ccd{acis_id}.fits".format(local_path=analysis_path,
                                                                                      acis_id=acis_id))
            match = acis_file
            print("Running:\n reproject_events(infile={infile}, outfile={outfile}, aspect={pcad}, match={match})".format(
                infile=infile, outfile=outfile, pcad=pcad, match=match))
            rt.reproject_events(infile=infile, outfile=outfile,
                                aspect="{pcad_file}".format(pcad_file=pcad),
                                match=match, random=0, clobber=True)
            back_reproject = outfile
            # get_filename_matching returns a list of matches; use the first one,
            # and echo the value so the comparison below sees a string
            datamode = rt.dmkeypar(
                infile=io.get_filename_matching(io.get_path("{}/acis*evt1*.fits".format(analysis_path)))[0],
                keyword="DATAMODE", echo=True)
            if datamode == "VFAINT":
                print("VFAINT Mode, resetting status bits")
                rt.dmcopy.punlearn()
                rt.dmcopy(infile="{}[status=0]".format(back_reproject), outfile=outfile, clobber=True)
            if acis_id <= 3:
                backI_lis.append(back_reproject)
            else:
                backS_lis.append(back_reproject)
        merged_back_list = backI_lis + backS_lis
        print("writing backI.lis and backS.lis")
        io.write_contents_to_file("\n".join(backI_lis), io.get_path("{}/backI.lis".format(analysis_path)),
                                  binary=False)
        io.write_contents_to_file("\n".join(backS_lis), io.get_path("{}/backS.lis".format(analysis_path)),
                                  binary=False)
        io.write_contents_to_file("\n".join(merged_back_list), observation.merged_back_lis, binary=False)
    return
def extract_and_fit_spectra(evt_file: str, img_file: str, reg_file: str, bkg_file: str,
                            extract_spectra: bool = True, fit_spectra: bool = True,
                            **kwargs) -> Optional[FluxObj]:
    """
    Extract the spectrum from a specified region and fit it.

    :param evt_file: Path to the events file
    :param img_file: Path to the image file
    :param reg_file: Path to the region file that will be used to extract the spectrum
    :param bkg_file: Path to the background region file
    :param extract_spectra: Set this to True to extract the spectrum
    :param fit_spectra: Set this to True to fit and save the model spectrum
    ...
    :return: If the region contains at least 40 counts, an absorbed powerlaw will be
        fit. If not, a representative flux and monoenergy will be chosen. The flux
        can be scaled interactively at run time.
    :rtype: FluxObj or None
    """
    dmstat.punlearn()
    dmstat(f"{img_file}[sky=region({reg_file})]", centroid=False)
    use_flux = False
    photon_flux = None
    monoenergy = None
    if float(dmstat.out_sum) < 40:
        print(f"The counts in the core {dmstat.out_sum} are too small for spectral fitting")
        print("Flux at a monoenergy will be used for the PSF simulation")
        use_flux = True

    # extract the spectrum
    if extract_spectra:
        specextract.punlearn()
        specextract(
            infile="{0}[sky=region({1})]".format(evt_file, reg_file),
            outroot="core_spectrum",
            bkgfile="{0}[sky=region({1})]".format(evt_file, bkg_file),
            clobber=True,
        )
    if fit_spectra and not use_flux:
        SpecUtils.prepare_spectra(**kwargs)
        return None

    # get the ra dec from sky coords
    ra_dec = CoordUtils.get_centroid(evt_file, reg_file)
    if use_flux:
        # calculate the monoenergy
        # Using case 2 in https://cxc.cfa.harvard.edu/ciao/why/monochromatic_energy.html
        dmtcalc.punlearn()
        dmtcalc(
            "core_spectrum.arf",
            "arf_weights",
            "mid_energy=(energ_lo+energ_hi)/2.0;weights=(mid_energy*specresp)",
            clobber=True,
        )
        dmstat.punlearn()
        dmstat("arf_weights[mid_energy=2.0:7.0][cols weights,specresp]", verbose="0")
        out_sum = dmstat.out_sum
        out_sum = [float(val) for val in out_sum.split(",")]
        monoenergy = out_sum[0] / out_sum[1]
        srcflux.punlearn()
        srcflux(
            evt_file,
            ",".join(ra_dec),
            "flux",
            bands=f"0.5:7.0:{monoenergy}",
            psfmethod="quick",
            verbose="0",
            clobber=True,
        )
        dmkeypar.punlearn()
        dmkeypar("flux_0.5-7.0.flux", "net_photflux_aper")
        photon_flux = dmkeypar.rval
        dmkeypar.punlearn()
        dmkeypar("flux_0.5-7.0.flux", "net_rate")
        print(f"Net count rate: {dmkeypar.rval}")
        print(f"Monoenergy: {monoenergy} keV, photon flux: {photon_flux} photons/s/cm^2")
        scale_fac: Union[str, float] = input("Scaling for photon flux (1): ")
        if scale_fac == "":
            scale_fac = 1
        photon_flux *= float(scale_fac)
        return FluxObj(photon_flux, monoenergy)
    return None
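# The monoenergy computed above is the ARF-weighted mean energy over 2-7 keV.
# A NumPy sketch of the same arithmetic (the arrays are illustrative, not read
# from a real core_spectrum.arf):
import numpy as np

energ_lo = np.array([2.0, 3.0, 4.0, 5.0, 6.0])            # keV
energ_hi = np.array([3.0, 4.0, 5.0, 6.0, 7.0])            # keV
specresp = np.array([350.0, 420.0, 390.0, 300.0, 210.0])  # effective area, cm^2

mid_energy = (energ_lo + energ_hi) / 2.0
monoenergy = (mid_energy * specresp).sum() / specresp.sum()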
parser.add_argument('--clobber', help='Overwrite existing files.', default='no')
args = parser.parse_args()
args.burnin = int(args.burnin)
args.niterations = int(args.niterations)

#----Set file paths----

#---------------------------------------
# Prep Image Files
#---------------------------------------

# get obsids
target_obsid = crt.dmkeypar(args.targetimg, "OBS_ID")
ref_obsid = crt.dmkeypar(args.refimg, "OBS_ID")

# Create normalized images

# Target image
# read file
targetimgfile = fits.open(args.targetimg)
targetimg = targetimgfile[0].data
targetimgfile.close()
# divide image
targetimgtotal = np.sum(targetimg)
targetimg = targetimg/targetimgtotal

# Reference image
# read image