def run(configObj=None): # start by interpreting filename and hdrlet inputs filename = parseinput.parseinput(configObj['filename'])[0] hdrlet = parseinput.parseinput(configObj['hdrlet'])[0] if configObj['primary']: # Call function with properly interpreted input parameters # Syntax: apply_headerlet_as_primary(filename, hdrlet, attach=True, # archive=True, force=False, verbose=False) headerlet.apply_headerlet_as_primary(filename, hdrlet, attach=configObj['attach'], archive=configObj['archive'], force=configObj['force'], logging=configObj['logging']) else: wcsname = configObj['wcsname'] if wcsname in ['', ' ', 'INDEF']: wcsname = None wcskey = configObj['wcskey'] if wcskey == '': wcskey = None # Call function with properly interpreted input parameters # apply_headerlet_as_alternate(filename, hdrlet, attach=True, # wcskey=None, wcsname=None, verbose=False) headerlet.apply_headerlet_as_alternate(filename, hdrlet, attach=configObj['attach'], wcsname=wcsname, wcskey=wcskey, logging=configObj['logging'])
def wf3ccd(input, output="", dqicorr="PERFORM", atodcorr="PERFORM",
           blevcorr="PERFORM", biascorr="PERFORM", flashcorr="PERFORM",
           verbose=False, quiet=True):
    """Run the ``wf3ccd.e`` executable as from the shell."""
    call_list = ['wf3ccd.e']

    if verbose:
        call_list += ['-v', '-t']

    if dqicorr == "PERFORM":
        call_list.append('-dqi')

    if atodcorr == "PERFORM":
        call_list.append('-atod')

    if blevcorr == "PERFORM":
        call_list.append('-blev')

    if biascorr == "PERFORM":
        call_list.append('-bias')

    if flashcorr == "PERFORM":
        call_list.append('-flash')

    infiles, dummy_out = parseinput.parseinput(input)
    call_list.append(','.join(infiles))
    call_list.append(str(output))

    subprocess.call(call_list)
def wf32d(input, output="", dqicorr="PERFORM", darkcorr="PERFORM",
          flatcorr="PERFORM", shadcorr="PERFORM", photcorr="PERFORM",
          verbose=False, quiet=True, debug=False):
    """Call the ``wf32d.e`` executable as from the shell."""
    call_list = ['wf32d.e']

    if verbose:
        call_list += ['-v', '-t']

    if debug:
        call_list.append('-d')

    if darkcorr == "PERFORM":
        call_list.append('-dark')

    if dqicorr == "PERFORM":
        call_list.append('-dqi')

    if flatcorr == "PERFORM":
        call_list.append('-flat')

    if shadcorr == "PERFORM":
        call_list.append('-shad')

    if photcorr == "PERFORM":
        call_list.append('-phot')

    infiles, dummy_out = parseinput.parseinput(input)
    call_list.append(','.join(infiles))
    call_list.append(str(output))

    subprocess.call(call_list)
def wf3cte(input, out=None, parallel=True, verbose=False, log_func=print):
    """Run the ``wf3cte.e`` executable as from the shell."""
    call_list = ['wf3cte.e']

    if verbose:
        call_list.append('-v')

    if not parallel:
        call_list.append('-1')

    infiles, dummy_out = parseinput.parseinput(input)
    call_list.append(','.join(infiles))

    if out:
        call_list.append(str(out))

    proc = subprocess.Popen(
        call_list,
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
    )
    if log_func is not None:
        for line in proc.stdout:
            log_func(line.decode('utf8'))

    return_code = proc.wait()
    if return_code != 0:
        raise RuntimeError("wf3cte.e exited with code {}".format(return_code))
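A minimal usage sketch for the wrapper above (the filename is hypothetical and assumes ``wf3cte.e`` is on the shell PATH). Since ``log_func`` receives each decoded output line from the executable, any one-argument callable works as a sink:

# Capture the executable's output in a list instead of printing it.
captured = []
wf3cte('iabc01ckq_raw.fits', parallel=False, log_func=captured.append)
print(''.join(captured))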
def acssum(input, output, exec_path='', time_stamps=False, verbose=False, quiet=False): """ Run the acssum.e executable as from the shell. Parameters ---------- input : str or list of str Input filenames in one of these formats: * a Python list of filenames * a partial filename with wildcards ('\*flt.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) output : str Output filename. If `output` is '' and `input` is '\*_asn.fits', `output` will be automatically set to '\*_sfl.fits'. Otherwise, it is an error not to provide a specific `output`. exec_path : str, optional The complete path to ACSSUM executable. If not given, run ACSSUM given by 'acssum.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. """ if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acssum.e'] # Parse input to get list of filenames to process. # acssum.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) call_list.append(output) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') subprocess.call(call_list)
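Because every wrapper here funnels ``input`` through ``parseinput.parseinput``, the input notations documented above are interchangeable. A hedged sketch of equivalent calls (all filenames hypothetical):

acssum('j12345670_asn.fits', 'j12345670_sfl.fits')      # ASN table
acssum('*_flt.fits', 'sum_sfl.fits')                    # wildcard pattern
acssum(['j1_flt.fits', 'j2_flt.fits'], 'sum_sfl.fits')  # Python list
acssum('@files.lst', 'sum_sfl.fits')                    # at-file, one name per line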
def calwf3(input=None, output=None, printtime=False, save_tmp=False,
           verbose=False, debug=False, parallel=True, version=False,
           log_func=print):
    call_list = ['calwf3.e']
    return_code = None

    if version and input is None:
        call_list.append('-r')
    else:
        if printtime:
            call_list.append('-t')

        if save_tmp:
            call_list.append('-s')

        if verbose:
            call_list.append('-v')

        if debug:
            call_list.append('-d')

        if not parallel:
            call_list.append('-1')

        infiles, dummy = parseinput.parseinput(input)
        if len(parseinput.irafglob(input)) == 0:
            raise IOError("No valid image specified")
        if len(parseinput.irafglob(input)) > 1:
            raise IOError("calwf3 can only accept 1 file for "
                          "input at a time: {0}".format(infiles))

        for image in infiles:
            if not os.path.exists(image):
                raise IOError("Input file not found: {0}".format(image))

        call_list.append(input)

        if output:
            call_list.append(str(output))

    proc = subprocess.Popen(
        call_list,
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
    )
    if log_func is not None:
        for line in proc.stdout:
            log_func(line.decode('utf8'))

    return_code = proc.wait()
    if return_code:
        ec = error_code(return_code)
        if ec is None:
            print("Unknown return code found!")
            ec = return_code
        raise RuntimeError("calwf3.e exited with code {}".format(ec))
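Because ``calwf3`` rejects more than one input file per call, batch processing is simply a loop over single files. A minimal sketch (hypothetical filenames; assumes ``calwf3.e`` is on PATH):

from glob import glob

for raw in glob('i*_raw.fits'):
    calwf3(raw, save_tmp=False, verbose=False)

calwf3(version=True)  # with no input, '-r' just reports the CALWF3 version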
def run(input, reference=None, output='shifts.txt', findmode='catalog', **input_dict): """ Control the entire shift finding operation. """ print('Tweakshifts Version ', __version__, '\n') # Start by determining what input files we are to work with... flist, outfile = parseinput.parseinput(input) verbose = False if 'verbose' in input_dict and input_dict['verbose']: print('Computing shifts for :') print(flist) verbose = input_dict['verbose'] # Insure that a valid filename gets setup for reference WCS if reference != None: refname = reference if os.path.exists(refname): err_str = 'Output Reference WCS "' + refname + '" already exists! Please delete.' raise ValueError(err_str) else: refname = 'tweak_wcs.fits' # split remainder of parameters into algorithm specific sets # and use them for depending on what algorithm was selected. if findmode == 'catalog': match_pars = parseInputPars(input_dict, MATCH_PARS) match_pars['input'] = input shift_dict = runCatalog(flist, refname, match_pars) else: # Insure that flist does not include the name of the # reference WCS as well. ref_img = input_dict['crossref'] if flist.count(ref_img) > 0: flist.remove(ref_img) # Refname can not be derived from 'refimage' parameter # as the refimage parameter serves as an output, not an # input, as needed here... WJH 11 May 2005 crosscor_pars = parseInputPars(input_dict, CROSSCORR_PARS) shift_dict = runCrosscorr(flist, ref_img, crosscor_pars) w = wcsutil.WCSObject(ref_img) w.createReferenceWCS(refname) if len(shift_dict['order']) > 1: writeShiftFile(shift_dict, output, refname) if 'updatehdr' in input_dict and input_dict['updatehdr']: update_file_headers(shift_dict, refname, verbose=verbose) else: print('No suitable data for finding shifts.') print('No shift file written out.') if os.path.exists(refname): os.remove(refname)
def readWCS(input, exts=None, extname=None):
    if isinstance(input, str):
        if input[0] == '@':
            # input is an @ file
            filelist = irafglob.irafglob(input)
        else:
            try:
                filelist, output = parseinput.parseinput(input)
            except IOError:
                raise
    elif isinstance(input, list):
        if isinstance(input[0], wcsutil.HSTWCS):
            # a list of HSTWCS objects
            return input
        else:
            filelist = input[:]
    wcso = []
    fomited = []
    # figure out which FITS extension(s) to use
    if exts is None and extname is None:
        # Assume it's simple FITS and the data is in the primary HDU
        for f in filelist:
            try:
                wcso.append(wcsutil.HSTWCS(f))
            except AttributeError:
                fomited.append(f)
                continue
    elif exts is not None and validateExt(exts):
        exts = [exts]
        for f in filelist:
            try:
                wcso.extend([wcsutil.HSTWCS(f, ext=e) for e in exts])
            except KeyError:
                fomited.append(f)
                continue
    elif extname is not None:
        for f in filelist:
            fobj = fits.open(f)
            for i in range(len(fobj)):
                try:
                    ename = fobj[i].header['EXTNAME']
                except KeyError:
                    continue
                if ename.lower() == extname.lower():
                    wcso.append(wcsutil.HSTWCS(f, ext=i))
            fobj.close()
    if fomited:
        print("These files were skipped:")
        for f in fomited:
            print(f)
    return wcso
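A short usage sketch (hypothetical filenames). Selecting extensions by name walks every HDU and matches EXTNAME case-insensitively, so a multi-chip file yields one HSTWCS per matching extension:

# One HSTWCS per SCI extension across both files
wcs_list = readWCS(['j1_flt.fits', 'j2_flt.fits'], extname='SCI')
for w in wcs_list:
    print(w.wcs.crval)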
def run(configObj=None):
    # Interpret primary parameters from configObj instance
    extname = configObj['extname']
    input = configObj['input']

    # create dictionary of remaining parameters, deleting extraneous ones
    # such as those above
    cdict = configObj.dict()
    # remove any rules defined for the TEAL interface
    if "_RULES_" in cdict:
        del cdict['_RULES_']
    del cdict['_task_name_']
    del cdict['input']
    del cdict['extname']

    # parse input
    input, altfiles = parseinput.parseinput(configObj['input'])

    # Insure that all input files have a correctly archived
    # set of OPUS WCS keywords.
    # Legacy files from OTFR, like all WFPC2 data from OTFR, will only
    # have the OPUS WCS keywords archived using a prefix of 'O'.
    # These keywords need to be converted to the Paper I alternate WCS
    # standard using a wcskey (suffix) of 'O'.
    # If an alternate WCS with wcskey='O' already exists, this will copy
    # the values from the old prefix-'O' WCS keywords to insure the correct
    # OPUS keyword values get archived for use with updatewcs.
    #
    for fname in input:
        # Check to insure that there is a valid reference file to be used
        idctab = fits.getval(fname, 'idctab')
        if not os.path.exists(fileutil.osfn(idctab)):
            raise ValueError('No valid distortion reference file {} '
                             'found in {}!'.format(idctab, fname))

    # Re-define 'cdict' to only have switches for steps supported by that
    # instrument; the set of supported steps is defined by the dictionary
    # updatewcs.apply_corrections.allowed_corrections
    #
    for fname in input:
        # get instrument name from input file
        instr = fits.getval(fname, 'INSTRUME')
        # make copy of input parameters dict for this file
        fdict = cdict.copy()
        # Remove any parameter that is not part of this instrument's
        # allowed corrections
        for step in allowed_corr_dict:
            if allowed_corr_dict[step] not in \
                    updatewcs.apply_corrections.allowed_corrections[instr]:
                del fdict[step]
        # Call 'updatewcs' on correctly archived file
        updatewcs.updatewcs(fname, **fdict)
def __init__(self, input_file, edit_type=None, hdr_key=None, err_key=None,
             nref_par=None, force=None, noclean=False, dry_run=1,
             verbosity=0):
    """constructor

    Parameters
    -----------
    input_file : string
        name of the file or filelist to be processed
    edit_type : string
        type of file to update
    hdr_key : string
        name of keyword to update in file
    err_key : string
        name of keyword for error estimate
    nref_par : string
        name of the directory containing the nonlinearity file
    force : string
        name of algorithm whose value is to be returned
    noclean : {'True', 'False'}
        flag to force use of UNCLEANed 0th read.
    dry_run : {0,1} [Default: 1]
        flag to force not writing to header
    verbosity : {0,1,2}
        verbosity level (0 for quiet, 1 verbose, 2 very verbose)
    """
    if edit_type is None:
        edit_type = tfbutil.edit_type
    if hdr_key is None:
        hdr_key = tfbutil.hdr_key
    if err_key is None:
        err_key = tfbutil.err_key
    if force is None:
        force = tfbutil.force

    self.input_file = input_file
    self.edit_type = edit_type
    self.hdr_key = hdr_key
    self.err_key = err_key
    self.nref_par = nref_par
    self.force = force
    self.noclean = noclean
    self.dry_run = dry_run
    self.verbosity = verbosity
    self.tfb_version = __version__
    self.tfb_run = time.asctime()

    outlist = parseinput.parseinput(input_file)
    self.num_files = parseinput.countinputs(input_file)[0]
    self.outlist0 = outlist[0]

    if dry_run == 0 and self.verbosity > 0:
        print(' The dry_run option has been selected so keys will not be written.')
    if self.verbosity > 1:
        print(' Temp_from_bias run on ', self.tfb_run,
              ', version: ', self.tfb_version)
def countInput(input):
    files = parseinput.parseinput(input)
    count = len(files[0])
    for f in files[0]:
        if fileutil.isFits(f)[0]:
            try:
                ins = fits.getval(f, 'INSTRUME')
            except Exception:
                # allow odd fits files; do not stop the count
                ins = None
            if ins == 'STIS':
                count += (stisObsCount(f) - 1)
    return count
def run(configObj=None): flist, oname = parseinput.parseinput(configObj['filename']) if len(flist) == 0: print('=' * 60) print('ERROR:') print(' No valid "filename" parameter value provided!') print(' Please check the working directory and restart this task.') print('=' * 60) return if configObj['hdrname'] in ['', ' ', 'INDEF']: print('=' * 60) print('ERROR:') print(' No valid "hdrname" parameter value provided!') print( ' Please restart this task and provide a value for this parameter.' ) print('=' * 60) return if configObj['output'] in ['', ' ', 'INDEF']: configObj['output'] = None str_kw = [ 'wcsname', 'destim', 'sipname', 'npolfile', 'd2imfile', 'descrip', 'history', 'author', 'output', 'catalog' ] # create dictionary of remaining parameters, deleting extraneous ones # such as those above cdict = configObj.dict() # remove any rules defined for the TEAL interface if "_RULES_" in cdict: del cdict['_RULES_'] del cdict['_task_name_'] del cdict['filename'] del cdict['hdrname'] # Convert blank string input as None for kw in str_kw: if cdict[kw] == '': cdict[kw] = None if cdict['wcskey'].lower() == 'primary': cdict['wcskey'] = ' ' # Call function with properly interpreted input parameters # Syntax: write_headerlet(filename, hdrname, output, sciext='SCI', # wcsname=None, wcskey=None, destim=None, # sipname=None, npolfile=None, d2imfile=None, # author=None, descrip=None, history=None, # attach=True, clobber=False) headerlet.write_headerlet(flist, configObj['hdrname'], **cdict)
def parseMultipleInput(input): if isinstance(input, str): if input[0] == '@': # input is an @ file filelist = irafglob.irafglob(input) else: try: filelist, output = parseinput.parseinput(input) except IOError: raise elif isinstance(input, list): #if isinstance(input[0], HSTWCS): ## a list of HSTWCS objects #return input #else: filelist = input[:] return filelist
def parse_input(input, prodonly=False, sort_wildcards=True):
    catlist = None

    if not isinstance(input, list) and ('_asn' in input or '_asc' in input):
        # Input is an association table. Get the input files.
        oldasndict = asnutil.readASNTable(input, prodonly=prodonly)
        filelist = [fileutil.buildRootname(fname)
                    for fname in oldasndict['order']]

    elif not isinstance(input, list) and input[0] == '@':
        # input is an @ file
        f = open(input[1:])
        # Read the first line in order to determine whether
        # catalog files have been specified in a second column...
        line = f.readline()
        f.close()
        # Parse the @-file with irafglob to extract the input filename
        filelist = irafglob.irafglob(input, atfile=atfile_sci)
        # If there are additional columns for catalog files...
        if len(line.split()) > 1:
            # ...parse out the names of the catalog files as well
            catlist, catdict = parse_atfile_cat(input)

    elif isinstance(input, list):
        # input is a python list
        filelist = []
        for fn in input:
            flist, output = parse_input(fn, prodonly=prodonly)
            # if wild-cards are given, sort for uniform usage:
            if fn.find('*') > -1 and sort_wildcards:
                flist.sort()
            filelist += flist

    else:
        # input is either a string or something unrecognizable,
        # so give it a try:
        try:
            filelist, output = parseinput.parseinput(input)
            # if wild-cards are given, sort for uniform usage:
            if input.find('*') > -1 and sort_wildcards:
                filelist.sort()
        except IOError:
            raise

    return filelist, catlist
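For reference, an @-file whose optional second column names per-image catalogs, as handled in the branch above, might look like this (contents hypothetical):

# files.lst:
#     j1_flt.fits  j1_sources.coo
#     j2_flt.fits  j2_sources.coo
filelist, catlist = parse_input('@files.lst')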
def run(configObj=None): if configObj['hdrname'] == '' and configObj['hdrext'] is None and \ configObj['distname'] == '': print('=' * 60) print('ERROR:') print(' No valid "hdrname", "hdrext" or "distname" parameter value provided!') print(' Please restart this task and provide a value for one of these parameters.') print('=' * 60) return filename = parseinput.parseinput(configObj['filename'])[0] # Call function with properly interpreted input parameters # Syntax: delete_headerlet(filename, hdrname=None, hdrext=None, distname=None) headerlet.delete_headerlet(filename, hdrname=configObj['hdrname'], hdrext=configObj['hdrext'], distname=configObj['distname'], logging=configObj['logging'])
def wf3ir(input, output=None, verbose=False, quiet=True, log_func=print):
    """Call the ``wf3ir.e`` executable as from the shell."""
    call_list = ['wf3ir.e']
    return_code = None

    if verbose:
        call_list += ['-v', '-t']

    infiles, dummy = parseinput.parseinput(input)
    if "_asn" in input:
        raise IOError("wf3ir does not accept association tables")
    if len(parseinput.irafglob(input)) == 0:
        raise IOError("No valid image specified")
    if len(parseinput.irafglob(input)) > 1:
        raise IOError("wf3ir can only accept 1 file for "
                      "input at a time: {0}".format(infiles))

    for image in infiles:
        if not os.path.exists(image):
            raise IOError("Input file not found: {0}".format(image))

    call_list.append(input)

    if output:
        call_list.append(str(output))

    proc = subprocess.Popen(
        call_list,
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
    )
    if log_func is not None:
        for line in proc.stdout:
            log_func(line.decode('utf8'))

    return_code = proc.wait()
    if return_code:
        ec = error_code(return_code)
        if ec is None:
            print("Unknown return code found!")
            ec = return_code
        raise RuntimeError("wf3ir.e exited with code {}".format(ec))
def _process_input_wcs(infiles, wcskey, updatewcs): """ This is a subset of process_input(), for internal use only. This is the portion of input handling which sets/updates WCS data, and is a performance hit - a target for parallelization. Returns the expanded list of filenames. """ # Run parseinput though it's likely already been done in processFilenames outfiles = parseinput.parseinput(infiles)[0] # Disable parallel processing here for now until hardware I/O gets "wider". # Since this part is IO bound, parallelizing doesn't help more than a little # in most cases, and may actually slow this down on some desktop nodes. # cfgval_num_cores = None # get this from paramDict # pool_size = util.get_pool_size(cfgval_num_cores, len(outfiles)) pool_size = 1 # do the WCS updating if wcskey in ['', ' ', 'INDEF', None]: if updatewcs: log.info('Updating input WCS using "updatewcs"') else: log.info('Resetting input WCS to be based on WCS key = %s' % wcskey) if pool_size > 1: log.info('Executing %d parallel workers' % pool_size) subprocs = [] mp_ctx = multiprocessing.get_context('fork') for fname in outfiles: p = mp_ctx.Process( target=_process_input_wcs_single, name='processInput._process_input_wcs()', # for err msgs args=(fname, wcskey, updatewcs)) subprocs.append(p) mputil.launch_and_wait(subprocs, pool_size) # blocks till all done else: log.info('Executing serially') for fname in outfiles: _process_input_wcs_single(fname, wcskey, updatewcs) return outfiles
def __init__(self, filename_list, keyword_list, extension='all',
             expr="None"):
    """Set initial parameters, then call class functions for performing
    the selection and formatting into a masked astropy table.
    """
    self.filename_list = parseinput.parseinput(filename_list)[0]
    self.final_key_dict = {}
    self.keyword_list = keyword_list.split(",")
    self.final_key_set = set()
    self.extension = extension
    self.expr = expr
    self.table = Table()

    self.select()
    self.__dict_to_table()
def _process_input_wcs(infiles, wcskey, updatewcs): """ This is a subset of process_input(), for internal use only. This is the portion of input handling which sets/updates WCS data, and is a performance hit - a target for parallelization. Returns the expanded list of filenames. """ # Run parseinput though it's likely already been done in processFilenames outfiles = parseinput.parseinput(infiles)[0] # Disable parallel processing here for now until hardware I/O gets "wider". # Since this part is IO bound, parallelizing doesn't help more than a little # in most cases, and may actually slow this down on some desktop nodes. # cfgval_num_cores = None # get this from paramDict # pool_size = util.get_pool_size(cfgval_num_cores, len(outfiles)) pool_size = 1 # do the WCS updating if wcskey in ['', ' ', 'INDEF', None]: if updatewcs: log.info('Updating input WCS using "updatewcs"') else: log.info('Resetting input WCS to be based on WCS key = %s' % wcskey) if pool_size > 1: log.info('Executing %d parallel workers' % pool_size) subprocs = [] for fname in outfiles: p = multiprocessing.Process(target=_process_input_wcs_single, name='processInput._process_input_wcs()', # for err msgs args=(fname, wcskey, updatewcs) ) subprocs.append(p) mputil.launch_and_wait(subprocs, pool_size) # blocks till all done else: log.info('Executing serially') for fname in outfiles: _process_input_wcs_single(fname, wcskey, updatewcs) return outfiles
def replace(input, **pars):
    """ Replace pixels in ``input`` that have a value of ``pixvalue``
        with a value given by ``newvalue``.
    """
    pixvalue = pars.get('pixvalue', np.nan)
    if pixvalue is None:
        pixvalue = np.nan  # insure that None == np.nan
    newvalue = pars.get('newvalue', 0.0)
    ext = pars.get('ext', None)
    if ext in ['', ' ', 'None', None]:
        ext = None

    files = parseinput.parseinput(input)[0]

    for f in files:
        fimg = fits.open(f, mode='update')

        if ext is None:
            # replace pixels in ALL extensions
            extn = [i for i in fimg]
        else:
            if isinstance(ext, list):
                extn = [fimg[e] for e in ext]
            else:
                extn = [fimg[ext]]

        for e in extn:
            if e.data is not None and e.is_image:  # ignore empty Primary HDUs
                print("Converting {}[{},{}] value of {} to {}".format(
                    f, e.name, e.ver, pixvalue, newvalue))
                if np.isnan(pixvalue):
                    e.data[np.isnan(e.data)] = newvalue
                else:
                    e.data[np.where(e.data == pixvalue)] = newvalue

        fimg.close()
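A usage sketch for ``replace`` (hypothetical filename). Note that a list passed as ``ext`` must contain indices that an ``HDUList`` accepts, e.g. integers or ``(EXTNAME, EXTVER)`` tuples:

# Zero out NaNs in both SCI extensions, editing the file in place
replace('j1_flt.fits', pixvalue=np.nan, newvalue=0.0,
        ext=[('SCI', 1), ('SCI', 2)])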
def acssum(input, output, exec_path='', time_stamps=False, verbose=False, quiet=False, exe_args=None): r""" Run the acssum.e executable as from the shell. Parameters ---------- input : str or list of str Input filenames in one of these formats: * a Python list of filenames * a partial filename with wildcards ('\*flt.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) output : str Output filename. If `output` is '' and `input` is '\*_asn.fits', `output` will be automatically set to '\*_sfl.fits'. Otherwise, it is an error not to provide a specific `output`. exec_path : str, optional The complete path to ACSSUM executable. If not given, run ACSSUM given by 'acssum.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1'] """ from stsci.tools import parseinput # Optional package dependency if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acssum.e'] # Parse input to get list of filenames to process. # acssum.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) call_list.append(output) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') if exe_args: call_list.extend(exe_args) subprocess.check_call(call_list)
def acscteforwardmodel(input, exec_path='', time_stamps=False, verbose=False, quiet=False, single_core=False, exe_args=None): r""" Run the acscteforwardmodel.e executable as from the shell. Expect input to be ``*_blc_tmp.fits`` or ``*_flc.fits``. Output is automatically named ``*_ctefmod.fits``. Parameters ---------- input : str or list of str Input filenames in one of these formats: * a single filename ('j1234567q_blc_tmp.fits') * a Python list of filenames * a partial filename with wildcards ('\*blc_tmp.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) exec_path : str, optional The complete path to ACSCTE forward model executable. If not given, run ACSCTE given by 'acscteforwardmodel.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. single_core : bool, optional CTE correction in the ACSCTE forward model will by default try to use all available CPUs on your computer. Set this to True to force the use of just one CPU. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1'] """ from stsci.tools import parseinput # Optional package dependency if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acscteforwardmodel.e'] # Parse input to get list of filenames to process. # acscte.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') if single_core: call_list.append('-1') if exe_args: call_list.extend(exe_args) subprocess.check_call(call_list) # nosec
def create_astrometric_catalog(inputs, **pars):
    """Create an astrometric catalog that covers the inputs' field-of-view.

    Parameters
    ===========
    inputs : str
        Filenames of images to be aligned to astrometric catalog

    catalog : str, optional
        Name of catalog to extract astrometric positions for sources in the
        input images' field-of-view. Default: GAIADR2. Options available
        are documented on the catalog web page.

    output : str, optional
        Filename to give to the astrometric catalog read in from the master
        catalog web service. If 'None', no file will be written out.
        Default: ref_cat.ecsv

    gaia_only : bool, optional
        Specify whether or not to only use sources from GAIA in the output
        catalog. Default: False

    existing_wcs : HST.wcs object
        existing WCS object specified by the user

    note ::
        This function will point to the astrometric catalog web service
        defined through the use of the ASTROMETRIC_CATALOG_URL environment
        variable.

    Returns
    =======
    ref_table : object
        Astropy Table object of the catalog
    """
    # interpret input parameters
    catalog = pars.get("catalog", 'GAIADR2')
    output = pars.get("output", 'ref_cat.ecsv')
    gaia_only = pars.get("gaia_only", False)
    table_format = pars.get("table_format", 'ascii.ecsv')
    existing_wcs = pars.get("existing_wcs", None)
    inputs, _ = parseinput.parseinput(inputs)

    # start by creating a composite field-of-view for all inputs.
    # This default output WCS will have the same plate-scale and orientation
    # as the first chip in the list, which for WFPC2 data means the PC.
    # Fortunately, for alignment, this doesn't matter since no resampling of
    # data will be performed.
    if existing_wcs:
        outwcs = existing_wcs
    else:
        outwcs = build_reference_wcs(inputs)
    radius = compute_radius(outwcs)
    ra, dec = outwcs.wcs.crval

    # perform query for this field-of-view
    ref_dict = get_catalog(ra, dec, sr=radius, catalog=catalog)
    colnames = ('ra', 'dec', 'mag', 'objID', 'GaiaID')
    col_types = ('f8', 'f8', 'f4', 'U25', 'U25')
    ref_table = Table(names=colnames, dtype=col_types)

    # rename coordinate columns to be consistent with tweakwcs
    ref_table.rename_column('ra', 'RA')
    ref_table.rename_column('dec', 'DEC')

    # extract just the columns we want...
    num_sources = 0
    for source in ref_dict:
        if 'GAIAsourceID' in source:
            g = source['GAIAsourceID']
            if gaia_only and g.strip() == '':
                continue
        else:
            g = -1  # indicator for no source ID extracted
        r = float(source['ra'])
        d = float(source['dec'])
        m = -999.9  # float(source['mag'])
        o = source['objID']
        num_sources += 1
        ref_table.add_row((r, d, m, o, g))

    # Write out table to a file, if specified
    if output:
        ref_table.write(output, format=table_format)
        print("Created catalog '{}' with {} sources".format(
            output, num_sources))

    return ref_table
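A minimal sketch of calling the catalog builder above (hypothetical filenames; requires network access to the service named by the ASTROMETRIC_CATALOG_URL environment variable):

ref = create_astrometric_catalog(['j1_flt.fits', 'j2_flt.fits'],
                                 catalog='GAIADR2',
                                 output='gaia_ref.ecsv')
print(len(ref), 'astrometric sources found')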
def updatewcs(input, vacorr=True, tddcorr=True, npolcorr=True, d2imcorr=True,
              checkfiles=True, verbose=False, use_db=True):
    """
    Updates HST science files with the best available calibration
    information. This allows users to retrieve from the archive
    self-contained science files which do not require additional
    reference files.

    Basic WCS keywords are updated in the process and new keywords
    (following WCS Paper IV and the SIP convention) as well as new
    extensions are added to the science files.

    Examples
    --------
    >>> from stwcs import updatewcs
    >>> updatewcs.updatewcs(filename)

    Dependencies
    ------------
    `stsci.tools`
    `astropy.io.fits`
    `astropy.wcs`
    `requests`
    `lxml`

    Parameters
    ----------
    input : a python list of file names or a string (wild card
            characters allowed) input files may be in fits, geis or
            waiver fits format
    vacorr : boolean
        If True, velocity aberration correction will be applied
    tddcorr : boolean
        If True, time dependent distortion correction will be applied
    npolcorr : boolean
        If True, a Lookup table distortion will be applied
    d2imcorr : boolean
        If True, detector to image correction will be applied
    checkfiles : boolean
        If True, the format of the input files will be checked,
        geis and waiver fits files will be converted to MEF format.
        Default value is True for standalone mode.
    use_db : boolean
        If True, attempt to add astrometric solutions from the
        MAST astrometry database. Default value is True.
    """
    if not verbose:
        logger.setLevel(100)
        write_db_log = False
    else:
        fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        formatter = logging.Formatter(fmt)
        log_filename = 'stwcs.log'
        fh = logging.FileHandler(log_filename, mode='w')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        logger.setLevel(verbose)
        write_db_log = True
    args = "vacorr=%s, tddcorr=%s, npolcorr=%s, d2imcorr=%s, checkfiles=%s" % \
        (str(vacorr), str(tddcorr), str(npolcorr), str(d2imcorr),
         str(checkfiles))
    logger.info('\n\tStarting UPDATEWCS: %s', time.asctime())

    toclose = True

    if isinstance(input, fits.HDUList):
        input = [input]

    if isinstance(input, list) and isinstance(input[0], fits.HDUList):
        files = input
        file_names = [inp.filename() for inp in files]
        toclose = False
    else:
        file_names = parseinput.parseinput(input)[0]
        files = []
        for item in file_names:
            files.append(fits.open(item, mode='update'))

    logger.info("\n\tInput files: {}".format(file_names))
    logger.info("\n\tInput arguments: %s" % args)

    if checkfiles:
        files = checkFiles(files)
        file_names = [inp.filename() for inp in files]
        if not files:
            print('No valid input, quitting ...\n')
            return

    if use_db:
        # Establish any available connection to
        # an accessible astrometry web-service
        astrometry = astrometry_utils.AstrometryDB(write_log=write_db_log)

    for f in files:
        acorr = apply_corrections.setCorrections(f, vacorr=vacorr,
                                                 tddcorr=tddcorr,
                                                 npolcorr=npolcorr,
                                                 d2imcorr=d2imcorr)
        if 'MakeWCS' in acorr and newIDCTAB(f):
            logger.warning("\n\tNew IDCTAB file detected. "
                           "All current WCSs will be deleted")
            cleanWCS(f)
        makecorr(f, acorr)

        if use_db:
            # Add any new astrometry solutions available from
            # an accessible astrometry web-service
            astrometry.updateObs(f)

        if toclose:
            f.close()

    return file_names
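Since this version of ``updatewcs`` also accepts already-opened HDULists (and then leaves closing to the caller, as ``toclose`` stays False), an in-memory update can be sketched as (hypothetical filename):

from astropy.io import fits

hdul = fits.open('j1_flt.fits', mode='update')
updatewcs(hdul, use_db=False)  # caller keeps responsibility for closing
hdul.flush()
hdul.close()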
def clean(input, suffix, stat="pmode1", maxiter=15, sigrej=2.0,
          lower=None, upper=None, binwidth=0.3,
          mask1=None, mask2=None, dqbits=None,
          rpt_clean=0, atol=0.01, clobber=False, verbose=True):
    r"""Remove horizontal stripes from ACS WFC post-SM4 data.

    Parameters
    ----------
    input : str or list of str
        Input filenames in one of these formats:

            * a Python list of filenames
            * a partial filename with wildcards ('\*flt.fits')
            * filename of an ASN table ('j12345670_asn.fits')
            * an at-file (``@input``)

    suffix : str
        The string to use to add to each input file name to
        indicate an output product. This string will be appended
        to the suffix in each input filename to create the
        new output filename. For example, setting `suffix='csck'`
        will create '\*_csck.fits' images.

    stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
        Specifies the statistics to be used for computation of the
        background in image rows:

        * 'pmode1' - SEXTRACTOR-like mode estimate based on a
          modified `Pearson's rule <https://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
          ``2.5*median-1.5*mean``;
        * 'pmode2' - mode estimate based on
          `Pearson's rule <https://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
          ``3*median-2*mean``;
        * 'mean' - the mean of the distribution of the "good" pixels
          (after clipping, masking, etc.);
        * 'mode' - the mode of the distribution of the "good" pixels;
        * 'median' - the median of the distribution of the "good" pixels;
        * 'midpt' - estimate of the median of the distribution of the
          "good" pixels based on an algorithm similar to IRAF's
          ``imagestats`` task (``CDF(midpt)=1/2``).

        .. note::
            The midpoint and mode are computed in two passes through the
            image. In the first pass the standard deviation of the pixels
            is calculated and used with the *binwidth* parameter to compute
            the resolution of the data histogram. The midpoint is estimated
            by integrating the histogram and computing by interpolation
            the data value at which exactly half the pixels are below that
            data value and half are above it. The mode is computed by
            locating the maximum of the data histogram and fitting the peak
            by parabolic interpolation.

    maxiter : int
        This parameter controls the maximum number of iterations
        to perform when computing the statistics used to compute the
        row-by-row corrections.

    sigrej : float
        This parameter sets the sigma level for the rejection applied
        during each iteration of statistics computations for the
        row-by-row corrections.

    lower : float, None (Default = None)
        Lower limit of usable pixel values for computing the background.
        This value should be specified in the units of the input image(s).

    upper : float, None (Default = None)
        Upper limit of usable pixel values for computing the background.
        This value should be specified in the units of the input image(s).

    binwidth : float (Default = 0.3)
        Histogram's bin width, in sigma units, used to sample the
        distribution of pixel brightness values in order to compute the
        background statistics. This parameter is applicable *only* to
        *stat* parameter values of `'mode'` or `'midpt'`.

    clobber : bool
        Specify whether or not to 'clobber' (delete then replace)
        previously generated products with the same names.

    mask1 : str, numpy.ndarray, None, or list of these types
        Mask images for ``SCI,1``, one for each input file.
        Pixels with zero values will be masked out, in addition to clipping.

    mask2 : str, numpy.ndarray, None, or list of these types
        Mask images for ``SCI,2``, one for each input file.
        Pixels with zero values will be masked out, in addition to clipping.
        This is not used for subarrays.

    dqbits : int, str, None (Default = None)
        Integer sum of all the DQ bit values from the input image's DQ
        array that should be considered "good" when building masks for
        de-striping computations. For example, if pixels in the DQ array
        can be combinations of 1, 2, 4, and 8 flags and one wants to
        consider DQ "defects" having flags 2 and 4 as being acceptable for
        de-striping computations, then `dqbits` should be set to 2+4=6.
        Then a DQ pixel having values 2, 4, or 6 will be considered a good
        pixel, while a DQ pixel with a value, e.g., 1+2=3, 4+8=12, etc.
        will be flagged as a "bad" pixel.

        Alternatively, one can enter a comma- or '+'-separated list of
        integer bit flags that should be added to obtain the final "good"
        bits. For example, both ``4,8`` and ``4+8`` are equivalent to
        setting `dqbits` to 12.

        | Set `dqbits` to 0 to make *all* non-zero pixels in the DQ
          mask to be considered "bad" pixels, and the corresponding image
          pixels not to be used for de-striping computations.

        | Default value (`None`) will turn off the use of image's DQ array
          for de-striping computations.

        | In order to reverse the meaning of the `dqbits` parameter from
          indicating values of the "good" DQ flags to indicating the "bad"
          DQ flags, prepend '~' to the string value. For example, in order
          not to use pixels with DQ flags 4 and 8 for sky computations and
          to consider as "good" all other pixels (regardless of their DQ
          flag), set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the same
          effect with an `int` input value (except for 0), enter
          -(4+8+1)=-9. Following this convention, a `dqbits` string value
          of ``'~0'`` would be equivalent to setting ``dqbits=None``.

        .. note::
            DQ masks (if used), *will be* combined with user masks
            specified in the `mask1` and `mask2` parameters (if any).

    rpt_clean : int
        An integer indicating how many *additional* times stripe cleaning
        should be performed on the input image. Default = 0.

    atol : float, None
        The threshold for maximum absolute value of bias stripe correction
        below which repeated cleanings can stop. When `atol` is `None`
        cleaning will be repeated `rpt_clean` number of times.
        Default = 0.01 [e].

    verbose : bool
        Print informational messages. Default = True.
""" from stsci.tools import parseinput # Optional package dependency flist = parseinput.parseinput(input)[0] if isinstance(mask1, str): mlist1 = parseinput.parseinput(mask1)[0] elif isinstance(mask1, np.ndarray): mlist1 = [mask1.copy()] elif mask1 is None: mlist1 = [] elif isinstance(mask1, list): mlist1 = [] for m in mask1: if isinstance(m, np.ndarray): mlist1.append(m.copy()) elif isinstance(m, str): mlist1 += parseinput.parseinput(m)[0] else: raise TypeError("'mask1' must be a list of str or " "numpy.ndarray values.") else: raise TypeError("'mask1' must be either a str, or a " "numpy.ndarray, or a list of the two type of " "values.") if isinstance(mask2, str): mlist2 = parseinput.parseinput(mask2)[0] elif isinstance(mask2, np.ndarray): mlist2 = [mask2.copy()] elif mask2 is None: mlist2 = [] elif isinstance(mask2, list): mlist2 = [] for m in mask2: if isinstance(m, np.ndarray): mlist2.append(m.copy()) elif isinstance(m, str): mlist2 += parseinput.parseinput(m)[0] else: raise TypeError("'mask2' must be a list of str or " "numpy.ndarray values.") else: raise TypeError("'mask2' must be either a str or a " "numpy.ndarray, or a list of the two type of " "values.") n_input = len(flist) n_mask1 = len(mlist1) n_mask2 = len(mlist2) if n_input == 0: raise ValueError("No input file(s) provided or " "the file(s) do not exist") if n_mask1 == 0: mlist1 = [None] * n_input elif n_mask1 != n_input: raise ValueError('Insufficient masks for [SCI,1]') if n_mask2 == 0: mlist2 = [None] * n_input elif n_mask2 != n_input: raise ValueError('Insufficient masks for [SCI,2]') for image, maskfile1, maskfile2 in zip(flist, mlist1, mlist2): # Skip processing pre-SM4 images if (fits.getval(image, 'EXPSTART') <= SM4_MJD): LOG.warning(f'{image} is pre-SM4. Skipping...') continue # Data must be in ELECTRONS if (fits.getval(image, 'BUNIT', ext=1) != 'ELECTRONS'): LOG.warning(f'{image} is not in ELECTRONS. Skipping...') continue # Skip processing CTECORR-ed images if (fits.getval(image, 'PCTECORR') == 'COMPLETE'): LOG.warning(f'{image} already has PCTECORR applied. Skipping...') continue # generate output filename for each input based on specification # of the output suffix output = image.replace('.fits', '_' + suffix + '.fits') LOG.info('Processing ' + image) # verify masks defined (or not) simultaneously: if (fits.getval(image, 'CCDAMP') == 'ABCD' and ((mask1 is not None and mask2 is None) or (mask1 is None and mask2 is not None))): raise ValueError("Both 'mask1' and 'mask2' must be specified " "or not specified together.") maskdata = _read_mask(maskfile1, maskfile2) perform_correction(image, output, stat=stat, maxiter=maxiter, sigrej=sigrej, lower=lower, upper=upper, binwidth=binwidth, mask=maskdata, dqbits=dqbits, rpt_clean=rpt_clean, atol=atol, clobber=clobber, verbose=verbose) LOG.info(output + ' created')
def acscte(input, exec_path='', time_stamps=False, verbose=False, quiet=False, single_core=False): """ Run the acscte.e executable as from the shell. Expect input to be ``*_blv_tmp.fits``. Output is automatically named ``*_blc_tmp.fits``. Parameters ---------- input : str or list of str Input filenames in one of these formats: * a single filename ('j1234567q_blv_tmp.fits') * a Python list of filenames * a partial filename with wildcards ('\*blv_tmp.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) exec_path : str, optional The complete path to ACSCTE executable. If not given, run ACSCTE given by 'acscte.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. single_core : bool, optional CTE correction in ACSCTE will by default try to use all available CPUs on your computer. Set this to True to force the use of just one CPU. """ if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acscte.e'] # Parse input to get list of filenames to process. # acscte.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') if single_core: call_list.append('-1') subprocess.call(call_list)
def acsccd(input, exec_path='', time_stamps=False, verbose=False, quiet=False): """ Run the acsccd.e executable as from the shell. Expect input to be ``*_raw.fits``. Output is automatically named ``*_blv_tmp.fits``. Parameters ---------- input : str or list of str Input filenames in one of these formats: * a single filename ('j1234567q_raw.fits') * a Python list of filenames * a partial filename with wildcards ('\*raw.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) exec_path : str, optional The complete path to ACSCCD executable. If not given, run ACSCCD given by 'acsccd.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. """ if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acsccd.e'] # Parse input to get list of filenames to process. # acsccd.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') #if dqicorr: # call_list.append('-dqi') #if atodcor: # call_list.append('-atod') #if blevcorr: # call_list.append('-blev') #if biascorr: # call_list.append('-bias') subprocess.call(call_list)
def wf3rej(input, output="", crrejtab="", scalense="", initgues="",
           skysub="", crsigmas="", crradius=0, crthresh=0,
           badinpdq=0, crmask=False, shadcorr=False, verbose=False):
    """Call the ``wf3rej.e`` executable as from the shell."""
    call_list = ["wf3rej.e"]

    infiles, dummy_out = parseinput.parseinput(input)
    call_list.append(','.join(infiles))
    call_list.append(str(output))

    if verbose:
        call_list.append("-v")
        call_list.append("-t")

    if shadcorr:
        call_list.append("-shadcorr")

    if crmask:
        call_list.append("-crmask")

    if crrejtab != "":
        call_list += ["-table", crrejtab]

    if scalense != "":
        call_list += ["-scale", str(scalense)]

    if initgues != "":
        options = ["min", "med"]
        if initgues not in options:
            raise ValueError("Invalid option for initgues: "
                             "{0}".format(initgues))
        call_list += ["-init", str(initgues)]

    if skysub != "":
        options = ["none", "mode", "median"]
        if skysub not in options:
            raise ValueError("Invalid skysub option: {0}; must be one of "
                             "{1}".format(skysub, options))
        call_list += ["-sky", str(skysub)]

    if crsigmas != "":
        call_list += ["-sigmas", str(crsigmas)]

    if crradius >= 0.:
        call_list += ["-radius", str(crradius)]
    else:
        raise ValueError("Invalid crradius specified")

    if crthresh >= 0.:
        call_list += ["-thresh", str(crthresh)]
    else:
        raise ValueError("Invalid crthresh specified")

    if badinpdq >= 0:
        call_list += ["-pdq", str(badinpdq)]
    else:
        raise ValueError("Invalid DQ value specified")

    subprocess.call(call_list)
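A usage sketch for the wrapper above (hypothetical filenames; assumes ``wf3rej.e`` is on PATH):

wf3rej('ib1f01a0q_flt.fits,ib1f01a1q_flt.fits', 'ib1f01_crj.fits',
       initgues='med', skysub='mode', crmask=True, verbose=True)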
def acs2d(input, exec_path='', time_stamps=False, verbose=False, quiet=False, exe_args=None): r""" Run the acs2d.e executable as from the shell. Output is automatically named based on input suffix: +--------------------+----------------+------------------------------+ | INPUT | OUTPUT | EXPECTED DATA | +====================+================+==============================+ | ``*_raw.fits`` | ``*_flt.fits`` | SBC image. | +--------------------+----------------+------------------------------+ | ``*_blv_tmp.fits`` | ``*_flt.fits`` | ACSCCD output. | +--------------------+----------------+------------------------------+ | ``*_blc_tmp.fits`` | ``*_flc.fits`` | ACSCCD output with PCTECORR. | +--------------------+----------------+------------------------------+ | ``*_crj_tmp.fits`` | ``*_crj.fits`` | ACSREJ output. | +--------------------+----------------+------------------------------+ | ``*_crc_tmp.fits`` | ``*_crc.fits`` | ACSREJ output with PCTECORR. | +--------------------+----------------+------------------------------+ Parameters ---------- input : str or list of str Input filenames in one of these formats: * a single filename ('j1234567q_blv_tmp.fits') * a Python list of filenames * a partial filename with wildcards ('\*blv_tmp.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) exec_path : str, optional The complete path to ACS2D executable. If not given, run ACS2D given by 'acs2d.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1'] """ from stsci.tools import parseinput # Optional package dependency if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acs2d.e'] # Parse input to get list of filenames to process. # acs2d.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') if exe_args: call_list.extend(exe_args) subprocess.check_call(call_list)
def acsrej(input, output, exec_path='', time_stamps=False, verbose=False, shadcorr=False, crrejtab='', crmask=False, scalense=None, initgues='', skysub='', crsigmas='', crradius=None, crthresh=None, badinpdq=None, newbias=False, readnoise_only=False, exe_args=None): r""" Run the acsrej.e executable as from the shell. Parameters ---------- input : str or list of str Input filenames in one of these formats: * a Python list of filenames * a partial filename with wildcards ('\*flt.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) output : str Output filename. exec_path : str, optional The complete path to ACSREJ executable. If not given, run ACSREJ given by 'acsrej.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. shadcorr : bool, optional Perform shutter shading correction. If this is False but SHADCORR is set to PERFORM in the header of the first image, the correction will be applied anyway. Only use this with CCD image, not SBC MAMA. crrejtab : str, optional CRREJTAB to use. If not given, will use CRREJTAB given in the primary header of the first input image. crmask : bool, optional Flag CR-rejected pixels in input files. If False, will use CRMASK value in CRREJTAB. scalense : float, optional Multiplicative scale factor (in percents) applied to noise. Acceptable values are 0 to 100, inclusive. If None, will use SCALENSE from CRREJTAB. initgues : {'med', 'min'}, optional Scheme for computing initial-guess image. If not given, will use INITGUES from CRREJTAB. skysub : {'none', 'mode'}, optional Scheme for computing sky levels to be subtracted. If not given, will use SKYSUB from CRREJTAB. crsigmas : str, optional Cosmic ray rejection thresholds given in the format of 'sig1,sig2,...'. Number of sigmas given will be the number of rejection iterations done. At least 1 and at most 20 sigmas accepted. If not given, will use CRSIGMAS from CRREJTAB. crradius : float, optional Radius (in pixels) to propagate the cosmic ray. If None, will use CRRADIUS from CRREJTAB. crthresh : float, optional Cosmic ray rejection propagation threshold. If None, will use CRTHRESH from CRREJTAB. badinpdq : int, optional Data quality flag used for cosmic ray rejection. If None, will use BADINPDQ from CRREJTAB. newbias : bool, optional This option has been deprecated. Use ``readnoise_only``. readnoise_only : bool, optional ERR is just read noise, not Poisson noise. This is used for BIAS images. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1'] """ from stsci.tools import parseinput # Optional package dependency if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acsrej.e'] # Parse input to get list of filenames to process. # acsrej.e only takes 'file1,file2,...' 
    infiles, dummy_out = parseinput.parseinput(input)
    call_list.append(','.join(infiles))
    call_list.append(output)

    if time_stamps:
        call_list.append('-t')

    if verbose:
        call_list.append('-v')

    if shadcorr:
        call_list.append('-shadcorr')

    if crrejtab:
        call_list += ['-table', crrejtab]

    if crmask:
        call_list.append('-crmask')

    if scalense is not None:
        if scalense < 0 or scalense > 100:
            raise ValueError('SCALENSE must be 0 to 100')
        call_list += ['-scale', str(scalense)]

    if initgues:
        if initgues not in ('med', 'min'):
            raise ValueError('INITGUES must be "med" or "min"')
        call_list += ['-init', initgues]

    if skysub:
        if skysub not in ('none', 'mode'):
            raise ValueError('SKYSUB must be "none" or "mode"')
        call_list += ['-sky', skysub]

    if crsigmas:
        call_list += ['-sigmas', crsigmas]

    if crradius is not None:
        call_list += ['-radius', str(crradius)]

    if crthresh is not None:
        call_list += ['-thresh', str(crthresh)]

    if badinpdq is not None:
        call_list += ['-pdq', str(badinpdq)]

    # Backward-compatibility for readnoise_only.
    # TODO: Remove this option entirely in a future release.
    if newbias:
        warnings.warn('newbias is deprecated, use readnoise_only',
                      ACSREJDeprecationWarning)
        readnoise_only = newbias

    if readnoise_only:
        call_list.append('-readnoise_only')

    if exe_args:
        call_list.extend(exe_args)

    subprocess.check_call(call_list)
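A usage sketch for ``acsrej`` (hypothetical filenames; assumes ``acsrej.e`` is on PATH). Each value in ``crsigmas`` adds one rejection iteration:

acsrej('*_flt.fits', 'combined_crj.fits',
       crsigmas='6.5,5.5,4.5', crradius=1.0, initgues='med',
       skysub='mode', readnoise_only=False)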
def updatewcs(input, vacorr=True, tddcorr=True, npolcorr=True, d2imcorr=True,
              checkfiles=True, verbose=False):
    """
    Updates HST science files with the best available calibration
    information. This allows users to retrieve from the archive
    self-contained science files which do not require additional
    reference files.

    Basic WCS keywords are updated in the process and new keywords
    (following WCS Paper IV and the SIP convention) as well as new
    extensions are added to the science files.

    Examples
    --------
    >>> from stwcs import updatewcs
    >>> updatewcs.updatewcs(filename)

    Dependencies
    ------------
    `stsci.tools`
    `astropy.io.fits`
    `astropy.wcs`

    Parameters
    ----------
    input : a python list of file names or a string (wild card
            characters allowed) input files may be in fits, geis or
            waiver fits format
    vacorr : boolean
        If True, velocity aberration correction will be applied
    tddcorr : boolean
        If True, time dependent distortion correction will be applied
    npolcorr : boolean
        If True, a Lookup table distortion will be applied
    d2imcorr : boolean
        If True, detector to image correction will be applied
    checkfiles : boolean
        If True, the format of the input files will be checked,
        geis and waiver fits files will be converted to MEF format.
        Default value is True for standalone mode.
    """
    if not verbose:
        logger.setLevel(100)
    else:
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        log_filename = 'stwcs.log'
        fh = logging.FileHandler(log_filename, mode='w')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        logger.setLevel(verbose)
    args = "vacorr=%s, tddcorr=%s, npolcorr=%s, d2imcorr=%s, checkfiles=%s" % \
        (str(vacorr), str(tddcorr), str(npolcorr), str(d2imcorr),
         str(checkfiles))
    logger.info('\n\tStarting UPDATEWCS: %s', time.asctime())

    files = parseinput.parseinput(input)[0]
    logger.info("\n\tInput files: %s" % files)
    logger.info("\n\tInput arguments: %s" % args)

    if checkfiles:
        files = checkFiles(files)

    if not files:
        print('No valid input, quitting ...\n')
        return

    for f in files:
        acorr = apply_corrections.setCorrections(f, vacorr=vacorr,
                                                 tddcorr=tddcorr,
                                                 npolcorr=npolcorr,
                                                 d2imcorr=d2imcorr)
        if 'MakeWCS' in acorr and newIDCTAB(f):
            logger.warning("\n\tNew IDCTAB file detected. "
                           "All current WCSs will be deleted")
            cleanWCS(f)
        makecorr(f, acorr)

    return files
def process_input(input, output=None, ivmlist=None, updatewcs=True,
                  prodonly=False, shiftfile=None):
    ivmlist = None
    oldasndict = None

    if not isinstance(input, list) and ('_asn' in input or '_asc' in input):
        # Input is an association table.
        # Get the input files, and run makewcs on them.
        oldasndict = asnutil.readASNTable(input, prodonly=prodonly)
        if not output:
            output = oldasndict['output']
        filelist = [fileutil.buildRootname(fname)
                    for fname in oldasndict['order']]

    elif not isinstance(input, list) and input[0] == '@':
        # input is an @ file
        f = open(input[1:])
        # Read the first line in order to determine whether
        # IVM files have been specified in a second column...
        line = f.readline()
        f.close()
        # Parse the @-file with irafglob to extract the input filename
        filelist = irafglob.irafglob(input, atfile=atfile_sci)
        # If there is a second column...
        if len(line.split()) == 2:
            # ...parse out the names of the IVM files as well
            ivmlist = irafglob.irafglob(input, atfile=atfile_ivm)

    else:
        # input is a string or a python list
        try:
            filelist, output = parseinput.parseinput(input,
                                                     outputname=output)
        except IOError:
            raise

    # Sort the list of input files. This ensures the list of input files
    # has the same order on all platforms; it can have a different order
    # because listdir() uses inode order, not unix-type order.
    filelist.sort()
    newfilelist, ivmlist = checkFiles(filelist, ivmlist)
    if not newfilelist:
        buildEmptyDRZ(input, output)
        return None, None, output

    # make an asn table at the end
    if updatewcs:
        pydr_input = runmakewcs(newfilelist)
    else:
        pydr_input = newfilelist

    # AsnTable will handle the case when output==None
    if not oldasndict:
        oldasndict = asnutil.ASNTable(pydr_input, output=output)
        oldasndict.create()

    if shiftfile:
        oldasndict.update(shiftfile=shiftfile)

    asndict = update_member_names(oldasndict, pydr_input)

    # Build output filename
    drz_extn = '_drz.fits'
    for img in newfilelist:
        # special case logic to automatically recognize when _flc.fits files
        # are provided as input and produce a _drc.fits file instead
        if '_flc.fits' in img:
            drz_extn = '_drc.fits'
            break

    if output in [None, '']:
        output = fileutil.buildNewRootname(asndict['output'], extn=drz_extn)
    else:
        if '.fits' in output.lower():
            pass
        elif drz_extn[:4] not in output.lower():
            output = fileutil.buildNewRootname(output, extn=drz_extn)

    print('Setting up output name: ', output)

    return asndict, ivmlist, output
def destripe_plus(inputfile, suffix='strp', stat='pmode1', maxiter=15, sigrej=2.0, lower=None, upper=None, binwidth=0.3, scimask1=None, scimask2=None, dqbits=None, rpt_clean=0, atol=0.01, cte_correct=True, clobber=False, verbose=True): r"""Calibrate post-SM4 ACS/WFC exposure(s) and use standalone :ref:`acsdestripe`. This takes a RAW image and generates a FLT file containing its calibrated and destriped counterpart. If CTE correction is performed, FLC will also be present. Parameters ---------- inputfile : str or list of str Input filenames in one of these formats: * a Python list of filenames * a partial filename with wildcards ('\*raw.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) suffix : str The string to add to each input file name to indicate an output product of ``acs_destripe``. This only affects the intermediate output file that will be automatically renamed to ``*blv_tmp.fits`` during the processing. stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1') Specifies the statistics to be used for computation of the background in image rows: * 'pmode1' - SEXTRACTOR-like mode estimate based on a modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_: ``2.5*median-1.5*mean``; * 'pmode2' - mode estimate based on `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_: ``3*median-2*mean``; * 'mean' - the mean of the distribution of the "good" pixels (after clipping, masking, etc.); * 'mode' - the mode of the distribution of the "good" pixels; * 'median' - the median of the distribution of the "good" pixels; * 'midpt' - estimate of the median of the distribution of the "good" pixels based on an algorithm similar to IRAF's `imagestats` task (``CDF(midpt)=1/2``). .. note:: The midpoint and mode are computed in two passes through the image. In the first pass the standard deviation of the pixels is calculated and used with the *binwidth* parameter to compute the resolution of the data histogram. The midpoint is estimated by integrating the histogram and computing by interpolation the data value at which exactly half the pixels are below that data value and half are above it. The mode is computed by locating the maximum of the data histogram and fitting the peak by parabolic interpolation. maxiter : int This parameter controls the maximum number of iterations to perform when computing the statistics used to compute the row-by-row corrections. sigrej : float This parameter sets the sigma level for the rejection applied during each iteration of statistics computations for the row-by-row corrections. lower : float, None (Default = None) Lower limit of usable pixel values for computing the background. This value should be specified in the units of the input image(s). upper : float, None (Default = None) Upper limit of usable pixel values for computing the background. This value should be specified in the units of the input image(s). binwidth : float (Default = 0.3) Histogram's bin width, in sigma units, used to sample the distribution of pixel brightness values in order to compute the background statistics. This parameter is applicable *only* to *stat* parameter values of `'mode'` or `'midpt'`. clobber : bool Specify whether or not to 'clobber' (delete then replace) previously generated products with the same names. scimask1 : str or list of str Mask images for *calibrated* ``SCI,1``, one for each input file. Pixels with zero values will be masked out, in addition to clipping. scimask2 : str or list of str Mask images for *calibrated* ``SCI,2``, one for each input file. Pixels with zero values will be masked out, in addition to clipping. This is not used for subarrays. dqbits : int, str, None (Default = None) Integer sum of all the DQ bit values from the input image's DQ array that should be considered "good" when building masks for de-striping computations. For example, if pixels in the DQ array can be combinations of 1, 2, 4, and 8 flags and one wants to consider DQ "defects" having flags 2 and 4 as being acceptable for de-striping computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel having values 2, 4, or 6 will be considered a good pixel, while a DQ pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged as a "bad" pixel. Alternatively, one can enter a comma- or '+'-separated list of integer bit flags that should be added to obtain the final "good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to setting `dqbits` to 12. | Set `dqbits` to 0 to make *all* non-zero pixels in the DQ mask to be considered "bad" pixels, and the corresponding image pixels not to be used for de-striping computations. | Default value (`None`) will turn off the use of image's DQ array for de-striping computations. | In order to reverse the meaning of the `dqbits` parameter from indicating values of the "good" DQ flags to indicating the "bad" DQ flags, prepend '~' to the string value. For example, in order not to use pixels with DQ flags 4 and 8 for sky computations and to consider as "good" all other pixels (regardless of their DQ flag), set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the same effect with an `int` input value (except for 0), enter -(4+8+1)=-13. Following this convention, a `dqbits` string value of ``'~0'`` would be equivalent to setting ``dqbits=None``. .. note:: DQ masks (if used) *will be* combined with user masks specified in the `scimask1` and `scimask2` parameters (if any). rpt_clean : int An integer indicating how many *additional* times stripe cleaning should be performed on the input image. Default = 0. atol : float, None The threshold for maximum absolute value of bias stripe correction below which repeated cleanings can stop. When `atol` is `None` cleaning will be repeated `rpt_clean` number of times. Default = 0.01 [e]. cte_correct : bool Perform CTE correction. verbose : bool Print informational messages. Default = True. Raises ------ ImportError ``stsci.tools`` not found. IOError Input file does not exist. ValueError Invalid header values or CALACS version. 
""" # Optional package dependencies from stsci.tools import parseinput try: from stsci.tools.bitmask import interpret_bit_flags except ImportError: from stsci.tools.bitmask import ( interpret_bits_value as interpret_bit_flags ) # process input file(s) and if we have multiple input files - recursively # call acs_destripe_plus for each input image: flist = parseinput.parseinput(inputfile)[0] if isinstance(scimask1, str): mlist1 = parseinput.parseinput(scimask1)[0] elif isinstance(scimask1, np.ndarray): mlist1 = [ scimask1.copy() ] elif scimask1 is None: mlist1 = [] elif isinstance(scimask1, list): mlist1 = [] for m in scimask1: if isinstance(m, np.ndarray): mlist1.append(m.copy()) elif isinstance(m, str): mlist1 += parseinput.parseinput(m)[0] else: raise TypeError("'scimask1' must be a list of str or " "numpy.ndarray values.") else: raise TypeError("'scimask1' must be either a str, or a " "numpy.ndarray, or a list of the two type of " "values.") if isinstance(scimask2, str): mlist2 = parseinput.parseinput(scimask2)[0] elif isinstance(scimask2, np.ndarray): mlist2 = [ scimask2.copy() ] elif scimask2 is None: mlist2 = [] elif isinstance(scimask2, list): mlist2 = [] for m in scimask2: if isinstance(m, np.ndarray): mlist2.append(m.copy()) elif isinstance(m, str): mlist2 += parseinput.parseinput(m)[0] else: raise TypeError("'scimask2' must be a list of str or " "numpy.ndarray values.") else: raise TypeError("'scimask2' must be either a str, or a " "numpy.ndarray, or a list of the two type of " "values.") n_input = len(flist) n_mask1 = len(mlist1) n_mask2 = len(mlist2) if n_input == 0: raise ValueError( 'No input file(s) provided or the file(s) do not exist') if n_mask1 == 0: mlist1 = [None] * n_input elif n_mask1 != n_input: raise ValueError('Insufficient masks for [SCI,1]') if n_mask2 == 0: mlist2 = [None] * n_input elif n_mask2 != n_input: raise ValueError('Insufficient masks for [SCI,2]') if n_input > 1: for img, mf1, mf2 in zip(flist, mlist1, mlist2): destripe_plus( inputfile=img, suffix=suffix, stat=stat, lower=lower, upper=upper, binwidth=binwidth, maxiter=maxiter, sigrej=sigrej, scimask1=scimask1, scimask2=scimask2, dqbits=dqbits, cte_correct=cte_correct, clobber=clobber, verbose=verbose ) return inputfile = flist[0] scimask1 = mlist1[0] scimask2 = mlist2[0] # verify that the RAW image exists in cwd cwddir = os.getcwd() if not os.path.exists(os.path.join(cwddir, inputfile)): raise IOError("{0} does not exist.".format(inputfile)) # get image's primary header: header = fits.getheader(inputfile) # verify masks defined (or not) simultaneously: if header['CCDAMP'] == 'ABCD' and \ ((scimask1 is not None and scimask2 is None) or \ (scimask1 is None and scimask2 is not None)): raise ValueError("Both 'scimask1' and 'scimask2' must be specified " "or not specified together.") calacs_str = subprocess.check_output(['calacs.e', '--version']).split()[0] calacs_ver = [int(x) for x in calacs_str.decode().split('.')] if calacs_ver < [8, 3, 1]: raise ValueError('CALACS {0} is incomptible. 
' 'Must be 8.3.1 or later.'.format(calacs_str)) # check date for post-SM4 and if 2K subarray or full frame is_sub2K = False ctecorr = header['PCTECORR'] aperture = header['APERTURE'] detector = header['DETECTOR'] date_obs = Time(header['DATE-OBS']) # intermediate filenames blvtmp_name = inputfile.replace('raw', 'blv_tmp') blctmp_name = inputfile.replace('raw', 'blc_tmp') # output filenames tra_name = inputfile.replace('_raw.fits', '.tra') flt_name = inputfile.replace('raw', 'flt') flc_name = inputfile.replace('raw', 'flc') if detector != 'WFC': raise ValueError("{0} is not a WFC image, please check the 'DETECTOR'" " keyword.".format(inputfile)) if date_obs < SM4_DATE: raise ValueError( "{0} is a pre-SM4 image.".format(inputfile)) if header['SUBARRAY'] and cte_correct: if aperture in SUBARRAY_LIST: is_sub2K = True else: LOG.warning('Using non-2K subarray, turning CTE correction off') cte_correct = False # delete files from previous CALACS runs if clobber: for tmpfilename in [blvtmp_name, blctmp_name, flt_name, flc_name, tra_name]: if os.path.exists(tmpfilename): os.remove(tmpfilename) # run ACSCCD on RAW subarray acsccd.acsccd(inputfile) # modify user mask with DQ masks if requested dqbits = interpret_bit_flags(dqbits) if dqbits is not None: # save 'tra' file in memory to trick the log file # not to save first acs2d log as this is done only # for the purpose of obtaining DQ masks. # WISH: it would have been nice is there was an easy way of obtaining # just the DQ masks as if data were calibrated but without # having to recalibrate them with acs2d. if os.path.isfile(tra_name): fh = open(tra_name) tra_lines = fh.readlines() fh.close() else: tra_lines = None # apply flats, etc. acs2d.acs2d(blvtmp_name, verbose=False, quiet=True) # extract DQ arrays from the FLT image: dq1, dq2 = _read_DQ_arrays(flt_name) if isinstance(scimask1, str): if scimask1.strip() is '': mask1 = None scimask1 = None else: mask1 = fits.getdata(scimask1) elif isinstance(scimask1, np.ndarray): mask1 = scimask1.copy() elif scimask1 is None: mask1 = None else: raise TypeError("'scimask1' must be either a str file name, " "a numpy.ndarray, or None.") scimask1 = acs_destripe._mergeUserMaskAndDQ(dq1, mask1, dqbits) if isinstance(scimask2, str): if scimask2.strip() is '': mask2 = None scimask2 = None else: mask2 = fits.getdata(scimask2) elif isinstance(scimask2, np.ndarray): mask2 = scimask2.copy() elif scimask2 is None: mask2 = None else: raise TypeError("'scimask2' must be either a str file name, " "a numpy.ndarray, or None.") if dq2 is not None: scimask2 = acs_destripe._mergeUserMaskAndDQ(dq2, mask2, dqbits) # reconstruct trailer file: if tra_lines is not None: fh = open(tra_name, mode='w') fh.writelines(tra_lines) fh.close() # delete temporary FLT image: if os.path.isfile(flt_name): os.remove(flt_name) # execute destriping of the subarray (post-SM4 data only) acs_destripe.clean( blvtmp_name, suffix, stat=stat, maxiter=maxiter, sigrej=sigrej, lower=lower, upper=upper, binwidth=binwidth, mask1=scimask1, mask2=scimask2, dqbits=dqbits, rpt_clean=rpt_clean, atol=atol, clobber=clobber, verbose=verbose) blvtmpsfx = 'blv_tmp_{0}'.format(suffix) os.rename(inputfile.replace('raw', blvtmpsfx), blvtmp_name) # update subarray header if is_sub2K and cte_correct: fits.setval(blvtmp_name, 'PCTECORR', value='PERFORM') ctecorr = 'PERFORM' # perform CTE correction on destriped image if cte_correct: if ctecorr == 'PERFORM': acscte.acscte(blvtmp_name) else: LOG.warning( "PCTECORR={0}, cannot run CTE correction".format(ctecorr)) cte_correct = 
False # run ACS2D to get FLT and FLC images acs2d.acs2d(blvtmp_name) if cte_correct: acs2d.acs2d(blctmp_name) # delete intermediate files os.remove(blvtmp_name) if cte_correct and os.path.isfile(blctmp_name): os.remove(blctmp_name) info_str = 'Done.\nFLT: {0}\n'.format(flt_name) if cte_correct: info_str += 'FLC: {0}\n'.format(flc_name) LOG.info(info_str)
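# Usage sketch (assumes a post-SM4 ACS/WFC RAW file and the CALACS
# executables on PATH; the filename is a placeholder). A RAW image goes
# in; a destriped FLT comes out, plus an FLC when CTE correction runs.
def _example_destripe_plus():
    destripe_plus('jabc01abq_raw.fits', suffix='strp', stat='pmode1',
                  cte_correct=True, clobber=True, verbose=True)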
def buildEmptyDRZ(input, output): """ METHOD : _buildEmptyDRZ PURPOSE : Create an empty DRZ file in a valid FITS format so that the HST pipeline can handle the Multidrizzle zero exposure time exception where all data has been excluded from processing. INPUT : input filename(s) and the output product name OUTPUT : empty DRZ file written to disk """ if output is None: if len(input) == 1: oname = fileutil.buildNewRootname(input[0]) else: oname = 'final' _drzextn = '_drz.fits' if '_flc.fits' in input[0]: _drzextn = '_drc.fits' output = fileutil.buildNewRootname(oname, extn=_drzextn) else: if 'drz' not in output: output = fileutil.buildNewRootname(output, extn='_drz.fits') print('Setting up output name: ', output) # Open the first image of the excludedFileList to use as a template to build # the DRZ file. inputfile = parseinput.parseinput(input)[0] try: img = pyfits.open(inputfile[0]) except: raise IOError('Unable to open file %s \n' % inputfile) # Create the fitsobject fitsobj = pyfits.HDUList() # Copy the primary header hdu = img[0].copy() fitsobj.append(hdu) # Modify the 'NEXTEND' keyword of the primary header to 3 for the # 'sci, wht, and ctx' extensions of the newly created file. fitsobj[0].header['NEXTEND'] = 3 # Create the 'SCI' extension hdu = pyfits.ImageHDU(header=img['sci', 1].header.copy(), data=None) hdu.header['EXTNAME'] = 'SCI' fitsobj.append(hdu) # Create the 'WHT' extension hdu = pyfits.ImageHDU(header=img['sci', 1].header.copy(), data=None) hdu.header['EXTNAME'] = 'WHT' fitsobj.append(hdu) # Create the 'CTX' extension hdu = pyfits.ImageHDU(header=img['sci', 1].header.copy(), data=None) hdu.header['EXTNAME'] = 'CTX' fitsobj.append(hdu) # Add HISTORY comments explaining the creation of this file. fitsobj[0].header.add_history("** Multidrizzle has created this empty DRZ **") fitsobj[0].header.add_history("** product because all input images were **") fitsobj[0].header.add_history("** excluded from processing because their **") fitsobj[0].header.add_history("** header EXPTIME values were 0.0. If you **") fitsobj[0].header.add_history("** still wish to use this data make the **") fitsobj[0].header.add_history("** EXPTIME values in the header non-zero. **") # Change the filename in the primary header to reflect the name of the output # filename. fitsobj[0].header['FILENAME'] = str(output) #+"_drz.fits" # Change the ROOTNAME keyword to the ROOTNAME of the output PRODUCT _drzsuffix = 'drz' if 'drc' in output: _drzsuffix = 'drc' fitsobj[0].header['ROOTNAME'] = str(output.split('_%s.fits' % _drzsuffix)[0]) # Modify the ASN_MTYP keyword to contain "PROD-DTH" so it can be properly # ingested into the archive catalog. fitsobj[0].header['ASN_MTYP'] = 'PROD-DTH' errstr = "#############################################\n" errstr += "# #\n" errstr += "# ERROR: #\n" errstr += "# Multidrizzle has created this empty DRZ #\n" errstr += "# product because all input images were #\n" errstr += "# excluded from processing because their #\n" errstr += "# header EXPTIME values were 0.0. If you #\n" errstr += "# still wish to use this data make the #\n" errstr += "# EXPTIME values in the header non-zero. #\n" errstr += "# #\n" errstr += "#############################################\n\n" print(errstr) # If the file is already on disk delete it and replace it with the # new file dirfiles = os.listdir(os.curdir) if dirfiles.count(output) > 0: os.remove(output) print(" Replacing " + output + "...") # Write out the empty DRZ file fitsobj.writeto(output) return
def processFilenames(input=None,output=None,infilesOnly=False): """Process the input string which contains the input file information and return a filelist, output """ ivmlist = None oldasndict = None if input is None: print("No input files provided to processInput") raise ValueError if not isinstance(input, list) and ('_asn' in input or '_asc' in input): # Input is an association table # Get the input files, and run makewcs on them oldasndict = asnutil.readASNTable(input, prodonly=infilesOnly) if not infilesOnly: if output in ["",None,"None"]: output = oldasndict['output'].lower() # ensure output name is lower case asnhdr = fits.getheader(input, memmap=False) # Only perform duplication check if not already completed... dupcheck = asnhdr.get('DUPCHECK',default="PERFORM") == "PERFORM" #filelist = [fileutil.buildRootname(fname) for fname in oldasndict['order']] filelist = buildASNList(oldasndict['order'],input,check_for_duplicates=dupcheck) elif (not isinstance(input, list)) and (input[0] == '@'): # input is an @ file f = open(input[1:]) # Read the first line in order to determine whether # IVM files have been specified in a second column... line = f.readline() f.close() # Parse the @-file with irafglob to extract the input filename filelist = irafglob.irafglob(input, atfile=util.atfile_sci) # If there is a second column... if len(line.split()) == 2: # ...parse out the names of the IVM files as well ivmlist = irafglob.irafglob(input, atfile=util.atfile_ivm) if output in ['',None,"None"]: if len(filelist) == 1: output = fileutil.buildNewRootname(filelist[0]) else: output = 'final' else: #input is a string or a python list try: filelist, output = parseinput.parseinput(input, outputname=output) if output in ['',None,"None"]: if len(filelist) == 1: output = fileutil.buildNewRootname(filelist[0]) else: output = 'final' if not isinstance(input, list): filelist.sort() except IOError: raise # sort the list of input files # this ensures the list of input files has the same order on all platforms # it can have a different order because listdir() uses inode order, not alphabetical order #filelist.sort() return filelist, output, ivmlist, oldasndict
def buildEmptyDRZ(input, output): """ Create an empty DRZ file. This module creates an empty DRZ file in a valid FITS format so that the HST pipeline can handle the Multidrizzle zero exposure time exception where all data has been excluded from processing. Parameters ---------- input : str filename of the initial input to process_input output : str filename of the default empty _drz.fits file to be generated """ # Identify the first input image inputfile = parseinput.parseinput(input)[0] if not inputfile: print('\n******* ERROR *******', file=sys.stderr) print( 'No input file found! Check specification of parameter ' '"input". ', file=sys.stderr) print('Quitting...', file=sys.stderr) print('******* ***** *******\n', file=sys.stderr) return # raise IOError, "No input file found!" # Set up output file here... if output is None: if len(input) == 1: oname = fileutil.buildNewRootname(input[0]) else: oname = 'final' output = fileutil.buildNewRootname(oname, extn='_drz.fits') else: if 'drz' not in output: output = fileutil.buildNewRootname(output, extn='_drz.fits') log.info('Setting up output name: %s' % output) # Open the first image (of the excludedFileList?) to use as a template to build # the DRZ file. try: log.info('Building empty DRZ file from %s' % inputfile[0]) img = fits.open(inputfile[0], memmap=False) except: raise IOError('Unable to open file %s \n' % inputfile) # Create the fitsobject fitsobj = fits.HDUList() # Copy the primary header hdu = img[0].copy() fitsobj.append(hdu) # Modify the 'NEXTEND' keyword of the primary header to 3 for the # 'sci, wht, and ctx' extensions of the newly created file. fitsobj[0].header['NEXTEND'] = 3 # Create the 'SCI' extension hdu = fits.ImageHDU(header=img['sci', 1].header.copy()) hdu.header['EXTNAME'] = 'SCI' fitsobj.append(hdu) # Create the 'WHT' extension hdu = fits.ImageHDU(header=img['sci', 1].header.copy()) hdu.header['EXTNAME'] = 'WHT' fitsobj.append(hdu) # Create the 'CTX' extension hdu = fits.ImageHDU(header=img['sci', 1].header.copy()) hdu.header['EXTNAME'] = 'CTX' fitsobj.append(hdu) # Add HISTORY comments explaining the creation of this file. fitsobj[0].header.add_history("** AstroDrizzle has created this empty " "DRZ product because**") fitsobj[0].header.add_history("** all input images were excluded from " "processing.**") # Change the filename in the primary header to reflect the name of the output # filename. fitsobj[0].header['FILENAME'] = str(output) # +"_drz.fits" # Change the ROOTNAME keyword to the ROOTNAME of the output PRODUCT fitsobj[0].header['ROOTNAME'] = str(output.split('_drz.fits')[0]) # Modify the ASN_MTYP keyword to contain "PROD-DTH" so it can be properly # ingested into the archive catalog. # stis has this keyword in the [1] header, so I am directing the code # to first look in the primary header, then the first extension. try: fitsobj[0].header['ASN_MTYP'] = 'PROD-DTH' except: fitsobj[1].header['ASN_MTYP'] = 'PROD-DTH' # If the file is already on disk delete it and replace it with the # new file dirfiles = os.listdir(os.curdir) if dirfiles.count(output) > 0: os.remove(output) log.info(" Replacing %s..." % output) # Write out the empty DRZ file fitsobj.writeto(output) print(textutil.textbox( 'ERROR:\nAstroDrizzle has created an empty DRZ product because all ' 'input images were excluded from processing or a user requested the ' 'program to stop.') + '\n', file=sys.stderr) return
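# Illustrative check of the product written above (assumes buildEmptyDRZ
# just produced a hypothetical 'final_drz.fits'): the file should contain
# a primary HDU followed by data-less SCI, WHT, and CTX extensions, with
# NEXTEND set to 3.
def _example_inspect_empty_drz():
    with fits.open('final_drz.fits') as hdul:
        hdul.info()
        assert hdul[0].header['NEXTEND'] == 3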
def tweakback(drzfile, input=None, origwcs=None, newname=None, wcsname=None, extname='SCI', force=False, verbose=False): """ Apply WCS solution recorded in drizzled file to distorted input images (``_flt.fits`` files) used to create the drizzled file. This task relies on the original WCS and updated WCS to be recorded in the drizzled image's header as the last 2 alternate WCSs. Parameters ---------- drzfile : str (Default = '') filename of undistorted image which contains the new WCS and WCS prior to being updated newname : str (Default = None) Value of ``WCSNAME`` to be used to label the updated solution in the output (e.g., ``_flt.fits``) files. If left blank or None, it will default to using the current ``WCSNAME`` value from the input drzfile. input : str (Default = '') filenames of distorted images to be updated using new WCS from 'drzfile'. These can be provided either as an ``@-file``, a comma-separated list of filenames or using wildcards. .. note:: A blank value will indicate that the task should derive the filenames from the 'drzfile' itself, if possible. The filenames will be derived from the ``D*DATA`` keywords written out by ``AstroDrizzle``. If they cannot be found, the task will quit. origwcs : str (Default = None) Value of ``WCSNAME`` keyword prior to the drzfile image being updated by ``TweakReg``. If left blank or None, it will default to using the second to last ``WCSNAME*`` keyword value found in the header. wcsname : str (Default = None) Value of WCSNAME for updated solution written out by ``TweakReg`` as specified by the `wcsname` parameter from ``TweakReg``. If this is left blank or `None`, it will default to the current ``WCSNAME`` value from the input drzfile. extname : str (Default = 'SCI') Name of extension in `input` files to be updated with new WCS force : bool (Default = False) This parameter specifies whether or not to force an update of the WCS even though a WCS with this solution or `wcsname` already exists. verbose : bool (Default = False) This parameter specifies whether or not to print out additional messages during processing. Notes ----- The algorithm used by this function is based on linearization of the exact compound operator that converts input image coordinates to the coordinates (in the input image) that would result in alignment with the new drizzled image WCS. If no input distorted files are specified as input, this task will attempt to generate the list of filenames from the drizzled input file's own header. EXAMPLES -------- An image named ``acswfc_mos2_drz.fits`` was created from 4 images using astrodrizzle. This drizzled image was then aligned to another image using tweakreg and the header was updated using the ``WCSNAME`` = ``TWEAK_DRZ``. The new WCS can then be used to update each of the 4 images that were combined to make up this drizzled image using: >>> from drizzlepac import tweakback >>> tweakback.tweakback('acswfc_mos2_drz.fits') If the same WCS should be applied to a specific set of images, those images can be updated using: >>> tweakback.tweakback('acswfc_mos2_drz.fits', ... input='img_mos2a_flt.fits,img_mos2e_flt.fits') See Also -------- stwcs.wcsutil.altwcs: Alternate WCS implementation """ print("TweakBack Version {:s}({:s}) started at: {:s}\n" .format(__version__, __version_date__, util._ptime()[0])) # Interpret input list/string into list of filename(s) fltfiles = parseinput.parseinput(input)[0] if fltfiles is None or len(fltfiles) == 0: # try to extract the filenames from the drizzled file's header fltfiles = extract_input_filenames(drzfile) if fltfiles is None: print('*'*60) print('*') print('* ERROR:') print('* No input filenames found! ') print('* Please specify "fltfiles" or ensure that input drizzled') print('* image contains D*DATA keywords. ') print('*') print('*'*60) raise ValueError if not isinstance(fltfiles, list): fltfiles = [fltfiles] sciext = determine_extnum(drzfile, extname='SCI') scihdr = fits.getheader(drzfile, ext=sciext, memmap=False) ### Step 1: Read in updated and original WCS solutions # determine keys for all alternate WCS solutions in drizzled image header wkeys = wcsutil.altwcs.wcskeys(drzfile, ext=sciext) wnames = wcsutil.altwcs.wcsnames(drzfile, ext=sciext) if not util.is_blank(newname): final_name = newname else: final_name = wnames[wkeys[-1]] # Read in HSTWCS objects for the final, updated WCS and the previous WCS # from the drizzled image header # The final solution also serves as reference WCS when using updatehdr if not util.is_blank(wcsname): for k in wnames: if wnames[k] == wcsname: wcskey = k break else: wcskey = wkeys[-1] final_wcs = wcsutil.HSTWCS(drzfile, ext=sciext, wcskey=wcskey) if not util.is_blank(origwcs): for k in wnames: if wnames[k] == origwcs: orig_wcskey = k orig_wcsname = origwcs break else: orig_wcsname, orig_wcskey = determine_orig_wcsname(scihdr, wnames, wkeys) orig_wcs = wcsutil.HSTWCS(drzfile, ext=sciext, wcskey=orig_wcskey) # read in RMS values reported for new solution crderr1kw = 'CRDER1' + wkeys[-1] crderr2kw = 'CRDER2' + wkeys[-1] if crderr1kw in scihdr: crderr1 = fits.getval(drzfile, crderr1kw, ext=sciext, memmap=False) else: crderr1 = 0.0 if crderr2kw in scihdr: crderr2 = fits.getval(drzfile, crderr2kw, ext=sciext, memmap=False) else: crderr2 = 0.0 del scihdr ### Step 2: Apply solution to input file headers for fname in fltfiles: logstr = "....Updating header for {:s}...".format(fname) if verbose: print("\n{:s}\n".format(logstr)) else: log.info(logstr) # reset header WCS keywords to original (OPUS generated) values imhdulist = fits.open(fname, mode='update', memmap=False) extlist = get_ext_list(imhdulist, extname='SCI') if not extlist: extlist = [0] # ensure that input PRIMARY WCS has been archived before overwriting # with new solution wcsutil.altwcs.archiveWCS(imhdulist, extlist, reusekey=True) # Process MEF images... for ext in extlist: logstr = "Processing {:s}[{:s}]".format(imhdulist.filename(), ext2str(ext)) if verbose: print("\n{:s}\n".format(logstr)) else: log.info(logstr) chip_wcs = wcsutil.HSTWCS(imhdulist, ext=ext) update_chip_wcs(chip_wcs, orig_wcs, final_wcs, xrms=crderr1, yrms=crderr2) # Update FITS file with newly updated WCS for this chip extnum = imhdulist.index(imhdulist[ext]) updatehdr.update_wcs(imhdulist, extnum, chip_wcs, wcsname=final_name, reusename=False, verbose=verbose) imhdulist.close()
def acsccd(input, exec_path='', time_stamps=False, verbose=False, quiet=False): """ Run the acsccd.e executable as from the shell. Expect input to be ``*_raw.fits``. Output is automatically named ``*_blv_tmp.fits``. Parameters ---------- input : str or list of str Input filenames in one of these formats: * a single filename ('j1234567q_raw.fits') * a Python list of filenames * a partial filename with wildcards ('\*raw.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) exec_path : str, optional The complete path to ACSCCD executable. If not given, run ACSCCD given by 'acsccd.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. """ from stsci.tools import parseinput # Optional package dependency if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acsccd.e'] # Parse input to get list of filenames to process. # acsccd.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') #if dqicorr: # call_list.append('-dqi') #if atodcor: # call_list.append('-atod') #if blevcorr: # call_list.append('-blev') #if biascorr: # call_list.append('-bias') subprocess.call(call_list)
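# Usage sketch (hypothetical filename; requires the HSTCAL 'acsccd.e'
# binary on PATH or an explicit exec_path). The output is automatically
# named *_blv_tmp.fits, as described in the docstring above.
def _example_acsccd():
    acsccd('j1234567q_raw.fits', time_stamps=True, verbose=True)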
def acscteforwardmodel(input, exec_path='', time_stamps=False, verbose=False, quiet=False, single_core=False, exe_args=None): """ Run the acscteforwardmodel.e executable as from the shell. Expect input to be ``*_blc_tmp.fits`` or ``*_flc.fits``. Output is automatically named ``*_ctefmod.fits``. Parameters ---------- input : str or list of str Input filenames in one of these formats: * a single filename ('j1234567q_blc_tmp.fits') * a Python list of filenames * a partial filename with wildcards ('\*blc_tmp.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) exec_path : str, optional The complete path to ACSCTE forward model executable. If not given, run ACSCTE given by 'acscteforwardmodel.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. single_core : bool, optional CTE correction in the ACSCTE forward model will by default try to use all available CPUs on your computer. Set this to True to force the use of just one CPU. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1'] """ from stsci.tools import parseinput # Optional package dependency if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acscteforwardmodel.e'] # Parse input to get list of filenames to process. # acscte.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') if single_core: call_list.append('-1') if exe_args: call_list.extend(exe_args) subprocess.check_call(call_list)
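# Usage sketch of the exe_args passthrough described in the docstring
# (hypothetical filename; the '--nThreads' option comes from the
# docstring's own example).
def _example_acscteforwardmodel():
    acscteforwardmodel('j1234567q_blc_tmp.fits', single_core=False,
                       exe_args=['--nThreads', '2'])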
def tweakback(drzfile, input=None, origwcs=None, newname=None, wcsname=None, extname='SCI', force=False, verbose=False): """ Apply WCS solution recorded in drizzled file to distorted input images (``_flt.fits`` files) used to create the drizzled file. This task relies on the original WCS and updated WCS to be recorded in the drizzled image's header as the last 2 alternate WCSs. Parameters ---------- drzfile : str (Default = '') filename of undistorted image which contains the new WCS and WCS prior to being updated newname : str (Default = None) Value of ``WCSNAME`` to be used to label the updated solution in the output (e.g., ``_flt.fits``) files. If left blank or None, it will default to using the current ``WCSNAME`` value from the input drzfile. input : str (Default = '') filenames of distorted images to be updated using new WCS from 'drzfile'. These can be provided either as an ``@-file``, a comma-separated list of filenames or using wildcards. .. note:: A blank value will indicate that the task should derive the filenames from the 'drzfile' itself, if possible. The filenames will be derived from the ``D*DATA`` keywords written out by ``AstroDrizzle``. If they cannot be found, the task will quit. origwcs : str (Default = None) Value of ``WCSNAME`` keyword prior to the drzfile image being updated by ``TweakReg``. If left blank or None, it will default to using the second to last ``WCSNAME*`` keyword value found in the header. wcsname : str (Default = None) Value of WCSNAME for updated solution written out by ``TweakReg`` as specified by the `wcsname` parameter from ``TweakReg``. If this is left blank or `None`, it will default to the current ``WCSNAME`` value from the input drzfile. extname : str (Default = 'SCI') Name of extension in `input` files to be updated with new WCS force : bool (Default = False) This parameter specifies whether or not to force an update of the WCS even though a WCS with this solution or `wcsname` already exists. verbose : bool (Default = False) This parameter specifies whether or not to print out additional messages during processing. Notes ----- The algorithm used by this function is based on linearization of the exact compound operator that converts input image coordinates to the coordinates (in the input image) that would result in alignment with the new drizzled image WCS. If no input distorted files are specified as input, this task will attempt to generate the list of filenames from the drizzled input file's own header. EXAMPLES -------- An image named ``acswfc_mos2_drz.fits`` was created from 4 images using astrodrizzle. This drizzled image was then aligned to another image using tweakreg and the header was updated using the ``WCSNAME`` = ``TWEAK_DRZ``. The new WCS can then be used to update each of the 4 images that were combined to make up this drizzled image using: >>> from drizzlepac import tweakback >>> tweakback.tweakback('acswfc_mos2_drz.fits') If the same WCS should be applied to a specific set of images, those images can be updated using: >>> tweakback.tweakback('acswfc_mos2_drz.fits', ... input='img_mos2a_flt.fits,img_mos2e_flt.fits') See Also -------- stwcs.wcsutil.altwcs: Alternate WCS implementation """ print("TweakBack Version {:s} started at: {:s}\n".format( __version__, util._ptime()[0])) # Interpret input list/string into list of filename(s) fltfiles = parseinput.parseinput(input)[0] if fltfiles is None or len(fltfiles) == 0: # try to extract the filenames from the drizzled file's header fltfiles = extract_input_filenames(drzfile) if fltfiles is None: print('*' * 60) print('*') print('* ERROR:') print('* No input filenames found! ') print( '* Please specify "fltfiles" or ensure that input drizzled') print('* image contains D*DATA keywords. ') print('*') print('*' * 60) raise ValueError if not isinstance(fltfiles, list): fltfiles = [fltfiles] sciext = determine_extnum(drzfile, extname='SCI') scihdr = fits.getheader(drzfile, ext=sciext, memmap=False) ### Step 1: Read in updated and original WCS solutions # determine keys for all alternate WCS solutions in drizzled image header wkeys = wcsutil.altwcs.wcskeys(drzfile, ext=sciext) if len(wkeys) < 2: raise ValueError( f"'{drzfile}' must contain at least two valid WCS: original and updated." ) wnames = wcsutil.altwcs.wcsnames(drzfile, ext=sciext) if not util.is_blank(newname): final_name = newname else: final_name = wnames[wkeys[-1]] # Read in HSTWCS objects for the final, updated WCS and the previous WCS # from the drizzled image header # The final solution also serves as reference WCS when using updatehdr if not util.is_blank(wcsname): for wkey, wname in wnames.items(): if wname == wcsname: wcskey = wkey break else: raise ValueError( f"WCS with name '{wcsname}' not found in '{drzfile}'") else: wcskey = wkeys[-1] final_wcs = wcsutil.HSTWCS(drzfile, ext=sciext, wcskey=wcskey) if not util.is_blank(origwcs): for wkey, wname in wnames.items(): if wname == origwcs: orig_wcskey = wkey break else: raise ValueError( f"WCS with name '{origwcs}' not found in '{drzfile}'") else: _, orig_wcskey = determine_orig_wcsname(scihdr, wnames, wkeys) orig_wcs = wcsutil.HSTWCS(drzfile, ext=sciext, wcskey=orig_wcskey) # read in RMS values reported for new solution crderr1kw = 'CRDER1' + wkeys[-1] crderr2kw = 'CRDER2' + wkeys[-1] if crderr1kw in scihdr: crderr1 = fits.getval(drzfile, crderr1kw, ext=sciext, memmap=False) else: crderr1 = 0.0 if crderr2kw in scihdr: crderr2 = fits.getval(drzfile, crderr2kw, ext=sciext, memmap=False) else: crderr2 = 0.0 del scihdr ### Step 2: Apply solution to input file headers for fname in fltfiles: logstr = "....Updating header for {:s}...".format(fname) if verbose: print("\n{:s}\n".format(logstr)) else: log.info(logstr) # reset header WCS keywords to original (OPUS generated) values imhdulist = fits.open(fname, mode='update', memmap=False) extlist = get_ext_list(imhdulist, extname='SCI') if not extlist: extlist = [0] # Process MEF images... for ext in extlist: logstr = "Processing {:s}[{:s}]".format(imhdulist.filename(), ext2str(ext)) if verbose: print("\n{:s}\n".format(logstr)) else: log.info(logstr) chip_wcs = wcsutil.HSTWCS(imhdulist, ext=ext) update_chip_wcs(chip_wcs, orig_wcs, final_wcs, xrms=crderr1, yrms=crderr2) # Update FITS file with newly updated WCS for this chip extnum = imhdulist.index(imhdulist[ext]) updatehdr.update_wcs(imhdulist, extnum, chip_wcs, wcsname=final_name, reusename=False, verbose=verbose) imhdulist.close()
def destripe_plus(inputfile, suffix='strp', stat='pmode1', maxiter=15, sigrej=2.0, lower=None, upper=None, binwidth=0.3, scimask1=None, scimask2=None, dqbits=None, rpt_clean=0, atol=0.01, cte_correct=True, clobber=False, verbose=True): r"""Calibrate post-SM4 ACS/WFC exposure(s) and use standalone :ref:`acsdestripe`. This takes a RAW image and generates a FLT file containing its calibrated and destriped counterpart. If CTE correction is performed, FLC will also be present. Parameters ---------- inputfile : str or list of str Input filenames in one of these formats: * a Python list of filenames * a partial filename with wildcards ('\*raw.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) suffix : str The string to add to each input file name to indicate an output product of ``acs_destripe``. This only affects the intermediate output file that will be automatically renamed to ``*blv_tmp.fits`` during the processing. stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1') Specifies the statistics to be used for computation of the background in image rows: * 'pmode1' - SEXTRACTOR-like mode estimate based on a modified `Pearson's rule <https://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_: ``2.5*median-1.5*mean``; * 'pmode2' - mode estimate based on `Pearson's rule <https://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_: ``3*median-2*mean``; * 'mean' - the mean of the distribution of the "good" pixels (after clipping, masking, etc.); * 'mode' - the mode of the distribution of the "good" pixels; * 'median' - the median of the distribution of the "good" pixels; * 'midpt' - estimate of the median of the distribution of the "good" pixels based on an algorithm similar to IRAF's `imagestats` task (``CDF(midpt)=1/2``). .. note:: The midpoint and mode are computed in two passes through the image. In the first pass the standard deviation of the pixels is calculated and used with the *binwidth* parameter to compute the resolution of the data histogram. The midpoint is estimated by integrating the histogram and computing by interpolation the data value at which exactly half the pixels are below that data value and half are above it. The mode is computed by locating the maximum of the data histogram and fitting the peak by parabolic interpolation. maxiter : int This parameter controls the maximum number of iterations to perform when computing the statistics used to compute the row-by-row corrections. sigrej : float This parameter sets the sigma level for the rejection applied during each iteration of statistics computations for the row-by-row corrections. lower : float, None (Default = None) Lower limit of usable pixel values for computing the background. This value should be specified in the units of the input image(s). upper : float, None (Default = None) Upper limit of usable pixel values for computing the background. This value should be specified in the units of the input image(s). binwidth : float (Default = 0.3) Histogram's bin width, in sigma units, used to sample the distribution of pixel brightness values in order to compute the background statistics. This parameter is applicable *only* to *stat* parameter values of `'mode'` or `'midpt'`. clobber : bool Specify whether or not to 'clobber' (delete then replace) previously generated products with the same names. scimask1 : str or list of str Mask images for *calibrated* ``SCI,1``, one for each input file. Pixels with zero values will be masked out, in addition to clipping. scimask2 : str or list of str Mask images for *calibrated* ``SCI,2``, one for each input file. Pixels with zero values will be masked out, in addition to clipping. This is not used for subarrays. dqbits : int, str, None (Default = None) Integer sum of all the DQ bit values from the input image's DQ array that should be considered "good" when building masks for de-striping computations. For example, if pixels in the DQ array can be combinations of 1, 2, 4, and 8 flags and one wants to consider DQ "defects" having flags 2 and 4 as being acceptable for de-striping computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel having values 2, 4, or 6 will be considered a good pixel, while a DQ pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged as a "bad" pixel. Alternatively, one can enter a comma- or '+'-separated list of integer bit flags that should be added to obtain the final "good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to setting `dqbits` to 12. | Set `dqbits` to 0 to make *all* non-zero pixels in the DQ mask to be considered "bad" pixels, and the corresponding image pixels not to be used for de-striping computations. | Default value (`None`) will turn off the use of image's DQ array for de-striping computations. | In order to reverse the meaning of the `dqbits` parameter from indicating values of the "good" DQ flags to indicating the "bad" DQ flags, prepend '~' to the string value. For example, in order not to use pixels with DQ flags 4 and 8 for sky computations and to consider as "good" all other pixels (regardless of their DQ flag), set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the same effect with an `int` input value (except for 0), enter -(4+8+1)=-13. Following this convention, a `dqbits` string value of ``'~0'`` would be equivalent to setting ``dqbits=None``. .. note:: DQ masks (if used) *will be* combined with user masks specified in the `scimask1` and `scimask2` parameters (if any). rpt_clean : int An integer indicating how many *additional* times stripe cleaning should be performed on the input image. Default = 0. atol : float, None The threshold for maximum absolute value of bias stripe correction below which repeated cleanings can stop. When `atol` is `None` cleaning will be repeated `rpt_clean` number of times. Default = 0.01 [e]. cte_correct : bool Perform CTE correction. verbose : bool Print informational messages. Default = True. Raises ------ ImportError ``stsci.tools`` not found. IOError Input file does not exist. ValueError Invalid header values or CALACS version. 
""" # Optional package dependencies from stsci.tools import parseinput try: from stsci.tools.bitmask import interpret_bit_flags except ImportError: from stsci.tools.bitmask import (interpret_bits_value as interpret_bit_flags) # process input file(s) and if we have multiple input files - recursively # call acs_destripe_plus for each input image: flist = parseinput.parseinput(inputfile)[0] if isinstance(scimask1, str): mlist1 = parseinput.parseinput(scimask1)[0] elif isinstance(scimask1, np.ndarray): mlist1 = [scimask1.copy()] elif scimask1 is None: mlist1 = [] elif isinstance(scimask1, list): mlist1 = [] for m in scimask1: if isinstance(m, np.ndarray): mlist1.append(m.copy()) elif isinstance(m, str): mlist1 += parseinput.parseinput(m)[0] else: raise TypeError("'scimask1' must be a list of str or " "numpy.ndarray values.") else: raise TypeError("'scimask1' must be either a str, or a " "numpy.ndarray, or a list of the two type of " "values.") if isinstance(scimask2, str): mlist2 = parseinput.parseinput(scimask2)[0] elif isinstance(scimask2, np.ndarray): mlist2 = [scimask2.copy()] elif scimask2 is None: mlist2 = [] elif isinstance(scimask2, list): mlist2 = [] for m in scimask2: if isinstance(m, np.ndarray): mlist2.append(m.copy()) elif isinstance(m, str): mlist2 += parseinput.parseinput(m)[0] else: raise TypeError("'scimask2' must be a list of str or " "numpy.ndarray values.") else: raise TypeError("'scimask2' must be either a str, or a " "numpy.ndarray, or a list of the two type of " "values.") n_input = len(flist) n_mask1 = len(mlist1) n_mask2 = len(mlist2) if n_input == 0: raise ValueError( 'No input file(s) provided or the file(s) do not exist') if n_mask1 == 0: mlist1 = [None] * n_input elif n_mask1 != n_input: raise ValueError('Insufficient masks for [SCI,1]') if n_mask2 == 0: mlist2 = [None] * n_input elif n_mask2 != n_input: raise ValueError('Insufficient masks for [SCI,2]') if n_input > 1: for img, mf1, mf2 in zip(flist, mlist1, mlist2): destripe_plus(inputfile=img, suffix=suffix, stat=stat, lower=lower, upper=upper, binwidth=binwidth, maxiter=maxiter, sigrej=sigrej, scimask1=scimask1, scimask2=scimask2, dqbits=dqbits, cte_correct=cte_correct, clobber=clobber, verbose=verbose) return inputfile = flist[0] scimask1 = mlist1[0] scimask2 = mlist2[0] # verify that the RAW image exists in cwd cwddir = os.getcwd() if not os.path.exists(os.path.join(cwddir, inputfile)): raise IOError(f"{inputfile} does not exist.") # get image's primary header: header = fits.getheader(inputfile) # verify masks defined (or not) simultaneously: if (header['CCDAMP'] == 'ABCD' and ((scimask1 is not None and scimask2 is None) or (scimask1 is None and scimask2 is not None))): raise ValueError("Both 'scimask1' and 'scimask2' must be specified " "or not specified together.") calacs_str = subprocess.check_output(['calacs.e', '--version' ]).split()[0] # nosec # noqa calacs_ver = [int(x) for x in calacs_str.decode().split('.')] if calacs_ver < [8, 3, 1]: raise ValueError(f'CALACS {calacs_str} is incomptible. 
' 'Must be 8.3.1 or later.') # check date for post-SM4 and if supported subarray or full frame is_subarray = False ctecorr = header['PCTECORR'] aperture = header['APERTURE'] detector = header['DETECTOR'] date_obs = Time(header['DATE-OBS']) # intermediate filenames blvtmp_name = inputfile.replace('raw', 'blv_tmp') blctmp_name = inputfile.replace('raw', 'blc_tmp') # output filenames tra_name = inputfile.replace('_raw.fits', '.tra') flt_name = inputfile.replace('raw', 'flt') flc_name = inputfile.replace('raw', 'flc') if detector != 'WFC': raise ValueError(f"{inputfile} is not a WFC image, please check the " "'DETECTOR' keyword.") if date_obs < SM4_DATE: raise ValueError(f"{inputfile} is a pre-SM4 image.") if header['SUBARRAY'] and cte_correct: if aperture in SUBARRAY_LIST: is_subarray = True else: LOG.warning('Using non-supported subarray, ' 'turning CTE correction off') cte_correct = False # delete files from previous CALACS runs if clobber: for tmpfilename in [ blvtmp_name, blctmp_name, flt_name, flc_name, tra_name ]: if os.path.exists(tmpfilename): os.remove(tmpfilename) # run ACSCCD on RAW acsccd.acsccd(inputfile) # modify user mask with DQ masks if requested dqbits = interpret_bit_flags(dqbits) if dqbits is not None: # save 'tra' file in memory to trick the log file # not to save first acs2d log as this is done only # for the purpose of obtaining DQ masks. # WISH: it would have been nice is there was an easy way of obtaining # just the DQ masks as if data were calibrated but without # having to recalibrate them with acs2d. if os.path.isfile(tra_name): with open(tra_name) as fh: tra_lines = fh.readlines() else: tra_lines = None # apply flats, etc. acs2d.acs2d(blvtmp_name, verbose=False, quiet=True) # extract DQ arrays from the FLT image: dq1, dq2 = _read_DQ_arrays(flt_name) mask1 = _get_mask(scimask1, 1) scimask1 = acs_destripe._mergeUserMaskAndDQ(dq1, mask1, dqbits) mask2 = _get_mask(scimask2, 2) if dq2 is not None: scimask2 = acs_destripe._mergeUserMaskAndDQ(dq2, mask2, dqbits) elif mask2 is None: scimask2 = None # reconstruct trailer file: if tra_lines is not None: with open(tra_name, mode='w') as fh: fh.writelines(tra_lines) # delete temporary FLT image: if os.path.isfile(flt_name): os.remove(flt_name) # execute destriping (post-SM4 data only) acs_destripe.clean(blvtmp_name, suffix, stat=stat, maxiter=maxiter, sigrej=sigrej, lower=lower, upper=upper, binwidth=binwidth, mask1=scimask1, mask2=scimask2, dqbits=dqbits, rpt_clean=rpt_clean, atol=atol, clobber=clobber, verbose=verbose) blvtmpsfx = f'blv_tmp_{suffix}' os.rename(inputfile.replace('raw', blvtmpsfx), blvtmp_name) # update subarray header if is_subarray and cte_correct: fits.setval(blvtmp_name, 'PCTECORR', value='PERFORM') ctecorr = 'PERFORM' # perform CTE correction on destriped image if cte_correct: if ctecorr == 'PERFORM': acscte.acscte(blvtmp_name) else: LOG.warning(f"PCTECORR={ctecorr}, cannot run CTE correction") cte_correct = False # run ACS2D to get FLT and FLC images acs2d.acs2d(blvtmp_name) if cte_correct: acs2d.acs2d(blctmp_name) # delete intermediate files os.remove(blvtmp_name) if cte_correct and os.path.isfile(blctmp_name): os.remove(blctmp_name) info_str = f'Done.\nFLT: {flt_name}\n' if cte_correct: info_str += f'FLC: {flc_name}\n' LOG.info(info_str)
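# Sketch of how the ``dqbits`` strings documented above map to integer
# masks (values follow the docstring: '4,8' and '4+8' give 12, '~4,8'
# gives -(4+8+1) = -13, and None disables DQ masking).
def _example_dqbits():
    from stsci.tools.bitmask import interpret_bit_flags
    for flags in ('4,8', '4+8', '~4,8', None):
        print(flags, '->', interpret_bit_flags(flags))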
def acsccd(input, exec_path='', time_stamps=False, verbose=False, quiet=False, exe_args=None): r""" Run the acsccd.e executable as from the shell. Expect input to be ``*_raw.fits``. Output is automatically named ``*_blv_tmp.fits``. Parameters ---------- input : str or list of str Input filenames in one of these formats: * a single filename ('j1234567q_raw.fits') * a Python list of filenames * a partial filename with wildcards ('\*raw.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) exec_path : str, optional The complete path to ACSCCD executable. If not given, run ACSCCD given by 'acsccd.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1'] """ from stsci.tools import parseinput # Optional package dependency if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acsccd.e'] # Parse input to get list of filenames to process. # acsccd.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') if exe_args: call_list.extend(exe_args) #if dqicorr: # call_list.append('-dqi') #if atodcor: # call_list.append('-atod') #if blevcorr: # call_list.append('-blev') #if biascorr: # call_list.append('-bias') subprocess.check_call(call_list)
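# Sketch of pointing at a locally built executable via exec_path (both
# the input filename and the path below are placeholders).
def _example_acsccd_local_build():
    acsccd('j1234567q_raw.fits', exec_path='/opt/hstcal/bin/acsccd.e',
           quiet=True)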
def buildEmptyDRZ(input, output): """ Create an empty DRZ file. This module creates an empty DRZ file in a valid FITS format so that the HST pipeline can handle the Multidrizzle zero exposure time exception where all data has been excluded from processing. Parameters ---------- input : str filename of the initial input to process_input output : str filename of the default empty _drz.fits file to be generated """ # Identify the first input image inputfile = parseinput.parseinput(input)[0] if not inputfile: print('\n******* ERROR *******', file=sys.stderr) print( 'No input file found! Check specification of parameter ' '"input". ', file=sys.stderr) print('Quitting...', file=sys.stderr) print('******* ***** *******\n', file=sys.stderr) return # raise IOError, "No input file found!" # Set up output file here... if output is None: if len(input) == 1: oname = fileutil.buildNewRootname(input[0]) else: oname = 'final' output = fileutil.buildNewRootname(oname, extn='_drz.fits') else: if '_drz' not in output: output = fileutil.buildNewRootname(output, extn='_drz.fits') print('Building empty DRZ file with output name: %s' % output) # Open the first image (of the excludedFileList?) to use as a template to build # the DRZ file. try: log.info('Building empty DRZ file from %s' % inputfile[0]) img = fits.open(inputfile[0], memmap=False) except: raise IOError('Unable to open file %s \n' % inputfile) # Create the fitsobject fitsobj = fits.HDUList() # Copy the primary header hdu = img[0].copy() fitsobj.append(hdu) # Modify the 'NEXTEND' keyword of the primary header to 3 for the # 'sci, wht, and ctx' extensions of the newly created file. fitsobj[0].header['NEXTEND'] = 3 # Create the 'SCI' extension hdu = fits.ImageHDU(header=img['sci', 1].header.copy()) hdu.header['EXTNAME'] = 'SCI' fitsobj.append(hdu) # Create the 'WHT' extension hdu = fits.ImageHDU(header=img['sci', 1].header.copy()) hdu.header['EXTNAME'] = 'WHT' fitsobj.append(hdu) # Create the 'CTX' extension hdu = fits.ImageHDU(header=img['sci', 1].header.copy()) hdu.header['EXTNAME'] = 'CTX' fitsobj.append(hdu) # Add HISTORY comments explaining the creation of this file. fitsobj[0].header.add_history("** AstroDrizzle has created this empty " "DRZ product because**") fitsobj[0].header.add_history("** all input images were excluded from " "processing.**") # Change the filename in the primary header to reflect the name of the output # filename. fitsobj[0].header['FILENAME'] = str(output) # +"_drz.fits" # Change the ROOTNAME keyword to the ROOTNAME of the output PRODUCT fitsobj[0].header['ROOTNAME'] = str(output.split('_drz.fits')[0]) # Modify the ASN_MTYP keyword to contain "PROD-DTH" so it can be properly # ingested into the archive catalog. # stis has this keyword in the [1] header, so I am directing the code # to first look in the primary header, then the first extension. try: fitsobj[0].header['ASN_MTYP'] = 'PROD-DTH' except: fitsobj[1].header['ASN_MTYP'] = 'PROD-DTH' # If the file is already on disk delete it and replace it with the # new file dirfiles = os.listdir(os.curdir) if dirfiles.count(output) > 0: os.remove(output) log.info(" Replacing %s..." % output) # Write out the empty DRZ file fitsobj.writeto(output) print(textutil.textbox( 'ERROR:\nAstroDrizzle has created an empty DRZ product because all ' 'input images were excluded from processing or a user requested the ' 'program to stop.') + '\n', file=sys.stderr) return
def blendheaders(drzfile, inputs=None, output=None, sciext='SCI', errext='ERR', dqext='DQ', verbose=False): """ Blend headers that went into creating the original drzfile into a new header with table that contains keyword values from all input images. The drzfile will be used to determine the names of the input files, should no filenames be provided in the 'inputs' parameter. The drzfile will be updated 'in-place' with the new headers and table if no output filename has been provided. Parameters ---------- drzfile : str Name of drizzled image(s) with headers that need updating. This can be specified as a single filename, or using wildcards, or '@'-file or python list of filenames. When no value for 'inputs' has been provided, this file(or set of files) will be used to determine the names of the input (flt.fits) files whose headers need to be blended to create the new drzfile header. inputs : list, optional If provided, the filenames with extensions for each chip provided in this list will be used to get the headers which will be blended into the final output headers. For example, ['j9cd01kqq_flt.fits[sci,1]','j9cd01kqq_flt.fits[sci,2]'] would create a blended header based on these chips headers. output : str, optional If specified, a new file will be written out that contains the updated (blended) headers. sciext: str, optional [Default: 'SCI'] EXTNAME of extensions with science data from the input FITS files. The header of this extension will be used as the basis for the SCI header of the drizzled product FITS file. errext: str, optional [Default: 'ERR'] EXTNAME of extensions with the error array from the input FITS files. The header of this extension will be used as the basis for the WHT header of the drizzled product FITS file. If blank or "INDEF", it will use the 'SCI' header as the basis for the output header for the WHT array. dqext: str, optional [Default: 'DQ'] EXTNAME of extensions with the data quality array from the input FITS files. The header of this extension will be used as the basis for the CTX header of the drizzled product FITS file (when a CTX extension gets created). If blank or "INDEF", it will use the 'SCI' header as the basis for the header of any generated CTX array. verbose : bool, optional [Default: False] Print out additional messages during processing when specified. """ # interpret input drzfiles = parseinput.parseinput(drzfile)[0] # operate on each drzfile specified for drzfile in drzfiles: if inputs in [None, '', ' ', 'INDEF', 'None']: inputs = extract_filenames_from_drz(drzfile) if verbose: print('Creating blended headers from: ') for i in inputs: print(' ', i) newhdrs, newtab = get_blended_headers(inputs, verbose=verbose) # Remove distortion related keywords not included in rules for hdr in newhdrs: remove_distortion_keywords(hdr) # open drizzle product to update headers with new headers open_mode = 'update' if output not in [None, '', ' ', 'INDEF', 'None']: open_mode = 'readonly' drzimg = fits.open(drzfile, mode=open_mode) # Determine whether we are working with a simple DRZ FITS file or # a full multi-extension DRZ FITS file. 
        if len(drzimg) < 3:
            # We are working with a simple FITS image, so concatenate the
            # blended PRIMARY and SCI headers
            drzimg[0].header = cat_headers(newhdrs[0], newhdrs[1])
            drzimg.append(newtab)
        else:
            # We are working with a full MEF file, so update all extension
            # headers
            for i, extn in enumerate(drzimg):
                if isinstance(extn, fits.BinTableHDU):
                    break

                # Update new headers with correct array sizes
                if isinstance(extn, fits.ImageHDU):
                    extn_naxis = extn.header['NAXIS']
                    newhdrs[i]['NAXIS'] = extn_naxis
                    newhdrs[i]['BITPIX'] = extn.header['BITPIX']
                    for card in newhdrs[i]['naxis*'].cards:
                        if len(card.keyword) > 5:  # NAXISj keywords
                            if extn_naxis > 0:
                                newhdrs[i][card.keyword] = extn.header[
                                    card.keyword]
                            else:
                                try:
                                    del newhdrs[i][card.keyword]
                                except KeyError:
                                    pass
                    newhdrs[i].set('EXTNAME', value=extn.header['EXTNAME'],
                                   after='ORIGIN')
                    newhdrs[i].set('EXTVER', value=extn.header['EXTVER'],
                                   after='EXTNAME')
                    for kw in WCS_KEYWORDS:
                        if kw in extn.header:
                            newhdrs[i][kw] = extn.header[kw]
                if isinstance(extn, fits.PrimaryHDU):
                    for card in extn.header['exp*'].cards:
                        newhdrs[i][card.keyword] = card.value
                    newhdrs[i]['NEXTEND'] = len(drzimg) - 1
                    newhdrs[i]['ROOTNAME'] = extn.header['rootname']
                    newhdrs[i]['BITPIX'] = extn.header['bitpix']

                    # Determine which keywords are included in the table but
                    # not in the new header. These will be removed from the
                    # output header altogether.
                    tabcols = newtab.data.dtype.names
                    hdrkws = list(newhdrs[i].keys())
                    del_kws = list(set(tabcols) - set(hdrkws))
                    del_kws.append('HISTORY')
                    for kw in extn.header:
                        if kw not in newhdrs[i] and kw not in del_kws:
                            newhdrs[i][kw] = extn.header[kw]

                extn.header = newhdrs[i]

            # Now append table with remaining header keyword values
            drzimg.append(newtab)

        if 'nextend' in drzimg[0].header:
            drzimg[0].header['nextend'] = len(drzimg) - 1

        # Write out the updated product
        if open_mode == 'update':
            drzimg.close()
            print('Updated ', drzfile, ' with blended headers.')
        else:
            if os.path.exists(output):
                os.remove(output)
            drzimg.writeto(output)
            drzimg.close()
            print('Created new file ', output, ' with blended headers.')

        # Clean up for the next run
        del drzimg, newhdrs, newtab
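# Illustrative usage sketch (not from the original module): blend the
# headers of a hypothetical drizzled product in place, letting the
# function derive the contributing FLT exposures from the product itself.
def _example_blendheaders():
    blendheaders('j9cd01010_drz.fits', verbose=True)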
def processFilenames(input=None, output=None, infilesOnly=False):
    """Process the input string which contains the input file information
       and return a filelist, an output name, an IVM list, and the ASN
       dictionary (if any).
    """
    ivmlist = None
    oldasndict = None

    if input is None:
        raise ValueError("No input files provided to processFilenames")

    if not isinstance(input, list) and ('_asn' in input or '_asc' in input):
        # Input is an association table.
        # Get the input files, and run makewcs on them.
        oldasndict = asnutil.readASNTable(input, prodonly=infilesOnly)

        if not infilesOnly:
            if output in ["", None, "None"]:
                # ensure output name is lower case
                output = oldasndict['output'].lower()

            asnhdr = fits.getheader(input, memmap=False)
            # Only perform duplication check if not already completed...
            dupcheck = asnhdr.get('DUPCHECK', default="PERFORM") == "PERFORM"

            filelist = buildASNList(oldasndict['order'], input,
                                    check_for_duplicates=dupcheck)

    elif not isinstance(input, list) and input[0] == '@':
        # input is an @-file
        # Read the first line in order to determine whether
        # IVM files have been specified in a second column...
        with open(input[1:]) as f:
            line = f.readline()

        # Parse the @-file with irafglob to extract the input filenames
        filelist = irafglob.irafglob(input, atfile=util.atfile_sci)
        # If there is a second column...
        if len(line.split()) == 2:
            # ...parse out the names of the IVM files as well
            ivmlist = irafglob.irafglob(input, atfile=util.atfile_ivm)
        if output in ['', None, "None"]:
            if len(filelist) == 1:
                output = fileutil.buildNewRootname(filelist[0])
            else:
                output = 'final'
    else:
        # input is a string or a python list
        filelist, output = parseinput.parseinput(input, outputname=output)
        if output in ['', None, "None"]:
            if len(filelist) == 1:
                output = fileutil.buildNewRootname(filelist[0])
            else:
                output = 'final'
        if not isinstance(input, list):
            # Sort the list of input files. This guarantees the same order
            # on all platforms, since directory listings may otherwise come
            # back in arbitrary (e.g. inode) order.
            filelist.sort()

    return filelist, output, ivmlist, oldasndict
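# Illustrative usage sketch (not from the original module): resolve a
# hypothetical wildcard specification into a sorted file list and a
# default output rootname.
def _example_process_filenames():
    filelist, output, ivmlist, asndict = processFilenames('*flt.fits')
    print(filelist, output)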
def create_astrometric_catalog(inputs, **pars):
    """Create an astrometric catalog that covers the inputs' field-of-view.

    Parameters
    ----------
    inputs : str
        Filenames of images to be aligned to astrometric catalog

    catalog : str, optional
        Name of catalog to extract astrometric positions for sources in the
        input images' field-of-view. Default: GAIADR2. Options available are
        documented on the catalog web page.

    output : str, optional
        Filename to give to the astrometric catalog read in from the master
        catalog web service. If None, no file will be written out.
        Default: ref_cat.ecsv

    gaia_only : bool, optional
        Specify whether or not to only use sources from GAIA in the output
        catalog. Default: False

    existing_wcs : HSTWCS object, optional
        Existing WCS object specified by the user.

    .. note::
        This function will point to the astrometric catalog web service
        defined through the use of the ASTROMETRIC_CATALOG_URL environment
        variable.

    Returns
    -------
    ref_table : object
        Astropy Table object of the catalog

    """
    # interpret input parameters
    catalog = pars.get("catalog", 'GAIADR2')
    output = pars.get("output", 'ref_cat.ecsv')
    gaia_only = pars.get("gaia_only", False)
    table_format = pars.get("table_format", 'ascii.ecsv')
    existing_wcs = pars.get("existing_wcs", None)
    inputs, _ = parseinput.parseinput(inputs)

    # start by creating a composite field-of-view for all inputs
    # This default output WCS will have the same plate-scale and orientation
    # as the first chip in the list, which for WFPC2 data means the PC.
    # Fortunately, for alignment, this doesn't matter since no resampling of
    # data will be performed.
    if existing_wcs:
        outwcs = existing_wcs
    else:
        outwcs = build_reference_wcs(inputs)
    radius = compute_radius(outwcs)
    ra, dec = outwcs.wcs.crval

    # perform query for this field-of-view
    ref_dict = get_catalog(ra, dec, sr=radius, catalog=catalog)
    colnames = ('ra', 'dec', 'mag', 'objID', 'GaiaID')
    col_types = ('f8', 'f8', 'f4', 'U25', 'U25')
    ref_table = Table(names=colnames, dtype=col_types)

    # Add catalog name as meta data
    ref_table.meta['catalog'] = catalog
    ref_table.meta['gaia_only'] = gaia_only

    # rename coordinate columns to be consistent with tweakwcs
    ref_table.rename_column('ra', 'RA')
    ref_table.rename_column('dec', 'DEC')

    # extract just the columns we want...
    num_sources = 0
    for source in ref_dict:
        if 'GAIAsourceID' in source:
            g = source['GAIAsourceID']
            if gaia_only and g.strip() == '':
                continue
        else:
            g = -1  # indicator for no source ID extracted
        r = float(source['ra'])
        d = float(source['dec'])
        m = -999.9  # float(source['mag'])
        o = source['objID']
        num_sources += 1
        ref_table.add_row((r, d, m, o, g))

    # Write out table to a file, if specified
    if output:
        ref_table.write(output, format=table_format)
        log.info("Created catalog '{}' with {} sources".format(
            output, num_sources))

    return ref_table
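# Illustrative usage sketch (not from the original module): query GAIA DR2
# for the field-of-view of a hypothetical exposure and write the result to
# a custom ECSV file.
def _example_create_astrometric_catalog():
    ref_table = create_astrometric_catalog('j9cd01kqq_flt.fits',
                                           catalog='GAIADR2',
                                           output='gaia_refs.ecsv')
    print(len(ref_table), 'reference sources found')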
def reset_dq_bits(input, bits, extver=None, extname='dq'):
    """ This function resets bits in the integer array(s) of a FITS file.

    Parameters
    ----------
    input : str
        full filename with path; may also be a comma-separated list,
        wildcard pattern, or @-file of such filenames

    bits : int or str
        sum or list of integers corresponding to all the bits to be reset

    extver : int or list of int, optional
        List of version numbers of the DQ arrays to be corrected
        [Default Value: None, will do all]

    extname : str, optional
        EXTNAME of the DQ arrays in the FITS file
        [Default Value: 'dq']

    Notes
    -----
    The default value of None for the 'extver' parameter specifies that all
    extensions with EXTNAME matching 'dq' (as specified by the 'extname'
    parameter) will have their bits reset.

    Examples
    --------
        1. The following command will reset the 4096 bits in all
           the DQ arrays of the file input_file_flt.fits::

                reset_dq_bits("input_file_flt.fits", 4096)

        2. To reset the 2, 32, 64, and 4096 bits in the second DQ array,
           specified as 'dq,2', in the file input_file_flt.fits::

                reset_dq_bits("input_file_flt.fits", "2,32,64,4096", extver=2)

    """
    # Interpret bits value
    bits = interpret_bit_flags(bits)

    flist, fcol = parseinput.parseinput(input)
    for filename in flist:
        # open input file in write mode to allow updating the DQ array
        # in-place
        p = fits.open(filename, mode='update', memmap=False)

        # Identify the DQ array(s) to be updated. If no extver is specified,
        # build a list of all DQ arrays in this file; use a loop-local list
        # so that one file's extensions do not carry over to the next file.
        if extver is None:
            extvers = []
            for hdu in p:
                # find only those extensions which match the input extname
                # using case-insensitive name comparisons for 'extname'
                if 'extver' in hdu.header and \
                   hdu.header.get('extname', '').lower() == extname.lower():
                    extvers.append(int(hdu.header['extver']))
        elif not isinstance(extver, list):
            # Otherwise, ensure that input extver values are a list
            extvers = [extver]
        else:
            extvers = extver

        # for each DQ array identified in the file...
        for extn in extvers:
            dqarr = p[extname, extn].data
            dqdtype = dqarr.dtype
            # reset the desired bits, preserving the original dtype
            p[extname, extn].data = (dqarr & ~bits).astype(dqdtype)
            log.info('Reset bit values of %s to a value of 0 in %s[%s,%s]' %
                     (bits, filename, extname, extn))
        # close the file with the updated DQ array(s)
        p.close()
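# Illustrative usage sketch (not from the original module): clear the
# 4096 (cosmic-ray rejected) flag from every DQ extension of a set of
# hypothetical FLT files so the affected pixels can be reconsidered.
def _example_reset_dq_bits():
    reset_dq_bits('*_flt.fits', 4096)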
def update(input, refdir="jref$", local=None, interactive=False,
           wcsupdate=True):
    """
    Updates headers of files given as input to point to the new reference
    files NPOLFILE and D2IMFILE required with the new C version of
    MultiDrizzle.

    Parameters
    ----------
    input : string or list
        Name of input file or files. Acceptable forms:

          - single filename with or without directory
          - @-file
          - association table
          - python list of filenames
          - wildcard specification of filenames

    refdir : string
        Path to directory containing new reference files, either
        environment variable or full path.

    local : bool
        Specifies whether or not to copy new reference files to local
        directory for use with the input files.

    interactive : bool
        Specifies whether or not to interactively ask the user for the
        exact names of the new reference files instead of automatically
        searching a directory for them.

    wcsupdate : bool
        Specifies whether or not to update the WCS information in this
        file to use the new reference files.

    Examples
    --------
    1. A set of associated images specified by an ASN file can be updated
       to use the NPOLFILEs and D2IMFILE found in the local directory
       defined using the `myjref$` environment variable under PyRAF using::

           >>> import updatenpol
           >>> updatenpol.update('j8bt06010_asn.fits', 'myjref$')

    2. Another use under Python would be to feed it a specific list of
       files to be updated using::

           >>> updatenpol.update(['file1_flt.fits', 'file2_flt.fits'],
           ...                   'myjref$')

    3. Files in another directory can also be processed using::

           >>> updatenpol.update('data$*flt.fits', '../new/ref/')

    Notes
    -----
    .. warning::
        This program requires access to the `jref$` directory in order
        to evaluate the DGEOFILE specified in the input image header.
        This evaluation allows the program to get the information it
        needs to identify the correct NPOLFILE.

    The use of this program now requires that a directory be set up with
    all the new NPOLFILE and D2IMFILE reference files for ACS (a single
    directory for all files for all ACS detectors will be fine, much like
    jref). Currently, all the files generated by the ACS team have
    initially been made available at::

        /grp/hst/acs/lucas/new-npl/

    The one known limitation to how this program works comes from
    confusion if more than one file could possibly be used as the new
    reference file. This would only happen when NPOLFILE reference files
    have been checked into CDBS multiple times, and there are several
    versions that apply to the same detector/filter combination. However,
    that can be sorted out later if we get into that situation at all.

    """
    print('UPDATENPOL Version', __version__ + ' (' + __vdate__ + ')')
    # expand (as needed) the list of input files
    files, fcol = parseinput.parseinput(input)

    # expand reference directory name (if necessary) to interpret IRAF or
    # environment variable names; this is needed even in interactive mode
    # so that error messages can report the resolved directory
    rdir = fu.osfn(refdir)
    if not interactive:
        ngeofiles, ngcol = parseinput.parseinput(
            os.path.join(rdir, '*npl.fits'))
        # Find D2IMFILE in refdir for updating input file header as well
        d2ifiles, d2col = parseinput.parseinput(
            os.path.join(rdir, '*d2i.fits'))

    # Now, build a matched list of input files and DGEOFILE reference files
    # to use for selecting the appropriate new reference file from the
    # refdir directory.
    for f in files:
        print('Updating: ', f)
        fdir = os.path.split(f)[0]
        # Open each file...
        fimg = fits.open(f, mode='update')
        phdr = fimg['PRIMARY'].header
        fdet = phdr['detector']
        dfile = phdr.get('DGEOFILE', '')
        if dfile in ['N/A', '', ' ', None]:
            npolname = ''
        else:
            # get header of DGEOFILE; this also verifies the file exists
            dhdr = fits.getheader(fu.osfn(dfile))
            if not interactive:
                # search all new NPOLFILEs for one that matches the current
                # DGEOFILE config
                npol = find_npolfile(ngeofiles, fdet,
                                     [phdr['filter1'], phdr['filter2']])
            else:
                if sys.version_info[0] >= 3:
                    # NOTE: the function parameter shadows the ``input``
                    # builtin, so fetch the builtin explicitly here
                    import builtins
                    npol = builtins.input(
                        "Enter name of NPOLFILE for %s:" % f)
                else:
                    npol = raw_input("Enter name of NPOLFILE for %s:" % f)
                if npol == "":
                    npol = None

            if npol is None:
                errstr = ("No valid NPOLFILE found in " + rdir +
                          " for detector=" + fdet + "\n")
                errstr += ("  filters = " + phdr['filter1'] + "," +
                           phdr['filter2'])
                raise ValueError(errstr)

            npolname = os.path.split(npol)[1]
            if local:
                npolname = os.path.join(fdir, npolname)
                # clobber any previous copies of this reference file
                if os.path.exists(npolname):
                    os.remove(npolname)
                shutil.copy(npol, npolname)
            else:
                if '$' in refdir:
                    npolname = refdir + npolname
                else:
                    npolname = os.path.join(refdir, npolname)

        phdr.set('NPOLFILE', value=npolname,
                 comment="Non-polynomial corrections in Paper IV LUT",
                 after='DGEOFILE')

        # Now find correct D2IFILE
        if not interactive:
            d2i = find_d2ifile(d2ifiles, fdet)
        else:
            if sys.version_info[0] >= 3:
                # NOTE: as above, use the builtin explicitly since the
                # function parameter shadows ``input``
                import builtins
                d2i = builtins.input("Enter name of D2IMFILE for %s:" % f)
            else:
                d2i = raw_input("Enter name of D2IMFILE for %s:" % f)
            if d2i == "":
                d2i = None

        if d2i is None:
            print('=============\nWARNING:')
            print("    No valid D2IMFILE found in " + rdir +
                  " for detector =" + fdet)
            print("    D2IMFILE correction will not be applied.")
            print('=============\n')
            d2iname = ""
        else:
            d2iname = os.path.split(d2i)[1]
            if local:
                # Copy D2IMFILE to local data directory alongside input
                # file as well
                d2iname = os.path.join(fdir, d2iname)
                # clobber any previous copies of this reference file
                if os.path.exists(d2iname):
                    os.remove(d2iname)
                shutil.copy(d2i, d2iname)
            else:
                if '$' in refdir:
                    d2iname = refdir + d2iname
                else:
                    d2iname = os.path.join(refdir, d2iname)

        phdr.set('D2IMFILE', value=d2iname,
                 comment="Column correction table",
                 after='DGEOFILE')

        # Close this input file header and go on to the next
        fimg.close()

        if wcsupdate:
            updatewcs.updatewcs(f)
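# Illustrative usage sketch (not from the original module): point the
# member images of a hypothetical ASN at new NPOLFILE/D2IMFILE reference
# files kept in a local directory, copying them alongside the data.
def _example_updatenpol():
    update('j8bt06010_asn.fits', refdir='../new/ref/', local=True)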
def acs2d(input, exec_path='', time_stamps=False, verbose=False, quiet=False): """ Run the acs2d.e executable as from the shell. Output is automatically named based on input suffix: +--------------------+----------------+------------------------------+ | INPUT | OUTPUT | EXPECTED DATA | +====================+================+==============================+ | ``*_raw.fits`` | ``*_flt.fits`` | SBC image. | +--------------------+----------------+------------------------------+ | ``*_blv_tmp.fits`` | ``*_flt.fits`` | ACSCCD output. | +--------------------+----------------+------------------------------+ | ``*_blc_tmp.fits`` | ``*_flc.fits`` | ACSCCD output with PCTECORR. | +--------------------+----------------+------------------------------+ | ``*_crj_tmp.fits`` | ``*_crj.fits`` | ACSREJ output. | +--------------------+----------------+------------------------------+ | ``*_crc_tmp.fits`` | ``*_crc.fits`` | ACSREJ output with PCTECORR. | +--------------------+----------------+------------------------------+ Parameters ---------- input : str or list of str Input filenames in one of these formats: * a single filename ('j1234567q_blv_tmp.fits') * a Python list of filenames * a partial filename with wildcards ('\*blv_tmp.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) exec_path : str, optional The complete path to ACS2D executable. If not given, run ACS2D given by 'acs2d.e'. time_stamps : bool, optional Set to True to turn on the printing of time stamps. verbose : bool, optional Set to True for verbose output. quiet : bool, optional Set to True for quiet output. """ if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['acs2d.e'] # Parse input to get list of filenames to process. # acs2d.e only takes 'file1,file2,...' infiles, dummy_out = parseinput.parseinput(input) call_list.append(','.join(infiles)) if time_stamps: call_list.append('-t') if verbose: call_list.append('-v') if quiet: call_list.append('-q') subprocess.call(call_list)
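# Illustrative usage sketch (not from the original module): run the ACS2D
# step on every blv_tmp file in the current directory with time stamps and
# verbose output enabled.
def _example_acs2d():
    acs2d('*blv_tmp.fits', time_stamps=True, verbose=True)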