Example #1
def parse_input(input, prodonly=False, sort_wildcards=True):
    catlist = None

    if not isinstance(input, list) and \
       ('_asn' in input or '_asc' in input):
        # Input is an association table
        # Get the input files
        oldasndict = asnutil.readASNTable(input, prodonly=prodonly)
        filelist = [
            fileutil.buildRootname(fname) for fname in oldasndict['order']
        ]

    elif not isinstance(input, list) and \
         (input[0] == '@'):
        # input is an @ file
        f = open(input[1:])
        # Read the first line in order to determine whether
        # catalog files have been specified in a second column...
        line = f.readline()
        f.close()
        # Parse the @-file with irafglob to extract the input filename
        filelist = irafglob.irafglob(input, atfile=atfile_sci)
        print(line)
        # If there are additional columns for catalog files...
        if len(line.split()) > 1:
            # ...parse out the names of the catalog files as well
            catlist, catdict = parse_atfile_cat(input)
    elif isinstance(input, list):
        # input a python list
        filelist = []
        for fn in input:
            flist, output = parse_input(fn, prodonly=prodonly)
            # if wild-cards are given, sort for uniform usage:
            if fn.find('*') > -1 and sort_wildcards:
                flist.sort()
            filelist += flist
    else:
        # input is either a string or something unrecognizable, so give it a try:
        try:
            filelist, output = parseinput.parseinput(input)
            # if wild-cards are given, sort for uniform usage:
            if input.find('*') > -1 and sort_wildcards:
                filelist.sort()
        except IOError:
            raise

    return filelist, catlist
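
A hedged usage sketch for parse_input (the filenames below are hypothetical placeholders, not files shipped with any package):

# Each call returns (filelist, catlist); catlist is only populated for
# @-files that carry a second column of catalog names.
files, cats = parse_input('j94f05bgq_asn.fits')   # association table
files, cats = parse_input('@inputs.txt')          # @-file listing exposures
files, cats = parse_input(['*_flt.fits'])         # python list with a wildcard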
Example #4
def _copyToNewWorkingDir(newdir, input):
    """ Copy the input file and all related files necessary for processing
        to the new working directory.

        This function works in a greedy manner: all files associated
        with all inputs (i.e., sharing the same rootname) will be copied
        to the new working directory.
    """
    flist = []
    if '_asn.fits' in input:
        asndict = asnutil.readASNTable(input, None)
        flist.append(input[:input.find('_')])
        flist.extend(asndict['order'])
        flist.append(asndict['output'])
    else:
        flist.append(input[:input.find('_')])
    # copy all files related to these rootnames into the new dir
    for rootname in flist:
        for fname in glob.glob(rootname + '*'):
            shutil.copy(fname, os.path.join(newdir, fname))
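
A short sketch of how this helper might be driven (the directory and ASN filename are hypothetical; os, glob, shutil and asnutil are assumed to be module-level imports, as the snippet above implies):

import os

workdir = 'processing_tmp'                            # hypothetical scratch dir
os.makedirs(workdir, exist_ok=True)
_copyToNewWorkingDir(workdir, 'j94f05bgq_asn.fits')   # copies every 'j94f05bgq*' file
os.chdir(workdir)                                     # continue processing there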
Example #5
def combine_flt(files=[],
                output='exposures_cmb.fits',
                grow=1,
                add_padding=True,
                pixfrac=0.5,
                kernel='point',
                verbose=True,
                clobber=True,
                ds9=None):
    """Drizzle distorted FLT frames to an "interlaced" image
        
    Parameters
    ----------
    files : list of strings
        Filenames of FLT files to combine
    
    output : str
        Output filename of the combined file.  Convention elsewhere is to use
        an "_cmb.fits" extension to distinguish from "_flt.fits".
        
    grow : int
        Factor by which to `grow` the FLT frames to interlaced outputs.  For 
        example, `grow=2` results in 2x2 interlacing.
    
    add_padding : bool
        Expand pixel grid to accommodate all dithered exposures.  WCS is 
        preserved but "CRPIX" will change.
        
    pixfrac : float
        Drizzle pixfrac (for kernels other than 'point')
    
    kernel : {'point', 'square'}
        Drizzle kernel. The 'point' kernel is effectively interlacing and is
        best for preserving the noise properties of the final combined image.
        However, it can result in empty pixels, depending on the camera
        distortions and the dithering of the input exposures.
    
    ds9 : `pyds9.DS9`
        Display the progress of the script to a DS9 window.
        
    verbose : bool
        Print logging information
        
    clobber : bool
        Overwrite existing files
    
    Returns
    -------
    Nothing is returned; the combined image is written to `output`.
    
    """
    import numpy.linalg
    from stsci.tools import asnutil
    from drizzlepac import astrodrizzle
    # glob, np (numpy), pyfits (astropy.io.fits), pywcs (astropy.wcs), plus
    # utils, catIO and SIP_WCSMap are assumed to be module-level imports in
    # the host package.

    ### Allow `files` to be an ASN filename rather than a list of exposures
    if '_asn.fits' in files:
        asn = asnutil.readASNTable(files)
        files = ['{0}_flt.fits'.format(flt) for flt in asn['order']]
        if output == 'exposures_cmb.fits':  # i.e., still the default name
            output = '{0}_cmb.fits'.format(asn['output'])

    if False:  # disabled debugging block (FIGS grism test case); never executed
        files = glob.glob('ibhj3*flt.fits')
        files.sort()
        grism_files = files[1::2]

        ### FIGS
        info = catIO.Table('info')
        pas = np.cast[int](info['PA_V3'] * 10) / 10.
        pa_list = np.unique(pas)
        grism_files = info['FILE'][(info['FILTER'] == 'G102')
                                   & (pas == pa_list[0])]

        files = grism_files
        #utils = grizlidev.utils

    f0 = pyfits.open(files[0])
    h0 = f0[0].header.copy()
    h0['EXPTIME'] = 0.
    h0['NFILES'] = (len(files), 'Number of combined files')

    out_wcs = pywcs.WCS(f0[1].header, relax=True)
    out_wcs.pscale = utils.get_wcs_pscale(out_wcs)
    # out_wcs.pscale = np.sqrt(out_wcs.wcs.cd[0,0]**2 +
    #                          out_wcs.wcs.cd[1,0]**2)*3600.

    ### Compute maximum offset needed for padding
    if add_padding:
        ra0, de0 = out_wcs.all_pix2world([0], [0], 0)

        x0 = np.zeros(len(files))
        y0 = np.zeros(len(files))

        for i, file in enumerate(files):
            hx = pyfits.getheader(file, 0)
            h0['EXPTIME'] += hx['EXPTIME']
            h0['FILE{0:04d}'.format(i)] = (file,
                                           'Included file #{0:d}'.format(i))

            h = pyfits.getheader(file, 1)
            flt_wcs = pywcs.WCS(h, relax=True)
            x0[i], y0[i] = flt_wcs.all_world2pix(ra0, de0, 0)

        xmax = np.abs(x0).max()
        ymax = np.abs(y0).max()
        padx = 50 * int(np.ceil(xmax / 50.))
        pady = 50 * int(np.ceil(ymax / 50.))
        pad = np.maximum(padx, pady) * grow

        if verbose:
            print('Maximum shift (x, y) = ({0:6.1f}, {1:6.1f}), pad={2:d}'.
                  format(xmax, ymax, pad))
    else:
        pad = 0

    inter_wcs = out_wcs.deepcopy()
    if grow > 1:
        inter_wcs.wcs.cd /= grow
        for i in range(inter_wcs.sip.a_order + 1):
            for j in range(inter_wcs.sip.a_order + 1):
                inter_wcs.sip.a[i, j] /= grow**(i + j - 1)

        for i in range(inter_wcs.sip.b_order + 1):
            for j in range(inter_wcs.sip.b_order + 1):
                inter_wcs.sip.b[i, j] /= grow**(i + j - 1)

        inter_wcs._naxis1 *= grow
        inter_wcs._naxis2 *= grow
        inter_wcs.wcs.crpix *= grow
        inter_wcs.sip.crpix[0] *= grow
        inter_wcs.sip.crpix[1] *= grow

        if grow > 1:
            inter_wcs.wcs.crpix += grow / 2.
            inter_wcs.sip.crpix[0] += grow / 2.
            inter_wcs.sip.crpix[1] += grow / 2.

    inter_wcs._naxis1 += pad
    inter_wcs._naxis2 += pad
    inter_wcs.wcs.crpix += pad
    inter_wcs.sip.crpix[0] += pad
    inter_wcs.sip.crpix[1] += pad

    outh = inter_wcs.to_header(relax=True)
    for key in outh:
        if key.startswith('PC'):
            outh.rename_keyword(key, key.replace('PC', 'CD'))

    outh['GROW'] = grow, 'Grow factor'
    outh['PAD'] = pad, 'Image padding'
    # NOTE: `h` here is the last extension header read in the padding loop
    # above, so this line (and the x0/y0 values printed in the drizzle loop)
    # assumes add_padding=True.
    outh['BUNIT'] = h['BUNIT']

    sh = (1014 * grow + 2 * pad, 1014 * grow + 2 * pad)
    outsci = np.zeros(sh, dtype=np.float32)
    outwht = np.zeros(sh, dtype=np.float32)
    outctx = np.zeros(sh, dtype=np.int32)

    ## Pixel area map
    # PAM_im = pyfits.open(os.path.join(os.getenv('iref'), 'ir_wfc3_map.fits'))
    # PAM = PAM_im[1].data

    for i, file in enumerate(files):
        im = pyfits.open(file)

        if verbose:
            print('{0:3d} {1:s} {2:6.1f} {3:6.1f} {4:10.2f}'.format(
                i + 1, file, x0[i], y0[i], im[0].header['EXPTIME']))

        dq = utils.unset_dq_bits(im['DQ'].data, okbits=608, verbose=False)
        wht = 1. / im['ERR'].data**2
        wht[(im['ERR'].data == 0) | (dq > 0) | (~np.isfinite(wht))] = 0
        wht[im['SCI'].data < -3 * im['ERR'].data] = 0

        wht = np.cast[np.float32](wht)

        exp_wcs = pywcs.WCS(im[1].header, relax=True)
        exp_wcs.pscale = utils.get_wcs_pscale(exp_wcs)

        #pf = 0.5
        # import drizzlepac.wcs_functions as dwcs
        # xx = out_wcs.deepcopy()
        # #xx.all_pix2world = xx.wcs_world2pix
        # map = dwcs.WCSMap(exp_wcs, xx)

        astrodrizzle.adrizzle.do_driz(im['SCI'].data,
                                      exp_wcs,
                                      wht,
                                      inter_wcs,
                                      outsci,
                                      outwht,
                                      outctx,
                                      1.,
                                      'cps',
                                      1,
                                      wcslin_pscale=exp_wcs.pscale,
                                      uniqid=1,
                                      pixfrac=pixfrac,
                                      kernel=kernel,
                                      fillval=0,
                                      stepsize=10,
                                      wcsmap=SIP_WCSMap)

        if ds9 is not None:
            ds9.view(outsci, header=outh)

    #outsci /= out_wcs.pscale**2
    rms = 1 / np.sqrt(outwht)
    mask = (outwht == 0) | (rms > 100)
    rms[mask] = 0
    outsci[mask] = 0.

    hdu = [pyfits.PrimaryHDU(header=h0)]
    hdu.append(pyfits.ImageHDU(data=outsci / grow**2, header=outh, name='SCI'))
    hdu.append(pyfits.ImageHDU(data=rms / grow**2, header=outh, name='ERR'))
    hdu.append(pyfits.ImageHDU(data=mask * 1024, header=outh, name='DQ'))

    pyfits.HDUList(hdu).writeto(output, clobber=clobber, output_verify='fix')
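
A hedged usage sketch (the FLT filenames are placeholders; grow=2 yields the 2x2 interlacing described in the docstring):

import glob

flt_files = sorted(glob.glob('*_flt.fits'))   # hypothetical input exposures
combine_flt(files=flt_files, output='exposures_cmb.fits',
            grow=2, kernel='point', add_padding=True, verbose=True)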
Example #7
def parseinput(inputlist, outputname=None, atfile=None):
    """
    Recursively parse user input based upon the irafglob
    program and construct a list of files that need to be processed.
    This program addresses the following deficiency of the irafglob program:
    parseinput can extract filenames from association tables.

    It returns the list of input files that need to be processed, in
    addition to the name of any output file specified in an association
    table.

    Parameters
    ----------
    inputlist : string
        specification of input files using either wild-cards, an @-file, or a
        comma-separated list of filenames

    outputname : string
        desired name for the output product to be created from the input files

    atfile : object
        function to use in interpreting the @-file columns that gets passed
        to irafglob

    Returns
    -------
    files : list of strings
        names of input files to be processed
    newoutputname : string
        name of the output file to be created.

    See Also
    --------
    stsci.tools.irafglob

    """

    # Initialize some variables
    files = [] # list used to store names of input files
    newoutputname = outputname # Outputname returned to calling program.
                               # The value of outputname is only changed
                               # if it had a value of 'None' on input.


    # We can use irafglob to parse the input.  If the input wasn't
    # an association table, it needs to be either a wildcard, '@' file,
    # or comma-separated list.
    files = irafglob(inputlist, atfile=atfile)

    # Now that we have expanded the inputlist into a python list
    # containing the list of input files, it is necessary to examine
    # each of the files to make sure none of them are association tables.
    #
    # If an association table is found, the entries should be read
    # Determine if the input is an association table
    for file in files:
        if checkASN(file):
            # Create a list to store the files extracted from the
            # association table
            assoclist = []

            # The input is an association table
            try:
                # Open the association table
                assocdict = readASNTable(file, None, prodonly=False)
            except Exception:
                errorstr  = "###################################\n"
                errorstr += "#                                 #\n"
                errorstr += "# UNABLE TO READ ASSOCIATION FILE,#\n"
                errorstr +=  str(file)+'\n'
                errorstr += "# DURING FILE PARSING.            #\n"
                errorstr += "#                                 #\n"
                errorstr += "# Please determine if the file is #\n"
                errorstr += "# in the current directory and    #\n"
                errorstr += "# that it has been properly       #\n"
                errorstr += "# formatted.                      #\n"
                errorstr += "#                                 #\n"
                errorstr += "# This error message is being     #\n"
                errorstr += "# generated from within the       #\n"
                errorstr += "# parseinput.py module.           #\n"
                errorstr += "#                                 #\n"
                errorstr += "###################################\n"
                raise ValueError(errorstr)

            # Extract the output name from the association table if None
            # was provided on input.
            if outputname is None:
                newoutputname = assocdict['output']

            # Loop over the association dictionary to extract the input
            # file names.
            for f in assocdict['order']:
                assoclist.append(fileutil.buildRootname(f))

            # Remove the name of the association table from the list of files
            files.remove(file)
            # Append the list of filenames generated from the association table
            # to the master list of input files.
            files.extend(assoclist)

    # Return the list of the input files and the output name if provided in an association.
    return files, newoutputname
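
A hedged usage sketch (all filenames below are illustrative):

# Wildcards, @-files and comma-separated lists are all accepted; when an
# association table is found, its member filenames replace it in the result.
files, outname = parseinput('*_flt.fits')
files, outname = parseinput('@exposures.lst')
files, outname = parseinput('j94f05bgq_asn.fits')  # outname comes from the table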
Example #8
def processFilenames(input=None, output=None, infilesOnly=False):
    """Process the input string which contains the input file information and
       return a filelist,output
    """
    ivmlist = None
    oldasndict = None

    if input is None:
        raise ValueError("No input files provided to processInput")

    if not isinstance(input, list) and ('_asn' in input or '_asc' in input):
        # Input is an association table
        # Get the input files, and run makewcs on them
        oldasndict = asnutil.readASNTable(input, prodonly=infilesOnly)

        if not infilesOnly:
            if output in ["", None, "None"]:
                # ensure the output name is lower case
                output = oldasndict['output'].lower()

        asnhdr = fits.getheader(input, memmap=False)
        # Only perform duplication check if not already completed...
        dupcheck = asnhdr.get('DUPCHECK', default="PERFORM") == "PERFORM"

        #filelist = [fileutil.buildRootname(fname) for fname in oldasndict['order']]
        filelist = buildASNList(oldasndict['order'],
                                input,
                                check_for_duplicates=dupcheck)

    elif (not isinstance(input, list)) and \
       (input[0] == '@') :
        # input is an @ file
        f = open(input[1:])
        # Read the first line in order to determine whether
        # IVM files have been specified in a second column...
        line = f.readline()
        f.close()
        # Parse the @-file with irafglob to extract the input filename
        filelist = irafglob.irafglob(input, atfile=util.atfile_sci)
        # If there is a second column...
        if len(line.split()) == 2:
            # ...parse out the names of the IVM files as well
            ivmlist = irafglob.irafglob(input, atfile=util.atfile_ivm)
        if output in ['', None, "None"]:
            if len(filelist) == 1:
                output = fileutil.buildNewRootname(filelist[0])
            else:
                output = 'final'
    else:
        #input is a string or a python list
        try:
            filelist, output = parseinput.parseinput(input, outputname=output)
            if output in ['', None, "None"]:
                if len(filelist) == 1:
                    output = fileutil.buildNewRootname(filelist[0])
                else:
                    output = 'final'
            if not isinstance(input, list):
                filelist.sort()
        except IOError:
            raise

    # sort the list of input files
    # this ensures the list of input files has the same order on all platforms
    # it can have a different order because listdir() uses inode order, not unix-type order
    #filelist.sort()

    return filelist, output, ivmlist, oldasndict
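
A hedged usage sketch (the ASN filename is a placeholder):

# Returns the expanded file list, the derived output rootname, an optional
# list of IVM files (from a two-column @-file) and the parsed ASN dictionary.
files, output, ivmlist, asndict = processFilenames('j94f05bgq_asn.fits')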
def process_input(input, output=None, ivmlist=None, updatewcs=True, prodonly=False, shiftfile=None):

    ivmlist = None
    oldasndict = None

    if not isinstance(input, list) and \
       ('_asn' in input or '_asc' in input):
        # Input is an association table
        # Get the input files, and run makewcs on them
        oldasndict = asnutil.readASNTable(input, prodonly=prodonly)
        if not output:
            output = oldasndict['output']

        filelist = [fileutil.buildRootname(fname) for fname in oldasndict['order']]

    elif not isinstance(input, list) and \
         (input[0] == '@'):
        # input is an @ file
        f = open(input[1:])
        # Read the first line in order to determine whether
        # IVM files have been specified in a second column...
        line = f.readline()
        f.close()
        # Parse the @-file with irafglob to extract the input filename
        filelist = irafglob.irafglob(input, atfile=atfile_sci)
        # If there is a second column...
        if len(line.split()) == 2:
            # ...parse out the names of the IVM files as well
            ivmlist = irafglob.irafglob(input, atfile=atfile_ivm)
    else:
        #input is a string or a python list
        try:
            filelist, output = parseinput.parseinput(input, outputname=output)
            #filelist.sort()
        except IOError: raise

    # sort the list of input files
    # this ensures the list of input files has the same order on all platforms
    # it can have a different order because listdir() uses inode order, not unix-type order
    filelist.sort()
    newfilelist, ivmlist = checkFiles(filelist, ivmlist)


    if not newfilelist:
        buildEmptyDRZ(input, output)
        return None, None, output

    #make an asn table at the end
    if updatewcs:
        pydr_input = runmakewcs(newfilelist)
    else:
        pydr_input = newfilelist

    # AsnTable will handle the case when output==None
    if not oldasndict:
        oldasndict = asnutil.ASNTable(pydr_input, output=output)
        oldasndict.create()

    if shiftfile:
        oldasndict.update(shiftfile=shiftfile)

    asndict = update_member_names(oldasndict, pydr_input)

    # Build output filename
    drz_extn = '_drz.fits'
    for img in newfilelist:
        # special case logic to automatically recognize when _flc.fits files
        # are provided as input and produce a _drc.fits file instead
        if '_flc.fits' in img:
            drz_extn = '_drc.fits'
            break

    if output in [None,'']:
        output = fileutil.buildNewRootname(asndict['output'],
                                           extn=drz_extn)
    else:
        if '.fits' in output.lower():
            pass
        elif drz_extn[:4] not in output.lower():
            output = fileutil.buildNewRootname(output, extn=drz_extn)

    print('Setting up output name: ', output)

    return asndict, ivmlist, output
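
A hedged usage sketch (the ASN filename is a placeholder; updatewcs=True requires the instrument reference files to be available):

asndict, ivmlist, output = process_input('j94f05bgq_asn.fits',
                                         updatewcs=True, prodonly=False)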
def process(inFile,
            force=False,
            newpath=None,
            inmemory=False,
            num_cores=None,
            headerlets=True):
    """ Run astrodrizzle on input file/ASN table
        using default values for astrodrizzle parameters.
    """
    # We only need to import this package if a user runs the task
    import drizzlepac
    from drizzlepac import processInput  # used for creating new ASNs for _flc inputs
    import stwcs

    if headerlets:
        from stwcs.wcsutil import headerlet

    # Open the input file
    try:
        # Make sure given filename is complete and exists...
        inFilename = fileutil.buildRootname(inFile, ext=['.fits'])
        if not os.path.exists(inFilename):
            print("ERROR: Input file - %s - does not exist." % inFilename)
            return
    except TypeError:
        print("ERROR: Inappropriate input file.")
        return

    #If newpath was specified, move all files to that directory for processing
    if newpath:
        orig_processing_dir = os.getcwd()
        new_processing_dir = _createWorkingDir(newpath, inFilename)
        _copyToNewWorkingDir(new_processing_dir, inFilename)
        os.chdir(new_processing_dir)

    # Initialize for later use...
    _mname = None
    _new_asn = None
    _calfiles = []

    # Check input file to see if [DRIZ/DITH]CORR is set to PERFORM
    if '_asn' in inFilename:
        # We are working with an ASN table.
        # Use asnutil code to extract filename
        inFilename = _lowerAsn(inFilename)
        _new_asn = [inFilename]
        _asndict = asnutil.readASNTable(inFilename, None, prodonly=False)
        _cal_prodname = _asndict['output'].lower()
        _fname = fileutil.buildRootname(_cal_prodname, ext=['_drz.fits'])

        # Retrieve the first member's rootname for possible use later
        _fimg = fits.open(inFilename)
        for name in _fimg[1].data.field('MEMNAME'):
            if name[-1] != '*':
                _mname = name.split('\0', 1)[0].lower()
                break
        _fimg.close()
        del _fimg

    else:
        # Check to see if input is a _RAW file
        # If it is, strip off the _raw.fits extension...
        _indx = inFilename.find('_raw')
        if _indx < 0: _indx = len(inFilename)
        # ... and build the CALXXX product rootname.
        _mname = fileutil.buildRootname(inFilename[:_indx])
        _cal_prodname = inFilename[:_indx]
        # Reset inFilename to correspond to appropriate input for
        # drizzle: calibrated product name.
        inFilename = _mname

        if _mname is None:
            errorMsg = 'Could not find calibrated product!'
            raise Exception(errorMsg)

    # Create trailer filenames based on ASN output filename or
    # on input name for single exposures
    if '_raw' in inFile:
        # Output trailer file to RAW file's trailer
        _trlroot = inFile[:inFile.find('_raw')]
    elif '_asn' in inFile:
        # Output trailer file to ASN file's trailer, not product's trailer
        _trlroot = inFile[:inFile.find('_asn')]
    else:
        # Default: trim off last suffix of input filename
        # and replacing with .tra
        _indx = inFile.rfind('_')
        if _indx > 0:
            _trlroot = inFile[:_indx]
        else:
            _trlroot = inFile

    _trlfile = _trlroot + '.tra'

    # Open product and read keyword value
    # Check to see if product already exists...
    dkey = 'DRIZCORR'
    # ...if product does NOT exist, interrogate input file
    # to find out whether 'dcorr' has been set to PERFORM
    # Check if user wants to process again regardless of DRIZCORR keyword value
    if force: dcorr = 'PERFORM'
    else:
        if _mname:
            _fimg = fits.open(fileutil.buildRootname(_mname,
                                                     ext=['_raw.fits']))
            _phdr = _fimg['PRIMARY'].header
            if dkey in _phdr:
                dcorr = _phdr[dkey]
            else:
                dcorr = None
            _fimg.close()
            del _fimg
        else:
            dcorr = None

    time_str = _getTime()
    _tmptrl = _trlroot + '_tmp.tra'
    _drizfile = _trlroot + '_pydriz'
    _drizlog = _drizfile + ".log"  # the '.log' gets added automatically by astrodrizzle
    if dcorr == 'PERFORM':
        if '_asn.fits' not in inFilename:
            # Working with a singleton
            # However, we always want to make sure we always use
            # a calibrated product as input, if available.
            _infile = fileutil.buildRootname(_cal_prodname)
            _infile_flc = fileutil.buildRootname(_cal_prodname,
                                                 ext=['_flc.fits'])

            _cal_prodname = _infile
            _inlist = _calfiles = [_infile]

            # Add CTE corrected filename as additional input if present
            if os.path.exists(_infile_flc) and _infile_flc != _infile:
                _inlist.append(_infile_flc)

        else:
            # Working with an ASN table...
            _infile = inFilename
            flist, duplist = processInput.checkForDuplicateInputs(
                _asndict['order'])
            _calfiles = flist
            if len(duplist) > 0:
                origasn = processInput.changeSuffixinASN(inFilename, 'flt')
                dupasn = processInput.changeSuffixinASN(inFilename, 'flc')
                _inlist = [origasn, dupasn]
            else:
                _inlist = [_infile]
            # We want to keep the original specification of the calibration
            # product name, though, not a lower-case version...
            _cal_prodname = inFilename
            _new_asn.extend(_inlist)  # kept so we can delete it when finished

        # Run astrodrizzle and send its processing statements to _trlfile
        _pyver = drizzlepac.astrodrizzle.__version__

        for _infile in _inlist:  # Run astrodrizzle for all inputs
            # Create trailer marker message for start of astrodrizzle processing
            _trlmsg = _timestamp('astrodrizzle started ')
            _trlmsg = _trlmsg + __trlmarker__
            _trlmsg = _trlmsg + '%s: Processing %s with astrodrizzle Version %s\n' % (
                time_str, _infile, _pyver)
            print(_trlmsg)

            # Write out trailer comments to trailer file...
            ftmp = open(_tmptrl, 'w')
            ftmp.writelines(_trlmsg)
            ftmp.close()
            _appendTrlFile(_trlfile, _tmptrl)

            _pyd_err = _trlroot + '_pydriz.stderr'

            try:
                b = drizzlepac.astrodrizzle.AstroDrizzle(input=_infile,
                                                         runfile=_drizfile,
                                                         configobj='defaults',
                                                         in_memory=inmemory,
                                                         num_cores=num_cores,
                                                         **pipeline_pars)
            except Exception as errorobj:
                _appendTrlFile(_trlfile, _drizlog)
                _appendTrlFile(_trlfile, _pyd_err)
                _ftrl = open(_trlfile, 'a')
                _ftrl.write(
                    'ERROR: Could not complete astrodrizzle processing of %s.\n'
                    % _infile)
                _ftrl.write(str(sys.exc_info()[0]) + ': ')
                _ftrl.writelines(str(errorobj))
                _ftrl.write('\n')
                _ftrl.close()
                print(
                    'ERROR: Could not complete astrodrizzle processing of %s.'
                    % _infile)
                raise Exception(str(errorobj))

            # Now, append comments created by PyDrizzle to CALXXX trailer file
            print('Updating trailer file %s with astrodrizzle comments.' %
                  _trlfile)
            _appendTrlFile(_trlfile, _drizlog)

        # Save this for when astropy.io.fits can modify a file 'in-place'
        # Update calibration switch
        _fimg = fits.open(_cal_prodname, mode='update')
        _fimg['PRIMARY'].header[dkey] = 'COMPLETE'
        _fimg.close()
        del _fimg

        # Enforce pipeline convention of all lower-case product
        # names
        _prodlist = glob.glob('*drz.fits')
        for _prodname in _prodlist:
            _plower = _prodname.lower()
            if _prodname != _plower: os.rename(_prodname, _plower)

    else:
        # Create default trailer file messages when astrodrizzle is not
        # run on a file.  This will typically apply only to BIAS,DARK
        # and other reference images.
        # Start by building up the message...
        _trlmsg = _timestamp('astrodrizzle skipped ')
        _trlmsg = _trlmsg + __trlmarker__
        _trlmsg = _trlmsg + '%s: astrodrizzle processing not requested for %s.\n' % (
            time_str, inFilename)
        _trlmsg = _trlmsg + '       astrodrizzle will not be run at this time.\n'
        print(_trlmsg)

        # Write message out to temp file and append it to full trailer file
        ftmp = open(_tmptrl, 'w')
        ftmp.writelines(_trlmsg)
        ftmp.close()
        _appendTrlFile(_trlfile, _tmptrl)

    _fmsg = None
    # Append final timestamp to trailer file...
    _final_msg = '%s: Finished processing %s \n' % (time_str, inFilename)
    _final_msg += _timestamp('astrodrizzle completed ')
    _trlmsg += _final_msg
    ftmp = open(_tmptrl, 'w')
    ftmp.writelines(_trlmsg)
    ftmp.close()
    _appendTrlFile(_trlfile, _tmptrl)

    # If we created a new ASN table, we need to remove it
    if _new_asn is not None:
        for _name in _new_asn:
            fileutil.removeFile(_name)

    # Clean up any generated OrIg_files directory
    if os.path.exists("OrIg_files"):
        # check to see whether this directory is empty
        flist = glob.glob('OrIg_files/*.fits')
        if len(flist) == 0:
            os.rmdir("OrIg_files")
        else:
            print(
                'OrIg_files directory NOT removed as it still contained images...'
            )
    if headerlets:
        # Generate headerlets for each updated FLT image
        hlet_msg = _timestamp("Writing Headerlets started")
        for fname in _calfiles:
            frootname = fileutil.buildNewRootname(fname)
            hname = "%s_flt_hlet.fits" % frootname
            hlet_msg += "Created Headerlet file %s \n" % hname
            try:
                headerlet.write_headerlet(
                    fname,
                    'OPUS',
                    output='flt',
                    wcskey='PRIMARY',
                    author="OPUS",
                    descrip="Default WCS from Pipeline Calibration",
                    attach=False,
                    clobber=True,
                    logging=False)
            except ValueError:
                hlet_msg += _timestamp(
                    "SKIPPED: Headerlet not created for %s \n" % fname)
                # update trailer file to log creation of headerlet files
        hlet_msg += _timestamp("Writing Headerlets completed")
        ftrl = open(_trlfile, 'a')
        ftrl.write(hlet_msg)
        ftrl.close()

    # If processing was done in a temp working dir, restore results to original
    # processing directory, return to original working dir and remove temp dir
    if newpath:
        _restoreResults(new_processing_dir, orig_processing_dir)
        os.chdir(orig_processing_dir)
        _removeWorkingDir(new_processing_dir)

    # Provide feedback to user
    print(_final_msg)
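
A hedged usage sketch (the ASN filename is a placeholder; pipeline_pars and the trailer/timestamp helpers are assumed to be defined at module level):

process('j94f05bgq_asn.fits', force=True)   # run astrodrizzle regardless of DRIZCORR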
Example #11
def process(inFile, force=False, newpath=None, inmemory=False, num_cores=None,
            headerlets=True):
    """ Run astrodrizzle on input file/ASN table
        using default values for astrodrizzle parameters.
    """
    # We only need to import this package if a user runs the task
    import drizzlepac
    from drizzlepac import processInput # used for creating new ASNs for _flc inputs
    import stwcs

    if headerlets:
        from stwcs.wcsutil import headerlet

    # Open the input file
    try:
        # Make sure given filename is complete and exists...
        inFilename = fileutil.buildRootname(inFile,ext=['.fits'])
        if not os.path.exists(inFilename):
            print("ERROR: Input file - %s - does not exist." % inFilename)
            return
    except TypeError:
        print("ERROR: Inappropriate input file.")
        return

    #If newpath was specified, move all files to that directory for processing
    if newpath:
        orig_processing_dir = os.getcwd()
        new_processing_dir = _createWorkingDir(newpath,inFilename)
        _copyToNewWorkingDir(new_processing_dir,inFilename)
        os.chdir(new_processing_dir)

    # Initialize for later use...
    _mname = None
    _new_asn = None
    _calfiles = []

    # Identify WFPC2 inputs to account for differences in WFPC2 inputs
    wfpc2_input = fits.getval(inFilename, 'instrume') == 'WFPC2'
    cal_ext = None

    # Check input file to see if [DRIZ/DITH]CORR is set to PERFORM
    if '_asn' in inFilename:
        # We are working with an ASN table.
        # Use asnutil code to extract filename
        inFilename = _lowerAsn(inFilename)
        _new_asn = [inFilename]
        _asndict = asnutil.readASNTable(inFilename,None,prodonly=False)
        _cal_prodname = _asndict['output'].lower()
        _fname = fileutil.buildRootname(_cal_prodname,ext=['_drz.fits'])

        # Retrieve the first member's rootname for possible use later
        _fimg = fits.open(inFilename, memmap=False)
        for name in _fimg[1].data.field('MEMNAME'):
            if name[-1] != '*':
                _mname = name.split('\0', 1)[0].lower()
                break
        _fimg.close()
        del _fimg

    else:
        # Check to see if input is a _RAW file
        # If it is, strip off the _raw.fits extension...
        _indx = inFilename.find('_raw')
        if _indx < 0: _indx = len(inFilename)
        # ... and build the CALXXX product rootname.
        if wfpc2_input:
            # force code to define _c0m file as calibrated product to be used
            cal_ext = ['_c0m.fits']
        _mname = fileutil.buildRootname(inFilename[:_indx], ext=cal_ext)

        _cal_prodname = inFilename[:_indx]
        # Reset inFilename to correspond to appropriate input for
        # drizzle: calibrated product name.
        inFilename = _mname

        if _mname is None:
            errorMsg = 'Could not find calibrated product!'
            raise Exception(errorMsg)

    # Create trailer filenames based on ASN output filename or
    # on input name for single exposures
    if '_raw' in inFile:
        # Output trailer file to RAW file's trailer
        _trlroot = inFile[:inFile.find('_raw')]
    elif '_asn' in inFile:
        # Output trailer file to ASN file's trailer, not product's trailer
        _trlroot = inFile[:inFile.find('_asn')]
    else:
        # Default: trim off last suffix of input filename
        # and replacing with .tra
        _indx = inFile.rfind('_')
        if _indx > 0:
            _trlroot = inFile[:_indx]
        else:
            _trlroot = inFile

    _trlfile = _trlroot + '.tra'

    # Open product and read keyword value
    # Check to see if product already exists...
    dkey = 'DRIZCORR'
    # ...if product does NOT exist, interrogate input file
    # to find out whether 'dcorr' has been set to PERFORM
    # Check if user wants to process again regardless of DRIZCORR keyword value
    if force:
        dcorr = 'PERFORM'
    else:
        if _mname :
            _fimg = fits.open(fileutil.buildRootname(_mname,ext=['_raw.fits']), memmap=False)
            _phdr = _fimg['PRIMARY'].header
            if dkey in _phdr:
                dcorr = _phdr[dkey]
            else:
                dcorr = None
            _fimg.close()
            del _fimg
        else:
            dcorr = None

    time_str = _getTime()
    _tmptrl = _trlroot + '_tmp.tra'
    _drizfile = _trlroot + '_pydriz'
    _drizlog = _drizfile + ".log" # the '.log' gets added automatically by astrodrizzle
    if dcorr == 'PERFORM':
        if '_asn.fits' not in inFilename:
            # Working with a singleton
            # However, we always want to make sure we always use
            # a calibrated product as input, if available.
            _infile = fileutil.buildRootname(_cal_prodname, ext=cal_ext)
            _infile_flc = fileutil.buildRootname(_cal_prodname,ext=['_flc.fits'])

            _cal_prodname = _infile
            _inlist = _calfiles = [_infile]

            # Add CTE corrected filename as additional input if present
            if os.path.exists(_infile_flc) and _infile_flc != _infile:
                _inlist.append(_infile_flc)

        else:
            # Working with an ASN table...
            _infile = inFilename
            flist,duplist = processInput.checkForDuplicateInputs(_asndict['order'])
            _calfiles = flist
            if len(duplist) > 0:
                origasn = processInput.changeSuffixinASN(inFilename,'flt')
                dupasn = processInput.changeSuffixinASN(inFilename,'flc')
                _inlist = [origasn,dupasn]
            else:
                _inlist = [_infile]
            # We want to keep the original specification of the calibration
            # product name, though, not a lower-case version...
            _cal_prodname = inFilename
            _new_asn.extend(_inlist) # kept so we can delete it when finished


        # Run astrodrizzle and send its processing statements to _trlfile
        _pyver = drizzlepac.astrodrizzle.__version__

        for _infile in _inlist: # Run astrodrizzle for all inputs
            # Create trailer marker message for start of astrodrizzle processing
            _trlmsg = _timestamp('astrodrizzle started ')
            _trlmsg = _trlmsg+ __trlmarker__
            _trlmsg = _trlmsg + '%s: Processing %s with astrodrizzle Version %s\n' % (time_str,_infile,_pyver)
            print(_trlmsg)

            # Write out trailer comments to trailer file...
            ftmp = open(_tmptrl,'w')
            ftmp.writelines(_trlmsg)
            ftmp.close()
            _appendTrlFile(_trlfile,_tmptrl)

            _pyd_err = _trlroot+'_pydriz.stderr'

            try:
                b = drizzlepac.astrodrizzle.AstroDrizzle(input=_infile,runfile=_drizfile,
                                            configobj='defaults',in_memory=inmemory,
                                            num_cores=num_cores, **pipeline_pars)
            except Exception as errorobj:
                _appendTrlFile(_trlfile,_drizlog)
                _appendTrlFile(_trlfile,_pyd_err)
                _ftrl = open(_trlfile,'a')
                _ftrl.write('ERROR: Could not complete astrodrizzle processing of %s.\n' % _infile)
                _ftrl.write(str(sys.exc_info()[0])+': ')
                _ftrl.writelines(str(errorobj))
                _ftrl.write('\n')
                _ftrl.close()
                print('ERROR: Could not complete astrodrizzle processing of %s.' % _infile)
                raise Exception(str(errorobj))

            # Now, append comments created by PyDrizzle to CALXXX trailer file
            print('Updating trailer file %s with astrodrizzle comments.' % _trlfile)
            _appendTrlFile(_trlfile,_drizlog)

        # Save this for when astropy.io.fits can modify a file 'in-place'
        # Update calibration switch
        _fimg = fits.open(_cal_prodname, mode='update', memmap=False)
        _fimg['PRIMARY'].header[dkey] = 'COMPLETE'
        _fimg.close()
        del _fimg

        # Enforce pipeline convention of all lower-case product
        # names
        _prodlist = glob.glob('*drz.fits')
        for _prodname in _prodlist:
            _plower = _prodname.lower()
            if _prodname != _plower:  os.rename(_prodname,_plower)

    else:
        # Create default trailer file messages when astrodrizzle is not
        # run on a file.  This will typically apply only to BIAS,DARK
        # and other reference images.
        # Start by building up the message...
        _trlmsg = _timestamp('astrodrizzle skipped ')
        _trlmsg = _trlmsg + __trlmarker__
        _trlmsg = _trlmsg + '%s: astrodrizzle processing not requested for %s.\n' % (time_str,inFilename)
        _trlmsg = _trlmsg + '       astrodrizzle will not be run at this time.\n'
        print(_trlmsg)

        # Write message out to temp file and append it to full trailer file
        ftmp = open(_tmptrl,'w')
        ftmp.writelines(_trlmsg)
        ftmp.close()
        _appendTrlFile(_trlfile,_tmptrl)

    _fmsg = None
    # Append final timestamp to trailer file...
    _final_msg = '%s: Finished processing %s \n' % (time_str,inFilename)
    _final_msg += _timestamp('astrodrizzle completed ')
    _trlmsg += _final_msg
    ftmp = open(_tmptrl,'w')
    ftmp.writelines(_trlmsg)
    ftmp.close()
    _appendTrlFile(_trlfile,_tmptrl)

    # If we created a new ASN table, we need to remove it
    if _new_asn is not None:
        for _name in _new_asn: fileutil.removeFile(_name)

    # Clean up any generated OrIg_files directory
    if os.path.exists("OrIg_files"):
        # check to see whether this directory is empty
        flist = glob.glob('OrIg_files/*.fits')
        if len(flist) == 0:
            os.rmdir("OrIg_files")
        else:
            print('OrIg_files directory NOT removed as it still contained images...')
    if headerlets:
        # Generate headerlets for each updated FLT image
        hlet_msg = _timestamp("Writing Headerlets started")
        for fname in _calfiles:
            frootname = fileutil.buildNewRootname(fname)
            hname = "%s_flt_hlet.fits"%frootname
            hlet_msg += "Created Headerlet file %s \n"%hname
            try:
                headerlet.write_headerlet(fname,'OPUS',output='flt', wcskey='PRIMARY',
                    author="OPUS",descrip="Default WCS from Pipeline Calibration",
                    attach=False,clobber=True,logging=False)
            except ValueError:
                hlet_msg += _timestamp("SKIPPED: Headerlet not created for %s \n"%fname)
                # update trailer file to log creation of headerlet files
        hlet_msg += _timestamp("Writing Headerlets completed")
        ftrl = open(_trlfile,'a')
        ftrl.write(hlet_msg)
        ftrl.close()

    # If processing was done in a temp working dir, restore results to original
    # processing directory, return to original working dir and remove temp dir
    if newpath:
        _restoreResults(new_processing_dir,orig_processing_dir)
        os.chdir(orig_processing_dir)
        _removeWorkingDir(new_processing_dir)

    # Provide feedback to user
    print(_final_msg)
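
Invocation of this WFPC2-aware variant is the same single call; the rootname below is hypothetical, and for WFPC2 data the calibrated '_c0m.fits' product is located automatically:

process('u40x010hm_asn.fits', force=True)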
Example #12
def process(inFile, force=False, newpath=None, inmemory=False, num_cores=None,
            headerlets=True, align_to_gaia=True):
    """ Run astrodrizzle on input file/ASN table
        using default values for astrodrizzle parameters.
    """
    # We only need to import this package if a user runs the task
    import drizzlepac
    from drizzlepac import processInput # used for creating new ASNs for _flc inputs
    from stwcs import updatewcs
    from drizzlepac import alignimages
    
    # interpret environment variable, if specified
    if envvar_compute_name in os.environ:
        val = os.environ[envvar_compute_name].lower()
        if val not in envvar_bool_dict:
            msg = "ERROR: invalid value for {}.".format(envvar_compute_name)
            msg += "  \n    Valid Values: on, off, yes, no, true, false"
            raise ValueError(msg)            
        align_to_gaia = envvar_bool_dict[val]

    if envvar_new_apriori_name in os.environ:
        # Reset ASTROMETRY_STEP_CONTROL based on this variable
        # This provides backward-compatibility until ASTROMETRY_STEP_CONTROL
        # gets removed entirely.
        val = os.environ[envvar_new_apriori_name].lower()
        if val not in envvar_dict:
            msg = "ERROR: invalid value for {}.".format(envvar_new_apriori_name)
            msg += "  \n    Valid Values: on, off, yes, no, true, false"
            raise ValueError(msg)

        os.environ[envvar_old_apriori_name] = envvar_dict[val]

    if headerlets or align_to_gaia:
        from stwcs.wcsutil import headerlet

    # Open the input file
    try:
        # Make sure given filename is complete and exists...
        inFilename = fileutil.buildRootname(inFile,ext=['.fits'])
        if not os.path.exists(inFilename):
            print("ERROR: Input file - %s - does not exist." % inFilename)
            return
    except TypeError:
        print("ERROR: Inappropriate input file.")
        return

    #If newpath was specified, move all files to that directory for processing
    if newpath:
        orig_processing_dir = os.getcwd()
        new_processing_dir = _createWorkingDir(newpath,inFilename)
        _copyToNewWorkingDir(new_processing_dir,inFilename)
        os.chdir(new_processing_dir)

    # Initialize for later use...
    _mname = None
    _new_asn = None
    _calfiles = []

    # Identify WFPC2 inputs to account for differences in WFPC2 inputs
    wfpc2_input = fits.getval(inFilename, 'instrume') == 'WFPC2'
    cal_ext = None

    # Check input file to see if [DRIZ/DITH]CORR is set to PERFORM
    if '_asn' in inFilename:
        # We are working with an ASN table.
        # Use asnutil code to extract filename
        inFilename = _lowerAsn(inFilename)
        _new_asn = [inFilename]
        _asndict = asnutil.readASNTable(inFilename,None,prodonly=False)
        _cal_prodname = _asndict['output'].lower()
        #_fname = fileutil.buildRootname(_cal_prodname,ext=['_drz.fits'])

        # Retrieve the first member's rootname for possible use later
        _fimg = fits.open(inFilename, memmap=False)
        for name in _fimg[1].data.field('MEMNAME'):
            if name[-1] != '*':
                _mname = name.split('\0', 1)[0].lower()
                break
        _fimg.close()
        del _fimg

    else:
        # Check to see if input is a _RAW file
        # If it is, strip off the _raw.fits extension...
        _indx = inFilename.find('_raw')
        if _indx < 0: _indx = len(inFilename)
        # ... and build the CALXXX product rootname.
        if wfpc2_input:
            # force code to define _c0m file as calibrated product to be used
            cal_ext = ['_c0m.fits']
        _mname = fileutil.buildRootname(inFilename[:_indx], ext=cal_ext)

        _cal_prodname = inFilename[:_indx]
        # Reset inFilename to correspond to appropriate input for
        # drizzle: calibrated product name.
        inFilename = _mname

        if _mname is None:
            errorMsg = 'Could not find calibrated product!'
            raise Exception(errorMsg)

    # Create trailer filenames based on ASN output filename or
    # on input name for single exposures
    if '_raw' in inFile:
        # Output trailer file to RAW file's trailer
        _trlroot = inFile[:inFile.find('_raw')]
    elif '_asn' in inFile:
        # Output trailer file to ASN file's trailer, not product's trailer
        _trlroot = inFile[:inFile.find('_asn')]
    else:
        # Default: trim off last suffix of input filename
        # and replacing with .tra
        _indx = inFile.rfind('_')
        if _indx > 0:
            _trlroot = inFile[:_indx]
        else:
            _trlroot = inFile

    _trlfile = _trlroot + '.tra'

    # Open product and read keyword value
    # Check to see if product already exists...
    dkey = 'DRIZCORR'
    # ...if product does NOT exist, interrogate input file
    # to find out whether 'dcorr' has been set to PERFORM
    # Check if user wants to process again regardless of DRIZCORR keyword value
    if force:
        dcorr = 'PERFORM'
    else:
        if _mname :
            _fimg = fits.open(fileutil.buildRootname(_mname,ext=['_raw.fits']), memmap=False)
            _phdr = _fimg['PRIMARY'].header
            if dkey in _phdr:
                dcorr = _phdr[dkey]
            else:
                dcorr = None
            _fimg.close()
            del _fimg
        else:
            dcorr = None

    time_str = _getTime()
    _tmptrl = _trlroot + '_tmp.tra'
    _drizfile = _trlroot + '_pydriz'
    _drizlog = _drizfile + ".log" # the '.log' gets added automatically by astrodrizzle
    _alignlog = _trlroot + '_align.log'
    if dcorr == 'PERFORM':
        if '_asn.fits' not in inFilename:
            # Working with a singleton
            # However, we always want to make sure we always use
            # a calibrated product as input, if available.
            _infile = fileutil.buildRootname(_cal_prodname, ext=cal_ext)
            _infile_flc = fileutil.buildRootname(_cal_prodname,ext=['_flc.fits'])

            _cal_prodname = _infile
            _inlist = _calfiles = [_infile]

            # Add CTE corrected filename as additional input if present
            if os.path.exists(_infile_flc) and _infile_flc != _infile:
                _inlist.append(_infile_flc)

        else:
            # Working with an ASN table...
            _infile = inFilename
            flist,duplist = processInput.checkForDuplicateInputs(_asndict['order'])
            _calfiles = flist
            if len(duplist) > 0:
                origasn = processInput.changeSuffixinASN(inFilename,'flt')
                dupasn = processInput.changeSuffixinASN(inFilename,'flc')
                _inlist = [origasn,dupasn]
            else:
                _inlist = [_infile]
            # We want to keep the original specification of the calibration
            # product name, though, not a lower-case version...
            _cal_prodname = inFilename
            _new_asn.extend(_inlist) # kept so we can delete it when finished

        # check to see whether FLC files are also present, and need to be updated
        # generate list of FLC files
        align_files = None
        _calfiles_flc = [f.replace('_flt.fits','_flc.fits') for f in _calfiles]
        # ensure these files exist; if not, blank them out
        # Also pick out what files will be used for additional alignment to GAIA
        if not os.path.exists(_calfiles_flc[0]):
            _calfiles_flc = None
            align_files = _calfiles
            align_update_files = None
        else:
            align_files = _calfiles_flc
            align_update_files = _calfiles

        # Run updatewcs on each list of images
        updatewcs.updatewcs(_calfiles)
        if _calfiles_flc:
            updatewcs.updatewcs(_calfiles_flc)

        if align_to_gaia:
            # Perform additional alignment on the FLC files, if present
            ###############
            #
            # call hlapipeline code here on align_files list of files
            #
            ###############
            # Create trailer marker message for start of align_to_GAIA processing
            _trlmsg = _timestamp("Align_to_GAIA started ")
            print(_trlmsg)
            ftmp = open(_tmptrl,'w')
            ftmp.writelines(_trlmsg)
            ftmp.close()
            _appendTrlFile(_trlfile,_tmptrl)
            _trlmsg = ""

            # Create an empty astropy table so it can be used as input/output for the perform_align function
            #align_table = Table()
            try:
                align_table = alignimages.perform_align(align_files,update_hdr_wcs=True, runfile=_alignlog)
                for row in align_table:
                    if row['status'] == 0:
                        trlstr = "Successfully aligned {} to {} astrometric frame\n"
                        _trlmsg += trlstr.format(row['imageName'], row['catalog'])
                    else:
                        trlstr = "Could not align {} to absolute astrometric frame\n"
                        _trlmsg += trlstr.format(row['imageName'])

            except Exception:
                # Something went wrong with alignment to GAIA; report this in
                # the trailer file and leave align_table defined for the
                # checks below.
                align_table = None
                _trlmsg = "EXCEPTION encountered in alignimages...\n"
                _trlmsg += "   No correction to absolute astrometric frame applied!\n"

            # Write the perform_align log to the trailer file (this deletes _alignlog)
            _appendTrlFile(_trlfile, _alignlog)

            # Append messages from this calling routine post-perform_align
            with open(_tmptrl, 'w') as ftmp:
                ftmp.writelines(_trlmsg)
            _appendTrlFile(_trlfile, _tmptrl)
            _trlmsg = ""

            # Check to see whether there are any additional input files
            # that need to be aligned (namely, FLT images)
            if align_update_files and align_table:
                # Apply headerlets from alignment to FLT version of the files
                for fltfile, flcfile in zip(align_update_files, align_files):
                    row = align_table[align_table['imageName']==flcfile]
                    headerletFile = row['headerletFile'][0]
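                    # the alignment table stores the literal string "None"
                    # when no headerlet was produced for an image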
                    if headerletFile != "None":
                        headerlet.apply_headerlet_as_primary(fltfile, headerletFile,
                                                            attach=True, archive=True)
                        # append log file contents to _trlmsg for inclusion in trailer file
                        _trlstr = "Applying headerlet {} as Primary WCS to {}\n"
                        _trlmsg += _trlstr.format(headerletFile, fltfile)
                    else:
                        _trlmsg += "No absolute astrometric headerlet applied to {}\n".format(fltfile)

            # Finally, append any further alignment messages from this calling routine
            _trlmsg += _timestamp('Align_to_GAIA completed ')
            print(_trlmsg)
            with open(_tmptrl, 'w') as ftmp:
                ftmp.writelines(_trlmsg)
            _appendTrlFile(_trlfile, _tmptrl)

        # Run astrodrizzle and send its processing statements to _trlfile
        _pyver = drizzlepac.astrodrizzle.__version__

        for _infile in _inlist: # Run astrodrizzle for all inputs
            # Create trailer marker message for start of astrodrizzle processing
            _trlmsg = _timestamp('astrodrizzle started ')
            _trlmsg += __trlmarker__
            _trlmsg += '%s: Processing %s with astrodrizzle Version %s\n' % (time_str,_infile,_pyver)
            print(_trlmsg)

            # Write out trailer comments to trailer file...
            with open(_tmptrl, 'w') as ftmp:
                ftmp.writelines(_trlmsg)
            _appendTrlFile(_trlfile, _tmptrl)

            _pyd_err = _trlroot+'_pydriz.stderr'

            try:
                b = drizzlepac.astrodrizzle.AstroDrizzle(input=_infile, runfile=_drizfile,
                                            configobj='defaults', in_memory=inmemory,
                                            num_cores=num_cores, **pipeline_pars)
            except Exception as errorobj:
                _appendTrlFile(_trlfile, _drizlog)
                _appendTrlFile(_trlfile, _pyd_err)
                with open(_trlfile, 'a') as _ftrl:
                    _ftrl.write('ERROR: Could not complete astrodrizzle processing of %s.\n' % _infile)
                    _ftrl.write(str(sys.exc_info()[0]) + ': ')
                    _ftrl.writelines(str(errorobj))
                    _ftrl.write('\n')
                print('ERROR: Could not complete astrodrizzle processing of %s.' % _infile)
                raise Exception(str(errorobj))

            # Now, append comments created by astrodrizzle to the trailer file
            print('Updating trailer file %s with astrodrizzle comments.' % _trlfile)
            _appendTrlFile(_trlfile, _drizlog)

        # Update the calibration switch in the final product header to
        # record that drizzle processing completed
        _fimg = fits.open(_cal_prodname, mode='update', memmap=False)
        _fimg['PRIMARY'].header[dkey] = 'COMPLETE'
        _fimg.close()
        del _fimg

        # Enforce pipeline convention of all lower-case product
        # names
        _prodlist = glob.glob('*drz.fits')
        for _prodname in _prodlist:
            _plower = _prodname.lower()
            if _prodname != _plower:
                os.rename(_prodname, _plower)

    else:
        # Create default trailer file messages when astrodrizzle is not
        # run on a file.  This will typically apply only to BIAS, DARK,
        # and other reference images.
        # Start by building up the message...
        _trlmsg = _timestamp('astrodrizzle skipped ')
        _trlmsg += __trlmarker__
        _trlmsg += '%s: astrodrizzle processing not requested for %s.\n' % (time_str, inFilename)
        _trlmsg += '       astrodrizzle will not be run at this time.\n'
        print(_trlmsg)

        # Write message out to temp file and append it to full trailer file
        with open(_tmptrl, 'w') as ftmp:
            ftmp.writelines(_trlmsg)
        _appendTrlFile(_trlfile, _tmptrl)

    # Append the final timestamp to the trailer file.  Write only
    # _final_msg here; the contents of _trlmsg were already appended
    # to the trailer above.
    _final_msg = '%s: Finished processing %s \n' % (time_str, inFilename)
    _final_msg += _timestamp('astrodrizzle completed ')
    with open(_tmptrl, 'w') as ftmp:
        ftmp.writelines(_final_msg)
    _appendTrlFile(_trlfile, _tmptrl)

    # If we created a new ASN table, we need to remove it
    if _new_asn is not None:
        for _name in _new_asn:
            fileutil.removeFile(_name)

    # Clean up any generated OrIg_files directory
    if os.path.exists("OrIg_files"):
        # check to see whether this directory is empty
        flist = glob.glob('OrIg_files/*.fits')
        if len(flist) == 0:
            os.rmdir("OrIg_files")
        else:
            print('OrIg_files directory NOT removed as it still contained images...')

    # Generate headerlets for each updated FLT image, but do NOT overwrite
    # any headerlet already written out by the alignment code (checked per
    # file below)
    if headerlets:
        hlet_msg = _timestamp("Writing Headerlets started")
        for fname in _calfiles:
            frootname = fileutil.buildNewRootname(fname)
            hname = "%s_flt_hlet.fits" % frootname
            # Write out the headerlet file used by astrodrizzle, but do not
            # overwrite any that was already written out by alignimages
            if not os.path.exists(hname):
                try:
                    headerlet.write_headerlet(fname, 'OPUS', output='flt', wcskey='PRIMARY',
                                              author="OPUS", descrip="Default WCS from Pipeline Calibration",
                                              attach=False, clobber=True, logging=False)
                    # Log creation only after the headerlet was actually written
                    hlet_msg += "Created Headerlet file %s \n" % hname
                except ValueError:
                    hlet_msg += _timestamp("SKIPPED: Headerlet not created for %s \n" % fname)
        hlet_msg += _timestamp("Writing Headerlets completed")
        with open(_trlfile, 'a') as ftrl:
            ftrl.write(hlet_msg)

    # If processing was done in a temp working dir, restore results to original
    # processing directory, return to original working dir and remove temp dir
    if newpath:
        _restoreResults(new_processing_dir,orig_processing_dir)
        os.chdir(orig_processing_dir)
        _removeWorkingDir(new_processing_dir)

    # Provide feedback to user
    print(_final_msg)
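
Note: the `_appendTrlFile` helper used throughout this routine is defined
elsewhere in the module and is not shown in this excerpt. As a minimal
sketch (an assumption about its behavior, not the actual implementation),
it appends the contents of a temporary log file to the trailer file and
then removes the temporary file:

def _appendTrlFile(trlfile, tmpfile):
    # Sketch only: append tmpfile's contents to trlfile, then delete
    # tmpfile, so each processing step's messages accumulate in a single
    # trailer file.
    import os
    if not os.path.exists(tmpfile):
        return
    with open(tmpfile) as ftmp:
        lines = ftmp.readlines()
    with open(trlfile, 'a') as ftrl:
        ftrl.writelines(lines)
    os.remove(tmpfile)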
Example #13
0
def parseinput(inputlist, outputname=None, atfile=None):
    """
    Recursively parse user input based upon the irafglob
    program and construct a list of files that need to be processed.
    This program addresses the following deficiencies of the irafglob program::

       parseinput can extract filenames from association tables
    
    Returns
    -------
    This program will return a list of input files that will need to
    be processed in addition to the name of any outfiles specified in
    an association table.

    Parameters
    ----------
    inputlist - string
        specification of input files using either wild-cards, @-file or 
        comma-separated list of filenames

    outputname - string
        desired name for output product to be created from the input files

    atfile - object
        function to use in interpreting the @-file columns that gets passed to irafglob
        
    Returns
    -------    
    files - list of strings 
        names of output files to be processed
    newoutputname - string 
        name of output file to be created.
        
    See Also
    --------
    stsci.tools.irafglob
    
    """

    # Initialize some variables
    files = []  # list used to store names of input files
    # newoutputname is returned to the calling program; its value is only
    # changed if outputname was None on input.
    newoutputname = outputname

    # We can use irafglob to parse the input.  If the input wasn't
    # an association table, it needs to be either a wildcard, '@' file,
    # or comma-separated list.
    files = irafglob(inputlist, atfile=atfile)

    # Now that we have expanded the inputlist into a python list
    # containing the list of input files, it is necessary to examine
    # each of the files to make sure none of them are association tables.
    #
    # If an association table is found, its entries should be read in.
    # Iterate over a copy of the list, since the table's own entry is
    # removed from `files` below.
    for file in files[:]:
        if checkASN(file):
            # Create a list to store the files extracted from the
            # association table
            assoclist = []

            # The input is an association table
            try:
                # Open the association table
                assocdict = readASNTable(file, None, prodonly=False)
            except Exception:
                errorstr = "###################################\n"
                errorstr += "#                                 #\n"
                errorstr += "# UNABLE TO READ ASSOCIATION FILE,#\n"
                errorstr += str(file) + '\n'
                errorstr += "# DURING FILE PARSING.            #\n"
                errorstr += "#                                 #\n"
                errorstr += "# Please determine if the file is #\n"
                errorstr += "# in the current directory and    #\n"
                errorstr += "# that it has been properly       #\n"
                errorstr += "# formatted.                      #\n"
                errorstr += "#                                 #\n"
                errorstr += "# This error message is being     #\n"
                errorstr += "# generated from within the       #\n"
                errorstr += "# parseinput.py module.           #\n"
                errorstr += "#                                 #\n"
                errorstr += "###################################\n"
                raise ValueError(errorstr)

            # Extract the output name from the association table if None
            # was provided on input.
            if outputname is None:
                newoutputname = assocdict['output']

            # Loop over the association dictionary to extract the input
            # file names.
            for f in assocdict['order']:
                assoclist.append(fileutil.buildRootname(f))

            # Remove the name of the association table from the list of files
            files.remove(file)
            # Append the list of filenames generated from the association table
            # to the master list of input files.
            files.extend(assoclist)

    # Return the list of the input files and the output name if provided in an association.
    return files, newoutputname
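
For reference, a few hypothetical invocations of parseinput (the filenames
below are placeholders, not files from the original source):

# Wildcard specification; expands to all matching FLT files
files, out = parseinput('*_flt.fits')

# Comma-separated list of filenames
files, out = parseinput('j8c103010_flt.fits,j8c103011_flt.fits')

# @-file: a plain-text file listing one input filename per line
files, out = parseinput('@input_files.txt')

# Association table: member names are expanded via buildRootname, and the
# product name from the table is returned when outputname is None
files, out = parseinput('j8c103010_asn.fits')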
Example #14
0
def combine_flt(files=[], output='exposures_cmb.fits', grow=1,
                add_padding=True, pixfrac=0.5, kernel='point',
                verbose=True, overwrite=True, ds9=None):
    """Drizzle distorted FLT frames to an "interlaced" image
        
    Parameters
    ----------
    files : list of strings
        Filenames of FLT files to combine
    
    output : str
        Output filename of the combined file.  Convention elsewhere is to use
        an "_cmb.fits" extension to distinguish from "_flt.fits".
        
    grow : int
        Factor by which to `grow` the FLT frames to interlaced outputs.  For 
        example, `grow=2` results in 2x2 interlacing.
    
    add_padding : bool
        Expand pixel grid to accommodate all dithered exposures.  WCS is 
        preserved but "CRPIX" will change.
        
    pixfrac : float
        Drizzle pixfrac (for kernels other than 'point')
    
    kernel : {'point', 'square'}
        Drizzle kernel. The 'point' kernel is effectively interlacing and is
        best for preserving the noise properties of the final combined image.
        However, it can result in empty pixels given the camera distortions
        and the dithering of the input exposures.
    
    ds9 : `~grizli.ds9.DS9`
        Display the progress of the script to a DS9 window.
        
    verbose : bool
        Print logging information
        
    overwrite : bool
        Overwrite existing files
    
    Returns
    -------
    Nothing is returned; the combined image is written to `output`.
    
    """
    import numpy as np
    import astropy.io.fits as pyfits
    import astropy.wcs as pywcs

    from stsci.tools import asnutil
    from drizzlepac import astrodrizzle

    from grizli import utils

    # NOTE: SIP_WCSMap, used in the do_driz call below, is assumed to be
    # defined elsewhere in the module this function was extracted from.
    
    ### `files` may be an ASN filename rather than a list of exposures
    if '_asn.fits' in files:
        asn = asnutil.readASNTable(files)
        files = ['{0}_flt.fits'.format(flt) for flt in asn['order']]
        # Replace the default output name with one derived from the ASN
        # product name (the check must match the `output` default above)
        if output == 'exposures_cmb.fits':
            output = '{0}_cmb.fits'.format(asn['output'])
            
    f0 = pyfits.open(files[0])
    h0 = f0[0].header.copy()
    h0['EXPTIME'] = 0.
    h0['NFILES'] = (len(files), 'Number of combined files')
    
    out_wcs = pywcs.WCS(f0[1].header, relax=True)
    out_wcs.pscale = utils.get_wcs_pscale(out_wcs)
    # out_wcs.pscale = np.sqrt(out_wcs.wcs.cd[0,0]**2 +
    #                          out_wcs.wcs.cd[1,0]**2)*3600.
    
    ### Compute maximum offset needed for padding.  x0, y0 are also used
    ### in the verbose output of the drizzle loop below, so initialize
    ### them regardless of `add_padding`.
    x0 = np.zeros(len(files))
    y0 = np.zeros(len(files))

    if add_padding:
        ra0, de0 = out_wcs.all_pix2world([0], [0], 0)
    
        for i, file in enumerate(files):
            hx = pyfits.getheader(file, 0)
            h0['EXPTIME'] += hx['EXPTIME']
            h0['FILE{0:04d}'.format(i)] = (file, 
                                        'Included file #{0:d}'.format(i))
        
            h = pyfits.getheader(file, 1)
            flt_wcs = pywcs.WCS(h, relax=True)
            x0[i], y0[i] = flt_wcs.all_world2pix(ra0, de0, 0)
    
        xmax = np.abs(x0).max()
        ymax = np.abs(y0).max()
        padx = 50*int(np.ceil(xmax/50.))
        pady = 50*int(np.ceil(ymax/50.))
        pad = np.maximum(padx, pady)*grow

        if verbose:
            print('Maximum shift (x, y) = ({0:6.1f}, {1:6.1f}), pad={2:d}'.format(xmax, ymax, pad))
    else:
        pad = 0
        
    inter_wcs = out_wcs.deepcopy()
    if grow > 1:
        inter_wcs.wcs.cd /= grow
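        # A SIP term a[i,j]*x**i*y**j is an offset in pixels, which scales
        # by `grow`, while x and y each scale by `grow` as well, so the
        # coefficient picks up a net factor of grow/grow**(i+j),
        # i.e. 1/grow**(i+j-1).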
        for i in range(inter_wcs.sip.a_order+1):
            for j in range(inter_wcs.sip.a_order+1):
                inter_wcs.sip.a[i,j] /= grow**(i+j-1)

        for i in range(inter_wcs.sip.b_order+1):
            for j in range(inter_wcs.sip.b_order+1):
                inter_wcs.sip.b[i,j] /= grow**(i+j-1)

        inter_wcs._naxis1 *= grow
        inter_wcs._naxis2 *= grow
        inter_wcs.wcs.crpix *= grow
        inter_wcs.sip.crpix[0] *= grow
        inter_wcs.sip.crpix[1] *= grow

        # Shift CRPIX to the center of the interlaced pixel block
        inter_wcs.wcs.crpix += grow/2.
        inter_wcs.sip.crpix[0] += grow/2.
        inter_wcs.sip.crpix[1] += grow/2.
    
    inter_wcs._naxis1 += pad
    inter_wcs._naxis2 += pad
    inter_wcs.wcs.crpix += pad
    inter_wcs.sip.crpix[0] += pad
    inter_wcs.sip.crpix[1] += pad

    outh = inter_wcs.to_header(relax=True)
    # Rename PC matrix keywords to the CD convention; iterate over a copy
    # of the keys since the header is modified in the loop
    for key in list(outh):
        if key.startswith('PC'):
            outh.rename_keyword(key, key.replace('PC', 'CD'))
    
    outh['GROW'] = grow, 'Grow factor'
    outh['PAD'] = pad, 'Image padding'
    # Take BUNIT from the first exposure's SCI header, which is always
    # available (the loop variable `h` is only set when add_padding=True)
    outh['BUNIT'] = f0[1].header['BUNIT']
    
    sh = (1014*grow + 2*pad, 1014*grow + 2*pad)
    outsci = np.zeros(sh, dtype=np.float32)
    outwht = np.zeros(sh, dtype=np.float32)
    outctx = np.zeros(sh, dtype=np.int32)

    ## Pixel area map
    # PAM_im = pyfits.open(os.path.join(os.getenv('iref'), 'ir_wfc3_map.fits'))
    # PAM = PAM_im[1].data
    
    for i, file in enumerate(files):
        im = pyfits.open(file)
        
        if verbose:
            print('{0:3d} {1:s} {2:6.1f} {3:6.1f} {4:10.2f}'.format(i+1, file,
                                x0[i], y0[i], im[0].header['EXPTIME']))

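        # Build inverse-variance weights from the ERR extension, zeroing out
        # DQ-flagged pixels, zero-error pixels, non-finite weights, and
        # strong negative outliers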
        dq = utils.unset_dq_bits(im['DQ'].data, okbits=608,
                                           verbose=False)
        with np.errstate(divide='ignore', invalid='ignore'):
            wht = 1./im['ERR'].data**2
        wht[(im['ERR'].data == 0) | (dq > 0) | (~np.isfinite(wht))] = 0
        wht[im['SCI'].data < -3*im['ERR'].data] = 0

        wht = np.asarray(wht, dtype=np.float32)

        exp_wcs = pywcs.WCS(im[1].header, relax=True)
        exp_wcs.pscale = utils.get_wcs_pscale(exp_wcs)

        astrodrizzle.adrizzle.do_driz(im['SCI'].data, exp_wcs, wht,
                                      inter_wcs, outsci, outwht, outctx,
                                      1., 'cps', 1, 
                                      wcslin_pscale=exp_wcs.pscale,
                                      uniqid=1,
                                      pixfrac=pixfrac, kernel=kernel, 
                                      fillval=0, stepsize=10,
                                      wcsmap=SIP_WCSMap)   

        if ds9 is not None:
            ds9.view(outsci, header=outh)

    #outsci /= out_wcs.pscale**2
    # Convert the drizzle weight map to an RMS map; mask empty or very
    # noisy pixels
    with np.errstate(divide='ignore'):
        rms = 1/np.sqrt(outwht)
    mask = (outwht == 0) | (rms > 100)
    rms[mask] = 0
    outsci[mask] = 0.
    
    hdu = [pyfits.PrimaryHDU(header=h0)]
    hdu.append(pyfits.ImageHDU(data=outsci/grow**2, header=outh, name='SCI'))
    hdu.append(pyfits.ImageHDU(data=rms/grow**2, header=outh, name='ERR'))
    hdu.append(pyfits.ImageHDU(data=mask*1024, header=outh, name='DQ'))
    
    pyfits.HDUList(hdu).writeto(output, overwrite=overwrite, output_verify='fix')
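
A hypothetical usage sketch (the filenames are placeholders, not files from
the original source):

# Combine the members of an association into a 2x2-interlaced image;
# the output name is derived from the ASN product name
combine_flt(files='ibhj03010_asn.fits', grow=2, kernel='point')

# Or combine an explicit list of FLT exposures with a drizzle kernel
combine_flt(files=['ibhj03a1q_flt.fits', 'ibhj03a2q_flt.fits'],
            output='myfield_cmb.fits', grow=1, kernel='square', pixfrac=0.8)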