Example #1
def waiver2mef(sciname, newname=None, convert_dq=True, writefits=True):
    """
    Converts a GEIS science file and its corresponding
    data quality file (if present) to MEF format
    Writes out both files to disk.
    Returns the new name of the science image.
    """

    if isinstance(sciname, fits.HDUList):
        filename = sciname.filename()
    else:
        filename = sciname

    try:
        clobber = True
        fimg = convertwaiveredfits.convertwaiveredfits(filename)

        # Check for the existence of a data quality file
        _dqname = fileutil.buildNewRootname(filename, extn='_c1f.fits')
        dqexists = os.path.exists(_dqname)
        if convert_dq and dqexists:
            try:
                dqfile = convertwaiveredfits.convertwaiveredfits(_dqname)
                dqfitsname = fileutil.buildNewRootname(_dqname,
                                                       extn='_c1h.fits')
            except Exception:
                print("Could not read data quality file %s" % _dqname)
                dqexists = False  # skip writing a DQ product below
        # Build the output MEF science filename up front; it is needed
        # below to reopen the file even when writefits is False.
        rname = fileutil.buildNewRootname(filename)
        fitsname = fileutil.buildNewRootname(rname, extn='_c0h.fits')

        if writefits:
            # User wants to make a FITS copy and update it
            # using the filename they have provided

            # Write out GEIS image as multi-extension FITS.
            fexists = os.path.exists(fitsname)
            if (fexists and clobber) or not fexists:
                print('Writing out WAIVERED as MEF to ', fitsname)
                if ASTROPY_VER_GE13:
                    fimg.writeto(fitsname, overwrite=clobber)
                else:
                    fimg.writeto(fitsname, clobber=clobber)
                if dqexists:
                    print('Writing out WAIVERED as MEF to ', dqfitsname)
                    if ASTROPY_VER_GE13:
                        dqfile.writeto(dqfitsname, overwrite=clobber)
                    else:
                        dqfile.writeto(dqfitsname, clobber=clobber)
        # Now close input GEIS image, and open writable
        # handle to output FITS image instead...
        fimg.close()
        del fimg

        fimg = fits.open(fitsname, mode='update', memmap=False)

        return fimg
    except IOError:
        print('Warning: File %s could not be found' % sciname)
        return None
Example #2
def waiver2mef(sciname, newname=None, convert_dq=True, writefits=True):
    """
    Converts a GEIS science file and its corresponding
    data quality file (if present) to MEF format
    Writes out both files to disk.
    Returns the new name of the science image.
    """

    if isinstance(sciname, fits.HDUList):
        filename = sciname.filename()
    else:
        filename = sciname

    try:
        clobber = True
        fimg = convertwaiveredfits.convertwaiveredfits(filename)

        # Check for the existence of a data quality file
        _dqname = fileutil.buildNewRootname(filename, extn='_c1f.fits')
        dqexists = os.path.exists(_dqname)
        if convert_dq and dqexists:
            try:
                dqfile = convertwaiveredfits.convertwaiveredfits(_dqname)
                dqfitsname = fileutil.buildNewRootname(_dqname, extn='_c1h.fits')
            except Exception:
                print("Could not read data quality file %s" % _dqname)
                dqexists = False  # skip writing a DQ product below
        # Build the output MEF science filename up front; it is needed
        # below to reopen the file even when writefits is False.
        rname = fileutil.buildNewRootname(filename)
        fitsname = fileutil.buildNewRootname(rname, extn='_c0h.fits')

        if writefits:
            # User wants to make a FITS copy and update it
            # using the filename they have provided

            # Write out GEIS image as multi-extension FITS.
            fexists = os.path.exists(fitsname)
            if (fexists and clobber) or not fexists:
                print('Writing out WAIVERED as MEF to ', fitsname)
                if ASTROPY_VER_GE13:
                    fimg.writeto(fitsname, overwrite=clobber)
                else:
                    fimg.writeto(fitsname, clobber=clobber)
                if dqexists:
                    print('Writing out WAIVERED as MEF to ', dqfitsname)
                    if ASTROPY_VER_GE13:
                        dqfile.writeto(dqfitsname, overwrite=clobber)
                    else:
                        dqfile.writeto(dqfitsname, clobber=clobber)
        # Now close input GEIS image, and open writable
        # handle to output FITS image instead...
        fimg.close()
        del fimg

        fimg = fits.open(fitsname, mode='update', memmap=False)

        return fimg
    except IOError:
        print('Warning: File %s could not be found' % sciname)
        return None
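A minimal usage sketch for the function above, assuming stsci.tools and drizzlepac are importable and that a waivered-FITS pair (u40x0102m_c0f.fits plus its _c1f.fits data quality companion) exists on disk; the filename is hypothetical.

# Hypothetical call: convert a waivered-FITS science file (and its DQ
# companion) to MEF and get back a writable handle to the new file.
fimg = waiver2mef('u40x0102m_c0f.fits', convert_dq=True, writefits=True)
if fimg is not None:
    print(fimg.filename())  # e.g. 'u40x0102m_c0h.fits'
    fimg.close()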
Example #3
def process_input(input,
                  output=None,
                  ivmlist=None,
                  updatewcs=True,
                  prodonly=False,
                  wcskey=None,
                  **workinplace):
    """
    Create the full input list of filenames after verifying and converting
    files as needed.
    """

    newfilelist, ivmlist, output, oldasndict, origflist = buildFileListOrig(
        input,
        output=output,
        ivmlist=ivmlist,
        wcskey=wcskey,
        updatewcs=updatewcs,
        **workinplace)

    if not newfilelist:
        buildEmptyDRZ(input, output)
        return None, None, output

    # run all WCS updating -- Now done in buildFileList
    #pydr_input = _process_input_wcs(newfilelist, wcskey, updatewcs)
    pydr_input = newfilelist

    # AsnTable will handle the case when output==None
    if not oldasndict:  # and output is not None:
        oldasndict = asnutil.ASNTable(pydr_input, output=output)
        oldasndict.create()

    asndict = update_member_names(oldasndict, pydr_input)
    asndict['original_file_names'] = origflist

    # Build output filename
    drz_extn = '_drz.fits'
    for img in newfilelist:
        # special case logic to automatically recognize when _flc.fits files
        # are provided as input and produce a _drc.fits file instead
        if '_flc.fits' in img:
            drz_extn = '_drc.fits'
            break

    if output in [None, '']:
        output = fileutil.buildNewRootname(asndict['output'], extn=drz_extn)
    else:
        if '.fits' in output.lower():
            pass
        elif drz_extn[:4] not in output.lower():
            output = fileutil.buildNewRootname(output, extn=drz_extn)

    log.info('Setting up output name: %s' % output)

    return asndict, ivmlist, output
Example #4
def process_input(input, output=None, ivmlist=None, updatewcs=True,
                  prodonly=False,  wcskey=None, **workinplace):
    """
    Create the full input list of filenames after verifying and converting
    files as needed.
    """

    newfilelist, ivmlist, output, oldasndict, origflist = buildFileListOrig(
            input, output=output, ivmlist=ivmlist, wcskey=wcskey,
            updatewcs=updatewcs, **workinplace)

    if not newfilelist:
        buildEmptyDRZ(input, output)
        return None, None, output

    # run all WCS updating -- Now done in buildFileList
    #pydr_input = _process_input_wcs(newfilelist, wcskey, updatewcs)
    pydr_input = newfilelist

    # AsnTable will handle the case when output==None
    if not oldasndict:  # and output is not None:
        oldasndict = asnutil.ASNTable(pydr_input, output=output)
        oldasndict.create()

    asndict = update_member_names(oldasndict, pydr_input)
    asndict['original_file_names'] = origflist

    # Build output filename
    drz_extn = '_drz.fits'
    for img in newfilelist:
        # special case logic to automatically recognize when _flc.fits files
        # are provided as input and produce a _drc.fits file instead
        if '_flc.fits' in img:
            drz_extn = '_drc.fits'
            break

    if output in [None, '']:
        output = fileutil.buildNewRootname(asndict['output'],
                                           extn=drz_extn)
    else:
        if '.fits' in output.lower():
            pass
        elif drz_extn[:4] not in output.lower():
            output = fileutil.buildNewRootname(output, extn=drz_extn)

    log.info('Setting up output name: %s' % output)

    return asndict, ivmlist, output
Example #5
def update_member_names(oldasndict, pydr_input):
    """
    Update names in a member dictionary.

    Given an association dictionary with rootnames and a list of full
    file names, update the names in the member dictionary to carry the
    '_*' suffix. For example, a rootname of 'u9600201m' will be replaced
    by 'u9600201m_c0h', making sure that a MEF file is passed as input
    rather than the corresponding GEIS file.
    """

    omembers = oldasndict['members'].copy()
    nmembers = {}
    translated_names = [f.split('.fits')[0] for f in pydr_input]

    newkeys = [fileutil.buildNewRootname(fname) for fname in pydr_input]

    for okey, oval in list(omembers.items()):
        if okey in newkeys:
            nkey = pydr_input[newkeys.index(okey)]
            nmembers[nkey.split('.fits')[0]] = oval

    oldasndict.pop('members')
    # replace should always be True, to cover both the case when flt files
    # were removed and the case when names were translated

    oldasndict.update(members=nmembers, replace=True)
    oldasndict['order'] = translated_names
    return oldasndict
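To illustrate the remapping performed above without the fileutil dependency, here is a self-contained sketch of the okey/nkey loop; the member names are hypothetical and rootnames are approximated by stripping the last '_' suffix.

# Sample association members keyed by rootname, plus the translated
# on-disk filenames (hypothetical data).
omembers = {'u9600201m': {'row': 0}, 'u9600202m': {'row': 1}}
pydr_input = ['u9600201m_c0h.fits', 'u9600202m_c0h.fits']

# Approximate rootname for each input file.
newkeys = [f.split('_')[0] for f in pydr_input]

nmembers = {}
for okey, oval in omembers.items():
    if okey in newkeys:
        nkey = pydr_input[newkeys.index(okey)]
        nmembers[nkey.split('.fits')[0]] = oval

print(nmembers)  # {'u9600201m_c0h': {'row': 0}, 'u9600202m_c0h': {'row': 1}}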
Example #6
def waiver2mef(sciname, newname=None, convert_dq=True):
    """
    Converts a GEIS science file and its corresponding
    data quality file (if present) to MEF format.
    Writes out both files to disk.
    Returns the new name of the science image.
    """
    def convert(file):
        newfilename = fileutil.buildNewRootname(file, extn='_c0h.fits')
        try:
            newimage = fileutil.openImage(file,
                                          writefits=True,
                                          fitsname=newfilename,
                                          clobber=True)
            del newimage
            return newfilename
        except IOError:
            print('Warning: File %s could not be found' % file)
            return None

    newsciname = convert(sciname)
    if convert_dq:
        dq_name = convert(fileutil.buildNewRootname(sciname, extn='_c1h.fits'))

    return newsciname
Example #7
def extract_rootname(kwvalue, suffix=""):
    """ Returns the rootname from a full reference filename

        If a non-valid value (any of ['','N/A','NONE','INDEF',None]) is input,
            simply return a string value of 'NONE'

        This function will also replace any 'suffix' specified with a blank.
    """
    # check to see whether a valid kwvalue has been provided as input
    if kwvalue is None or kwvalue.strip() in ['', 'N/A', 'NONE', 'INDEF']:
        return 'NONE'  # no valid value, so return 'NONE'

    # for a valid kwvalue, parse out the rootname
    # strip off any environment variable from input filename, if any are given
    if '$' in kwvalue:
        fullval = kwvalue[kwvalue.find('$') + 1:]
    else:
        fullval = kwvalue
    # Extract filename without path from kwvalue
    fname = os.path.basename(fullval).strip()

    # Now, rip out just the rootname from the full filename
    rootname = fileutil.buildNewRootname(fname)

    # Now, remove any known suffix from rootname
    rootname = rootname.replace(suffix, '')
    return rootname.strip()
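The same parsing steps can be reproduced with only the standard library. This standalone sketch approximates fileutil.buildNewRootname by stripping the file extension, which is a simplification for illustration, not the real implementation.

import os

def extract_rootname_sketch(kwvalue, suffix=""):
    # Treat blank/sentinel values as 'NONE'.
    if kwvalue is None or kwvalue.strip() in ['', 'N/A', 'NONE', 'INDEF']:
        return 'NONE'
    # Drop an IRAF-style environment prefix such as 'jref$'.
    fullval = kwvalue.split('$', 1)[-1]
    fname = os.path.basename(fullval).strip()
    rootname = os.path.splitext(fname)[0]  # approximation of buildNewRootname
    return rootname.replace(suffix, '').strip()

print(extract_rootname_sketch('jref$v971826mj_idc.fits', suffix='_idc'))  # v971826mj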
Example #8
def update_member_names(oldasndict, pydr_input):
    """
    Update names in a member dictionary.

    Given an association dictionary with rootnames and a list of full
    file names, update the names in the member dictionary to carry the
    '_*' suffix. For example, a rootname of 'u9600201m' will be replaced
    by 'u9600201m_c0h', making sure that a MEF file is passed as input
    rather than the corresponding GEIS file.
    """

    omembers = oldasndict['members'].copy()
    nmembers = {}
    translated_names = [f.split('.fits')[0] for f in pydr_input]

    newkeys = [fileutil.buildNewRootname(fname) for fname in pydr_input]

    for okey, oval in list(omembers.items()):
        if okey in newkeys:
            nkey = pydr_input[newkeys.index(okey)]
            nmembers[nkey.split('.fits')[0]] = oval

    oldasndict.pop('members')
    # replace should always be True, to cover both the case when flt files
    # were removed and the case when names were translated

    oldasndict.update(members=nmembers, replace=True)
    oldasndict['order'] = translated_names
    return oldasndict
Example #9
def extract_rootname(kwvalue, suffix=""):
    """ Returns the rootname from a full reference filename

        If a non-valid value (any of ['','N/A','NONE','INDEF',None]) is input,
            simply return a string value of 'NONE'

        This function will also replace any 'suffix' specified with a blank.
    """
    # check to see whether a valid kwvalue has been provided as input
    if kwvalue is None or kwvalue.strip() in ['', 'N/A', 'NONE', 'INDEF']:
        return 'NONE'  # no valid value, so return 'NONE'

    # for a valid kwvalue, parse out the rootname
    # strip off any environment variable from input filename, if any are given
    if '$' in kwvalue:
        fullval = kwvalue[kwvalue.find('$') + 1:]
    else:
        fullval = kwvalue
    # Extract filename without path from kwvalue
    fname = os.path.basename(fullval).strip()

    # Now, rip out just the rootname from the full filename
    rootname = fileutil.buildNewRootname(fname)

    # Now, remove any known suffix from rootname
    rootname = rootname.replace(suffix, '')
    return rootname.strip()
Example #10
def geis2mef(sciname, convert_dq=True):
    """
    Converts a GEIS science file and its corresponding
    data quality file (if present) to MEF format
    Writes out both files to disk.
    Returns the new name of the science image.
    """
    clobber = True
    mode = 'update'
    memmap = True
    # Input was specified as a GEIS image, but no FITS copy
    # exists.  Read it in with 'readgeis' and make a copy
    # then open the FITS copy...
    try:
        # Open as a GEIS image for reading only
        fimg = readgeis.readgeis(sciname)
    except Exception:
        raise IOError("Could not open GEIS input: %s" % sciname)

    # Check for the existence of a data quality file
    _dqname = fileutil.buildNewRootname(sciname, extn='.c1h')
    dqexists = os.path.exists(_dqname)
    if dqexists:
        try:
            dqfile = readgeis.readgeis(_dqname)
            dqfitsname = fileutil.buildFITSName(_dqname)
        except Exception:
            print("Could not read data quality file %s" % _dqname)
            dqexists = False  # skip writing a DQ product below

    # Check to see if user wanted to update GEIS header.
    # or write out a multi-extension FITS file and return a handle to it
    # User wants to make a FITS copy and update it
    # using the filename they have provided
    fitsname = fileutil.buildFITSName(sciname)

    # Write out GEIS image as multi-extension FITS.
    fexists = os.path.exists(fitsname)
    if (fexists and clobber) or not fexists:
        print('Writing out GEIS as MEF to ', fitsname)
        if ASTROPY_VER_GE13:
            fimg.writeto(fitsname, overwrite=clobber)
        else:
            fimg.writeto(fitsname, clobber=clobber)
        if dqexists:
            print('Writing out GEIS as MEF to ', dqfitsname)
            if ASTROPY_VER_GE13:
                dqfile.writeto(dqfitsname, overwrite=clobber)
            else:
                dqfile.writeto(dqfitsname, clobber=clobber)
    # Now close input GEIS image, and open writable
    # handle to output FITS image instead...
    fimg.close()
    del fimg
    fimg = fits.open(fitsname, mode=mode, memmap=memmap)

    return fimg
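A hedged usage sketch for geis2mef, assuming a GEIS science header file such as u40x0102m.c0h (with its data file and .c1h data quality companion) exists on disk; the name is hypothetical.

# Hypothetical call: convert a GEIS pair to MEF and inspect the result.
fimg = geis2mef('u40x0102m.c0h', convert_dq=True)
print(fimg[0].header.get('INSTRUME'))
fimg.close()  # flush the 'update'-mode handle back to disk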
Example #11
def geis2mef(sciname, convert_dq=True):
    """
    Converts a GEIS science file and its corresponding
    data quality file (if present) to MEF format
    Writes out both files to disk.
    Returns the new name of the science image.
    """
    clobber = True
    mode = 'update'
    memmap = True
    # Input was specified as a GEIS image, but no FITS copy
    # exists.  Read it in with 'readgeis' and make a copy
    # then open the FITS copy...
    try:
        # Open as a GEIS image for reading only
        fimg = readgeis.readgeis(sciname)
    except Exception:
        raise IOError("Could not open GEIS input: %s" % sciname)

    # Check for the existence of a data quality file
    _dqname = fileutil.buildNewRootname(sciname, extn='.c1h')
    dqexists = os.path.exists(_dqname)
    if dqexists:
        try:
            dqfile = readgeis.readgeis(_dqname)
            dqfitsname = fileutil.buildFITSName(_dqname)
        except Exception:
            print("Could not read data quality file %s" % _dqname)
            dqexists = False  # skip writing a DQ product below

    # Check to see if user wanted to update GEIS header.
    # or write out a multi-extension FITS file and return a handle to it
    # User wants to make a FITS copy and update it
    # using the filename they have provided
    fitsname = fileutil.buildFITSName(sciname)

    # Write out GEIS image as multi-extension FITS.
    fexists = os.path.exists(fitsname)
    if (fexists and clobber) or not fexists:
        print('Writing out GEIS as MEF to ', fitsname)
        if ASTROPY_VER_GE13:
            fimg.writeto(fitsname, overwrite=clobber)
        else:
            fimg.writeto(fitsname, clobber=clobber)
        if dqexists:
            print('Writing out GEIS as MEF to ', dqfitsname)
            if ASTROPY_VER_GE13:
                dqfile.writeto(dqfitsname, overwrite=clobber)
            else:
                dqfile.writeto(dqfitsname, clobber=clobber)
    # Now close input GEIS image, and open writable
    # handle to output FITS image instead...
    fimg.close()
    del fimg
    fimg = fits.open(fitsname, mode=mode, memmap=memmap)

    return fimg
Example #12
def convert(file):
    newfilename = fileutil.buildNewRootname(file, extn='_c0h.fits')
    try:
        newimage = fileutil.openImage(file, writefits=True,
                                      fitsname=newfilename, clobber=True)
        del newimage
        return newfilename
    except IOError:
        print('Warning: File %s could not be found' % file)
        return None
Example #13
    def addMembers(self, filename):

        # The PC chip defines the orientation of the metachip, so use
        # it for the PARITY as well.
        self.detector = 'WFPC'

        _chip1_rot = None
        # Build rootname here for each SCI extension...
        if self.pars['section'] is None:
            self.pars['section'] = [None] * self.nmembers
            group_indx = list(range(1, self.nmembers + 1))
        else:
            group_indx = self.pars['section']

        for i in range(self.nmembers):
            _extname = self.imtype.makeSciName(i + 1,
                                               section=self.pars['section'][i])

            _detnum = fileutil.getKeyword(_extname, self.DETECTOR_NAME)

            # Start by looking for the corresponding WFPC2 'c1h' files
            _dqfile, _dqextn = self._findDQFile()

            # Reset dqfile name in ImType class to point to new file
            self.imtype.dqfile = _dqfile
            self.imtype.dq_extn = _dqextn

            # Build mask file for this member chip
            _dqname = self.imtype.makeDQName(extver=group_indx[i])
            _masklist = []
            _masknames = []

            if _dqname is not None:
                _maskname = buildmask.buildMaskName(
                    fileutil.buildNewRootname(_dqname), _detnum)
            else:
                _maskname = None
            _masknames.append(_maskname)

            outmask = buildmask.buildShadowMaskImage(
                _dqname, _detnum, group_indx[i], _maskname,
                bitvalue=self.bitvalue[0], binned=self.binned)
            _masklist.append(outmask)

            _maskname = _maskname.replace('final_mask', 'single_mask')
            _masknames.append(_maskname)
            outmask = buildmask.buildShadowMaskImage(
                _dqname, _detnum, group_indx[i], _maskname,
                bitvalue=self.bitvalue[1], binned=self.binned)
            _masklist.append(outmask)
            _masklist.append(_masknames)

            self.members.append(Exposure(
                _extname, idckey=self.idckey, dqname=_dqname,
                mask=_masklist, parity=self.PARITY[str(i + 1)],
                idcdir=self.pars['idcdir'], group_indx=i + 1,
                rot=_chip1_rot, handle=self.image_handle, extver=_detnum,
                exptime=self.exptime[0],
                ref_pscale=self.REFDATA['1']['psize'], binned=self.binned))

            if self.idckey != 'idctab':
                _chip1_rot = self.members[0].geometry.def_rot
Example #14
    def setNames(self, filename, output):
        """
        Define standard name attributes:
                outname     - Default final output name
                outdata     - Name for drizzle science output
                outsingle   - Name for output for single image
        """
        self.rootname = filename
        self.outname = output

        # Define FITS output filenames for intermediate products
        # to be used when 'build=no'
        self.outdata = fileutil.buildNewRootname(output, extn='_sci.fits')
        self.outweight = fileutil.buildNewRootname(output, extn='_weight.fits')
        self.outcontext = fileutil.buildNewRootname(output, extn='_context.fits')

        # Define output file names for separate output for each input
        self.outsingle = fileutil.buildNewRootname(filename, extn='_single_sci.fits')
        self.outsweight = fileutil.buildNewRootname(filename, extn='_single_wht.fits')
        self.outscontext = None
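The naming convention set up above can be illustrated standalone. This sketch approximates fileutil.buildNewRootname(name, extn=...) as "strip the extension, then append extn", which is an assumption for illustration only.

import os

def build_new_rootname_sketch(name, extn):
    # Approximation only: strip the extension, then append the suffix.
    return os.path.splitext(name)[0] + extn

output = 'final_drz.fits'  # hypothetical output name
print(build_new_rootname_sketch(output, '_sci.fits'))     # final_drz_sci.fits
print(build_new_rootname_sketch(output, '_weight.fits'))  # final_drz_weight.fits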
Example #15
def convert(file):
    newfilename = fileutil.buildNewRootname(file, extn='_c0h.fits')
    try:
        newimage = fileutil.openImage(file,
                                      writefits=True,
                                      fitsname=newfilename,
                                      clobber=True)
        del newimage
        return newfilename
    except IOError:
        print('Warning: File %s could not be found' % file)
        return None
Example #16
def init_logging(logfile=DEFAULT_LOGNAME, default=None, level=logging.INFO):
    """
    Set up logger for capturing stdout/stderr messages.

    Must be called prior to writing any messages that you want to log.
    """

    if logfile == "INDEF":
        if not is_blank(default):
            logname = fileutil.buildNewRootname(default, '.log')
        else:
            logname = DEFAULT_LOGNAME
    elif logfile not in [None, "", " "]:
        if logfile.endswith('.log'):
            logname = logfile
        else:
            logname = logfile + '.log'
    else:
        logname = None

    if logname is not None:
        logutil.setup_global_logging()
        # Don't use logging.basicConfig since it can only be called once in a
        # session
        # TODO: Would be fine to use logging.config.dictConfig, but it's not
        # available in Python 2.5
        global _log_file_handler
        root_logger = logging.getLogger()
        if _log_file_handler:
            root_logger.removeHandler(_log_file_handler)
        # Default mode is 'a' which is fine
        _log_file_handler = logging.FileHandler(logname)
        # TODO: Make the default level configurable in the task parameters
        _log_file_handler.setLevel(level)

        # Ensure file handler has '.name' set so calling code can get it.
        _log_file_handler.set_name(logname)

        _log_file_handler.setFormatter(
            logging.Formatter('[%(levelname)-8s] %(message)s'))
        root_logger.setLevel(level)
        root_logger.addHandler(_log_file_handler)

        print('Setting up logfile : ', logname)

    else:
        print('No trailer file created...')

    return logname
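The core wiring above can be reproduced with the standard library alone; a minimal sketch without logutil or the global handler bookkeeping (the log filename is arbitrary).

import logging

def attach_logfile(logname, level=logging.INFO):
    # Mirror the handler setup above: one FileHandler on the root logger
    # using the same '[LEVEL   ] message' format (default mode is 'a').
    handler = logging.FileHandler(logname)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('[%(levelname)-8s] %(message)s'))
    root = logging.getLogger()
    root.setLevel(level)
    root.addHandler(handler)
    return handler

attach_logfile('example.log')
logging.info('this message is captured in example.log')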
Example #17
def init_logging(logfile=DEFAULT_LOGNAME, default=None, level=logging.INFO):
    """
    Set up logger for capturing stdout/stderr messages.

    Must be called prior to writing any messages that you want to log.
    """

    if logfile == "INDEF":
        if not is_blank(default):
            logname = fileutil.buildNewRootname(default, '.log')
        else:
            logname = DEFAULT_LOGNAME
    elif logfile not in [None, "", " "]:
        if logfile.endswith('.log'):
            logname = logfile
        else:
            logname = logfile + '.log'
    else:
        logname = None

    if logname is not None:
        logutil.setup_global_logging()
        # Don't use logging.basicConfig since it can only be called once in a
        # session
        # TODO: Would be fine to use logging.config.dictConfig, but it's not
        # available in Python 2.5
        global _log_file_handler
        root_logger = logging.getLogger()
        if _log_file_handler:
            root_logger.removeHandler(_log_file_handler)
        # Default mode is 'a' which is fine
        _log_file_handler = logging.FileHandler(logname)
        # TODO: Make the default level configurable in the task parameters
        _log_file_handler.setLevel(level)
        _log_file_handler.setFormatter(
            logging.Formatter('[%(levelname)-8s] %(message)s'))
        root_logger.setLevel(level)
        root_logger.addHandler(_log_file_handler)

        print('Setting up logfile : ', logname)

        #stdout_logger = logging.getLogger('stsci.tools.logutil.stdout')
        # Disable display of prints to stdout from all packages except
        # drizzlepac
        #stdout_logger.addFilter(logutil.EchoFilter(include=['drizzlepac']))
    else:
        print('No trailer file created...')
Example #18
def find_gsc_offset(image, input_catalog='GSC1', output_catalog='GAIA'):
    """Find the GSC to GAIA offset based on guide star coordinates

    Parameters
    ----------
    image : str
        filename of image to be processed

    Returns
    -------
    delta_ra,delta_dec : tuple of floats
        Offset in decimal degrees of image based on correction to guide star
        coordinates relative to GAIA
    """
    serviceType = "GSCConvert/GSCconvert.aspx"
    spec_str = "TRANSFORM={}-{}&IPPPSSOOT={}"

    if 'rootname' in pf.getheader(image):
        ippssoot = pf.getval(image, 'rootname').upper()
    else:
        ippssoot = fu.buildNewRootname(image).upper()

    spec = spec_str.format(input_catalog, output_catalog, ippssoot)
    serviceUrl = "{}/{}?{}".format(SERVICELOCATION, serviceType, spec)
    rawcat = requests.get(serviceUrl)
    if not rawcat.ok:
        log.info("Problem accessing service with:\n{}".format(serviceUrl))
        raise ValueError

    delta_ra = delta_dec = None
    tree = BytesIO(rawcat.content)
    for _, element in etree.iterparse(tree):
        if element.tag == 'deltaRA':
            delta_ra = float(element.text)
        elif element.tag == 'deltaDEC':
            delta_dec = float(element.text)

    return delta_ra, delta_dec
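The response parsing in the loop above can be exercised offline. This sketch feeds iterparse a hand-written XML payload shaped like the one the code expects; the payload values are hypothetical.

import xml.etree.ElementTree as etree
from io import BytesIO

# Hypothetical service response carrying the two tags the loop reads.
payload = b"<r><deltaRA>0.00012</deltaRA><deltaDEC>-0.00034</deltaDEC></r>"

delta_ra = delta_dec = None
for _, element in etree.iterparse(BytesIO(payload)):
    if element.tag == 'deltaRA':
        delta_ra = float(element.text)
    elif element.tag == 'deltaDEC':
        delta_dec = float(element.text)

print(delta_ra, delta_dec)  # 0.00012 -0.00034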
Example #19
def find_gsc_offset(image, input_catalog='GSC1', output_catalog='GAIA'):
    """Find the GSC to GAIA offset based on guide star coordinates

    Parameters
    ----------
    image : str
        filename of image to be processed

    Returns
    -------
    delta_ra,delta_dec : tuple of floats
        Offset in decimal degrees of image based on correction to guide star
        coordinates relative to GAIA
    """
    serviceType = "GSCConvert/GSCconvert.aspx"
    spec_str = "TRANSFORM={}-{}&IPPPSSOOT={}"

    if 'rootname' in pf.getheader(image):
        ippssoot = pf.getval(image, 'rootname').upper()
    else:
        ippssoot = fu.buildNewRootname(image).upper()

    spec = spec_str.format(input_catalog, output_catalog, ippssoot)
    serviceUrl = "{}/{}?{}".format(SERVICELOCATION, serviceType, spec)
    rawcat = requests.get(serviceUrl)
    if not rawcat.ok:
        print("Problem accessing service with:\n{{}".format(serviceUrl))
        raise ValueError

    delta_ra = delta_dec = None
    tree = BytesIO(rawcat.content)
    for _, element in etree.iterparse(tree):
        if element.tag == 'deltaRA':
            delta_ra = float(element.text)
        elif element.tag == 'deltaDEC':
            delta_dec = float(element.text)

    return delta_ra, delta_dec
Example #20
def waiver2mef(sciname, newname=None, convert_dq=True):
    """
    Converts a GEIS science file and its corresponding
    data quality file (if present) to MEF format.
    Writes out both files to disk.
    Returns the new name of the science image.
    """

    def convert(file):
        newfilename = fileutil.buildNewRootname(file, extn='_c0h.fits')
        try:
            newimage = fileutil.openImage(file, writefits=True,
                                          fitsname=newfilename, clobber=True)
            del newimage
            return newfilename
        except IOError:
            print('Warning: File %s could not be found' % file)
            return None

    newsciname = convert(sciname)
    if convert_dq:
        dq_name = convert(fileutil.buildNewRootname(sciname, extn='_c1h.fits'))

    return newsciname
Example #21
def splitStis(stisfile, sci_count):
    """
    :Purpose: Split a STIS association file into multiple imset MEF files.

    Split the corresponding spt file if present into single spt files.
    If an spt file cannot be split or is missing, a warning is printed.

    Returns
    -------
    names: list
        a list with the names of the new flt files.

    """
    newfiles = []

    f = fits.open(stisfile)
    hdu0 = f[0].copy()

    for count in range(1, sci_count + 1):
        fitsobj = fits.HDUList()
        fitsobj.append(hdu0)
        hdu = f[('sci', count)].copy()
        fitsobj.append(hdu)
        rootname = hdu.header['EXPNAME']
        newfilename = fileutil.buildNewRootname(rootname, extn='_flt.fits')
        try:
            # Verify error array exists
            if f[('err', count)].data is None:
                raise ValueError
            # Verify dq array exists
            if f[('dq', count)].data is None:
                raise ValueError
            # Copy the err extension
            hdu = f[('err', count)].copy()
            fitsobj.append(hdu)
            # Copy the dq extension
            hdu = f[('dq', count)].copy()
            fitsobj.append(hdu)
            fitsobj[1].header['EXTVER'] = 1
            fitsobj[2].header['EXTVER'] = 1
            fitsobj[3].header['EXTVER'] = 1
        except ValueError:
            print('\nWarning:')
            print('Extension version %d of the input file %s does not' %
                  (count, stisfile))
            print('contain all required image extensions. Each must contain')
            print('populated SCI, ERR and DQ arrays.')

            continue

        # Determine if the file you wish to create already exists on the disk.
        # If the file does exist, replace it.
        if os.path.exists(newfilename):
            os.remove(newfilename)
            print("       Replacing " + newfilename + "...")

        # Write out the new file
        fitsobj.writeto(newfilename)
        newfiles.append(newfilename)
    f.close()

    sptfilename = fileutil.buildNewRootname(stisfile, extn='_spt.fits')
    try:
        sptfile = fits.open(sptfilename)
    except IOError:
        print('SPT file not found %s \n' % sptfilename)
        return newfiles

    if sptfile:
        hdu0 = sptfile[0].copy()
        try:
            for count in range(1, sci_count + 1):
                fitsobj = fits.HDUList()
                fitsobj.append(hdu0)
                hdu = sptfile[count].copy()
                fitsobj.append(hdu)
                rootname = hdu.header['EXPNAME']
                newfilename = fileutil.buildNewRootname(rootname,
                                                        extn='_spt.fits')
                fitsobj[1].header['EXTVER'] = 1
                if os.path.exists(newfilename):
                    os.remove(newfilename)
                    print("       Replacing " + newfilename + "...")

                # Write out the new file
                fitsobj.writeto(newfilename)
        except Exception:
            print("Warning: Unable to split spt file %s " % sptfilename)
        sptfile.close()

    return newfiles
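For reference, each per-imset file assembled above follows the standard MEF layout; a minimal sketch that builds one in memory with astropy.io.fits (the array contents and output name are dummy values).

import numpy as np
from astropy.io import fits

# One imset: SCI + ERR + DQ extensions, each with EXTVER=1, after a
# primary HDU -- the same layout splitStis writes out per exposure.
data = np.zeros((16, 16), dtype=np.float32)
hdus = fits.HDUList([fits.PrimaryHDU()])
for extname in ('SCI', 'ERR', 'DQ'):
    hdu = fits.ImageHDU(data=data, name=extname)
    hdu.header['EXTVER'] = 1
    hdus.append(hdu)
hdus.writeto('example_flt.fits', overwrite=True)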
Example #22
def compare_sub_to_full_sci(subarray, full_sci, output=False, update=True):
    from stsci.tools import fileutil
    from stwcs import updatewcs

    if update:
        # update input SCI file to be consistent with reference files in header
        print('Updating input file ', subarray, ' to be consistent with reference files listed in header...')
        updatewcs.updatewcs(subarray)
        print('Updating input file ', full_sci, ' to be consistent with reference files listed in header...')
        updatewcs.updatewcs(full_sci)

    fulldgeofile = fileutil.osfn(pyfits.getval(subarray, 'ODGEOFIL'))
    # parse out rootname from input file if user wants results written to file
    if output:
        soutroot = fileutil.buildNewRootname(subarray)
        foutroot = fileutil.buildNewRootname(full_sci)
        hdulist = pyfits.open(fulldgeofile)

    detector = pyfits.getval(fulldgeofile, 'DETECTOR')
    filter_names = fileutil.getFilterNames(pyfits.getheader(subarray))

    # count the number of chips in subarray image
    xyfile = pyfits.open(subarray)
    numchips = 0
    ccdchip = []
    extname = xyfile[1].header['EXTNAME']
    for extn in xyfile:
        if 'extname' in extn.header and extn.header['extname'] == extname:
            numchips += 1
            if 'ccdchip' in extn.header:
                ccdchip.append([extn.header['ccdchip'], extn.header['extver']])
            else:
                ccdchip.append([1, 1])

    snx = xyfile['sci',1].header['NAXIS1']
    sny = xyfile['sci',1].header['NAXIS2']
    ltv1 = xyfile['sci',1].header['ltv1']
    ltv2 = xyfile['sci',1].header['ltv2']
    xyfile.close()

    # build grid of points for full-size image for
    #    chips corresponding to subarray
    xyfile = pyfits.open(full_sci)
    fullchip = []
    for extn in xyfile:
        if ('extname' in extn.header and extn.header['extname'] == extname) and \
           extn.header['ccdchip'] == ccdchip[0][0]:
            fullchip.append([extn.header['ccdchip'], extn.header['extver']])
    xyfile.close()

    sxarr, syarr = build_grid_arrays(snx, sny, 1)
    full_range = [slice(-ltv2, -ltv2 + sny), slice(-ltv1, -ltv1 + snx)]

    fnx = pyfits.getval(full_sci, 'NAXIS1', 'sci', 1)
    fny = pyfits.getval(full_sci, 'NAXIS2', 'sci', 1)
    fxarr, fyarr = build_grid_arrays(fnx, fny, 1)

    # initialize plot here
    if has_matplotlib:
        pl.clf()
        pl.gray()

    for chip, det, fext in zip(list(range(1, numchips + 1)), ccdchip, fullchip):
        # Compute the correction imposed by the D2IM+DGEO corrections
        #   on the subarray
        sxout, syout = transform_d2im_dgeo(subarray, det[1], sxarr, syarr)
        sdx = (sxout - sxarr).reshape(sny, snx)
        sdy = (syout - syarr).reshape(sny, snx)
        # Compute the correction imposed by the D2IM+DGEO corrections
        #    on the full sized SCI image
        fxout, fyout = transform_d2im_dgeo(full_sci, fext[1], fxarr, fyarr)
        fdx = (fxout - fxarr).reshape(fny, fnx)
        fdy = (fyout - fyarr).reshape(fny, fnx)

        # determine the difference
        diffx = (sdx - fdx[full_range[0], full_range[1]]).astype(np.float32)
        if has_matplotlib:
            pl.imshow(diffx)
            pl.title('sub_dx-full_x: %s %s[%d:%d,%d:%d] with %g +/- %g' %
                     (filter_names, detector, full_range[0].start,
                      full_range[0].stop, full_range[1].start,
                      full_range[1].stop, diffx.mean(), diffx.std()))
            pl.colorbar()

            if sys.version_info[0] < 3:
                raw_input("Press 'ENTER' to close figure and plot DY...")
            else:
                input("Press 'ENTER' to close figure and plot DY...")

            pl.close()

        # determine the difference
        diffy = (sdy - fdy[full_range[0], full_range[1]]).astype(np.float32)
        if has_matplotlib:
            pl.imshow(diffy)
            pl.title('sub_dy-full_y: %s %s[%d:%d,%d:%d] with %g +/- %g' %
                     (filter_names, detector, full_range[0].start,
                      full_range[0].stop, full_range[1].start,
                      full_range[1].stop, diffy.mean(), diffy.std()))
            pl.colorbar()

            if sys.version_info[0] < 3:
                raw_input("Press 'ENTER' to close figure and exit...")
            else:
                input("Press 'ENTER' to close figure and exit...")

            pl.close()

        if output:
            outname = foutroot + '_sci' + str(chip) + '_newfull_dxy.fits'
            if os.path.exists(outname):
                os.remove(outname)
            hdulist['dx', chip].data = fdx
            hdulist['dy', chip].data = fdy
            hdulist.writeto(outname)
            outname = soutroot + '_sci' + str(chip) + '_newsub_dxy.fits'
            if os.path.exists(outname):
                os.remove(outname)
            hdulist['dx', chip].data = sdx
            hdulist['dy', chip].data = sdy
            hdulist.writeto(outname)

            """
            outname = outroot+'_sci'+str(chip)+'_diff_dxy.fits'
            if os.path.exists(outname): os.remove(outname)
            hdulist['dx',chip].data = diffx
            hdulist['dy',chip].data = diffy
            hdulist.writeto(outname)
            """
            print('Created output file with differences named:', outname)
    if output:
        hdulist.close()
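The subarray-to-full-frame registration above hinges on the LTV1/LTV2 offsets; a small numpy sketch of the same slicing with dummy values.

import numpy as np

# Hypothetical subarray geometry: a 512x512 cutout whose corner sits at
# (x, y) = (256, 128) in the full frame, so the subarray header carries
# LTV1 = -256 and LTV2 = -128.
snx, sny = 512, 512
ltv1, ltv2 = -256.0, -128.0

full = np.zeros((1024, 1024), dtype=np.float32)
# Same construction as 'full_range' in the function above:
full_range = (slice(int(-ltv2), int(-ltv2) + sny),
              slice(int(-ltv1), int(-ltv1) + snx))
print(full[full_range].shape)  # (512, 512)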
Example #23
def process(inFile, force=False, newpath=None, inmemory=False, num_cores=None,
            headerlets=True, align_to_gaia=True):
    """ Run astrodrizzle on input file/ASN table
        using default values for astrodrizzle parameters.
    """
    # We only need to import this package if a user runs the task
    import drizzlepac
    from drizzlepac import processInput  # used for creating new ASNs for _flc inputs
    from stwcs import updatewcs
    from drizzlepac import alignimages

    # interpret environment variable, if specified
    if envvar_compute_name in os.environ:
        val = os.environ[envvar_compute_name].lower()
        if val not in envvar_bool_dict:
            msg = "ERROR: invalid value for {}.".format(envvar_compute_name)
            msg += "  \n    Valid Values: on, off, yes, no, true, false"
            raise ValueError(msg)
        align_to_gaia = envvar_bool_dict[val]

    if envvar_new_apriori_name in os.environ:
        # Reset ASTROMETRY_STEP_CONTROL based on this variable
        # This provides backward-compatibility until ASTROMETRY_STEP_CONTROL
        # gets removed entirely.
        val = os.environ[envvar_new_apriori_name].lower()
        if val not in envvar_dict:
            msg = "ERROR: invalid value for {}.".format(envvar_new_apriori_name)
            msg += "  \n    Valid Values: on, off, yes, no, true, false"
            raise ValueError(msg)

        os.environ[envvar_old_apriori_name] = envvar_dict[val]

    if headerlets or align_to_gaia:
        from stwcs.wcsutil import headerlet

    # Open the input file
    try:
        # Make sure given filename is complete and exists...
        inFilename = fileutil.buildRootname(inFile, ext=['.fits'])
        if not os.path.exists(inFilename):
            print("ERROR: Input file - %s - does not exist." % inFilename)
            return
    except TypeError:
        print("ERROR: Inappropriate input file.")
        return

    # If newpath was specified, move all files to that directory for processing
    if newpath:
        orig_processing_dir = os.getcwd()
        new_processing_dir = _createWorkingDir(newpath, inFilename)
        _copyToNewWorkingDir(new_processing_dir, inFilename)
        os.chdir(new_processing_dir)

    # Initialize for later use...
    _mname = None
    _new_asn = None
    _calfiles = []

    # Identify WFPC2 inputs to account for differences in WFPC2 inputs
    wfpc2_input = fits.getval(inFilename, 'instrume') == 'WFPC2'
    cal_ext = None

    # Check input file to see if [DRIZ/DITH]CORR is set to PERFORM
    if '_asn' in inFilename:
        # We are working with an ASN table.
        # Use asnutil code to extract filename
        inFilename = _lowerAsn(inFilename)
        _new_asn = [inFilename]
        _asndict = asnutil.readASNTable(inFilename, None, prodonly=False)
        _cal_prodname = _asndict['output'].lower()
        #_fname = fileutil.buildRootname(_cal_prodname,ext=['_drz.fits'])

        # Retrieve the first member's rootname for possible use later
        _fimg = fits.open(inFilename, memmap=False)
        for name in _fimg[1].data.field('MEMNAME'):
            if name[-1] != '*':
                _mname = name.split('\0', 1)[0].lower()
                break
        _fimg.close()
        del _fimg

    else:
        # Check to see if input is a _RAW file
        # If it is, strip off the _raw.fits extension...
        _indx = inFilename.find('_raw')
        if _indx < 0:
            _indx = len(inFilename)
        # ... and build the CALXXX product rootname.
        if wfpc2_input:
            # force code to define _c0m file as calibrated product to be used
            cal_ext = ['_c0m.fits']
        _mname = fileutil.buildRootname(inFilename[:_indx], ext=cal_ext)

        _cal_prodname = inFilename[:_indx]
        # Reset inFilename to correspond to appropriate input for
        # drizzle: calibrated product name.
        inFilename = _mname

        if _mname is None:
            errorMsg = 'Could not find calibrated product!'
            raise Exception(errorMsg)

    # Create trailer filenames based on ASN output filename or
    # on input name for single exposures
    if '_raw' in inFile:
        # Output trailer file to RAW file's trailer
        _trlroot = inFile[:inFile.find('_raw')]
    elif '_asn' in inFile:
        # Output trailer file to ASN file's trailer, not product's trailer
        _trlroot = inFile[:inFile.find('_asn')]
    else:
        # Default: trim off the last suffix of the input filename
        # and replace it with '.tra'
        _indx = inFile.rfind('_')
        if _indx > 0:
            _trlroot = inFile[:_indx]
        else:
            _trlroot = inFile

    _trlfile = _trlroot + '.tra'

    # Open product and read keyword value
    # Check to see if product already exists...
    dkey = 'DRIZCORR'
    # ...if product does NOT exist, interrogate input file
    # to find out whether 'dcorr' has been set to PERFORM
    # Check if user wants to process again regardless of DRIZCORR keyword value
    if force:
        dcorr = 'PERFORM'
    else:
        if _mname:
            _fimg = fits.open(fileutil.buildRootname(_mname, ext=['_raw.fits']),
                              memmap=False)
            _phdr = _fimg['PRIMARY'].header
            if dkey in _phdr:
                dcorr = _phdr[dkey]
            else:
                dcorr = None
            _fimg.close()
            del _fimg
        else:
            dcorr = None

    time_str = _getTime()
    _tmptrl = _trlroot + '_tmp.tra'
    _drizfile = _trlroot + '_pydriz'
    _drizlog = _drizfile + ".log" # the '.log' gets added automatically by astrodrizzle
    _alignlog = _trlroot + '_align.log'
    if dcorr == 'PERFORM':
        if '_asn.fits' not in inFilename:
            # Working with a singleton
            # However, we always want to make sure we always use
            # a calibrated product as input, if available.
            _infile = fileutil.buildRootname(_cal_prodname, ext=cal_ext)
            _infile_flc = fileutil.buildRootname(_cal_prodname, ext=['_flc.fits'])

            _cal_prodname = _infile
            _inlist = _calfiles = [_infile]

            # Add CTE corrected filename as additional input if present
            if os.path.exists(_infile_flc) and _infile_flc != _infile:
                _inlist.append(_infile_flc)

        else:
            # Working with an ASN table...
            _infile = inFilename
            flist, duplist = processInput.checkForDuplicateInputs(_asndict['order'])
            _calfiles = flist
            if len(duplist) > 0:
                origasn = processInput.changeSuffixinASN(inFilename, 'flt')
                dupasn = processInput.changeSuffixinASN(inFilename, 'flc')
                _inlist = [origasn,dupasn]
            else:
                _inlist = [_infile]
            # We want to keep the original specification of the calibration
            # product name, though, not a lower-case version...
            _cal_prodname = inFilename
            _new_asn.extend(_inlist) # kept so we can delete it when finished

        # check to see whether FLC files are also present, and need to be updated
        # generate list of FLC files
        align_files = None
        _calfiles_flc = [f.replace('_flt.fits', '_flc.fits') for f in _calfiles]
        # ensure these files exist; if not, blank them out
        # Also pick out what files will be used for additional alignment to GAIA
        if not os.path.exists(_calfiles_flc[0]):
            _calfiles_flc = None
            align_files = _calfiles
            align_update_files = None
        else:
            align_files = _calfiles_flc
            align_update_files = _calfiles

        # Run updatewcs on each list of images
        updatewcs.updatewcs(_calfiles)
        if _calfiles_flc:
            updatewcs.updatewcs(_calfiles_flc)

        if align_to_gaia:
            # Perform additional alignment on the FLC files, if present
            ###############
            #
            # call hlapipeline code here on align_files list of files
            #
            ###############
            # Create trailer marker message for start of align_to_GAIA processing
            _trlmsg = _timestamp("Align_to_GAIA started ")
            print(_trlmsg)
            ftmp = open(_tmptrl,'w')
            ftmp.writelines(_trlmsg)
            ftmp.close()
            _appendTrlFile(_trlfile,_tmptrl)
            _trlmsg = ""

            # Create an empty astropy table so it can be used as input/output for the perform_align function
            #align_table = Table()
            try:
                align_table = alignimages.perform_align(align_files,update_hdr_wcs=True, runfile=_alignlog)
                for row in align_table:
                    if row['status'] == 0:
                        trlstr = "Successfully aligned {} to {} astrometric frame\n"
                        _trlmsg += trlstr.format(row['imageName'], row['catalog'])
                    else:
                        trlstr = "Could not align {} to absolute astrometric frame\n"
                        _trlmsg += trlstr.format(row['imageName'])

            except Exception:
                # Something went wrong with alignment to GAIA, so report this in
                # trailer file
                _trlmsg = "EXCEPTION encountered in alignimages...\n"
                _trlmsg += "   No correction to absolute astrometric frame applied!\n"

            # Write the perform_align log to the trailer file...(this will delete the _alignlog)
            _appendTrlFile(_trlfile,_alignlog)

            # Append messages from this calling routine post-perform_align
            ftmp = open(_tmptrl,'w')
            ftmp.writelines(_trlmsg)
            ftmp.close()
            _appendTrlFile(_trlfile,_tmptrl)
            _trlmsg = ""

            # Check to see whether there are any additional input files that need
            # to be aligned (namely, FLT images)
            if align_update_files and align_table:
                # Apply headerlets from alignment to FLT version of the files
                for fltfile, flcfile in zip(align_update_files, align_files):
                    row = align_table[align_table['imageName']==flcfile]
                    headerletFile = row['headerletFile'][0]
                    if headerletFile != "None":
                        headerlet.apply_headerlet_as_primary(fltfile, headerletFile,
                                                            attach=True, archive=True)
                        # append log file contents to _trlmsg for inclusion in trailer file
                        _trlstr = "Applying headerlet {} as Primary WCS to {}\n"
                        _trlmsg += _trlstr.format(headerletFile, fltfile)
                    else:
                        _trlmsg += "No absolute astrometric headerlet applied to {}\n".format(fltfile)

            # Finally, append any further messages associated with alignment from this calling routine
            _trlmsg += _timestamp('Align_to_GAIA completed ')
            print(_trlmsg)
            ftmp = open(_tmptrl,'w')
            ftmp.writelines(_trlmsg)
            ftmp.close()
            _appendTrlFile(_trlfile,_tmptrl)

        # Run astrodrizzle and send its processing statements to _trlfile
        _pyver = drizzlepac.astrodrizzle.__version__

        for _infile in _inlist: # Run astrodrizzle for all inputs
            # Create trailer marker message for start of astrodrizzle processing
            _trlmsg = _timestamp('astrodrizzle started ')
            _trlmsg += __trlmarker__
            _trlmsg += '%s: Processing %s with astrodrizzle Version %s\n' % (time_str,_infile,_pyver)
            print(_trlmsg)

            # Write out trailer comments to trailer file...
            ftmp = open(_tmptrl,'w')
            ftmp.writelines(_trlmsg)
            ftmp.close()
            _appendTrlFile(_trlfile,_tmptrl)

            _pyd_err = _trlroot+'_pydriz.stderr'

            try:
                b = drizzlepac.astrodrizzle.AstroDrizzle(input=_infile,runfile=_drizfile,
                                            configobj='defaults',in_memory=inmemory,
                                            num_cores=num_cores, **pipeline_pars)
            except Exception as errorobj:
                _appendTrlFile(_trlfile,_drizlog)
                _appendTrlFile(_trlfile,_pyd_err)
                _ftrl = open(_trlfile,'a')
                _ftrl.write('ERROR: Could not complete astrodrizzle processing of %s.\n' % _infile)
                _ftrl.write(str(sys.exc_info()[0])+': ')
                _ftrl.writelines(str(errorobj))
                _ftrl.write('\n')
                _ftrl.close()
                print('ERROR: Could not complete astrodrizzle processing of %s.' % _infile)
                raise Exception(str(errorobj))

            # Now, append comments created by PyDrizzle to CALXXX trailer file
            print('Updating trailer file %s with astrodrizzle comments.' % _trlfile)
            _appendTrlFile(_trlfile,_drizlog)

        # Save this for when astropy.io.fits can modify a file 'in-place'
        # Update calibration switch
        _fimg = fits.open(_cal_prodname, mode='update', memmap=False)
        _fimg['PRIMARY'].header[dkey] = 'COMPLETE'
        _fimg.close()
        del _fimg

        # Enforce pipeline convention of all lower-case product
        # names
        _prodlist = glob.glob('*drz.fits')
        for _prodname in _prodlist:
            _plower = _prodname.lower()
            if _prodname != _plower:
                os.rename(_prodname, _plower)

    else:
        # Create default trailer file messages when astrodrizzle is not
        # run on a file.  This will typically apply only to BIAS,DARK
        # and other reference images.
        # Start by building up the message...
        _trlmsg = _timestamp('astrodrizzle skipped ')
        _trlmsg = _trlmsg + __trlmarker__
        _trlmsg = _trlmsg + '%s: astrodrizzle processing not requested for %s.\n' % (time_str,inFilename)
        _trlmsg = _trlmsg + '       astrodrizzle will not be run at this time.\n'
        print(_trlmsg)

        # Write message out to temp file and append it to full trailer file
        ftmp = open(_tmptrl,'w')
        ftmp.writelines(_trlmsg)
        ftmp.close()
        _appendTrlFile(_trlfile,_tmptrl)

    # Append final timestamp to trailer file...
    _final_msg = '%s: Finished processing %s \n' % (time_str,inFilename)
    _final_msg += _timestamp('astrodrizzle completed ')
    _trlmsg += _final_msg
    ftmp = open(_tmptrl,'w')
    ftmp.writelines(_trlmsg)
    ftmp.close()
    _appendTrlFile(_trlfile,_tmptrl)

    # If we created a new ASN table, we need to remove it
    if _new_asn is not None:
        for _name in _new_asn:
            fileutil.removeFile(_name)

    # Clean up any generated OrIg_files directory
    if os.path.exists("OrIg_files"):
        # check to see whether this directory is empty
        flist = glob.glob('OrIg_files/*.fits')
        if len(flist) == 0:
            os.rmdir("OrIg_files")
        else:
            print('OrIg_files directory NOT removed as it still contained images...')

    # If headerlets have already been written out by alignment code,
    # do NOT write out this version of the headerlets
    if headerlets:
        # Generate headerlets for each updated FLT image
        hlet_msg = _timestamp("Writing Headerlets started")
        for fname in _calfiles:
            frootname = fileutil.buildNewRootname(fname)
            hname = "%s_flt_hlet.fits"%frootname
            # Write out headerlet file used by astrodrizzle, however,
            # do not overwrite any that was already written out by alignimages
            if not os.path.exists(hname):
                hlet_msg += "Created Headerlet file %s \n"%hname
                try:
                    headerlet.write_headerlet(fname,'OPUS',output='flt', wcskey='PRIMARY',
                        author="OPUS",descrip="Default WCS from Pipeline Calibration",
                        attach=False,clobber=True,logging=False)
                except ValueError:
                    hlet_msg += _timestamp("SKIPPED: Headerlet not created for %s \n"%fname)
                    # update trailer file to log creation of headerlet files
        hlet_msg += _timestamp("Writing Headerlets completed")
        ftrl = open(_trlfile,'a')
        ftrl.write(hlet_msg)
        ftrl.close()

    # If processing was done in a temp working dir, restore results to original
    # processing directory, return to original working dir and remove temp dir
    if newpath:
        _restoreResults(new_processing_dir,orig_processing_dir)
        os.chdir(orig_processing_dir)
        _removeWorkingDir(new_processing_dir)

    # Provide feedback to user
    print(_final_msg)
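A hedged usage sketch for the pipeline driver above, assuming drizzlepac and its calibration environment are installed and that a raw exposure named j8cw03010_raw.fits (hypothetical) sits in the working directory.

# Hypothetical call: force astrodrizzle processing regardless of the
# DRIZCORR switch, write headerlets, and skip the GAIA alignment step.
process('j8cw03010_raw.fits', force=True, align_to_gaia=False)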
Example #24
    def __init__(self, expname, handle=None, dqname=None, idckey=None,
                 new=no, wcs=None, mask=None, pa_key=None, parity=None,
                 idcdir=None, rot=None, extver=1, exptime=None,
                 ref_pscale=1.0, binned=1, mt_wcs=None, group_indx=None):

        # This name should be formatted for use in image I/O
        self.name = fileutil.osfn(expname)

        # osfn() will expand '.' unnecessarily, potentially
        # creating a string-length problem for 'drizzle', which
        # is limited to strings of 80 chars.
        _path, _name = os.path.split(self.name)
        # if path for this filename is the same as the current dir,
        # then there is no need to pass along the path.
        if _path == os.getcwd():
            self.name = _name

        # Keep track of any associated mask file created for
        # this exposure from its DQ file, or other mask file.
        _fname, _extn = fileutil.parseFilename(expname)
        _open = False

        # Make sure we have an open file handle to use for getting the
        # header and data arrays.
        if not handle and not new:
            handle = fileutil.openImage(expname)
            _open = True

        # If no extension was specified, try to interrogate the file
        # to find whether the SCI array is in the Primary
        # (as in Simple FITS) or first extension (as in MEF).
        if handle and _extn is None:
            if handle[0].data is None:
                # Primary extension specified and no data present.
                # Try looking for data in next extension.
                if len(handle) > 1 and handle[1].data is not None:
                    _extn = 1
                    expname += '[1]'
                else:
                    raise IOError("No valid image data in %s.\n" % expname)
            else:
                _extn = 0

        self.dgeoname = None
        self.xgeoim = ""
        self.ygeoim = ""
        self.exptime = exptime
        self.group_indx = group_indx
        if not new:
            # Read in a copy of the header for this exposure/group/extension
            _header = fileutil.getHeader(expname, handle=handle)
            _chip = drutil.getChipId(_header)
            self.chip = str(_chip)
            # Keep track of any distortion correction images provided
            # for this chip
            self.dgeoname = fileutil.getKeyword(expname, 'DGEOFILE', handle=handle)
            self.xgeoim, self.ygeoim = self.getDGEOExtn()
            if self.exptime is None:
                self.exptime = float(_header['EXPTIME'])
                if self.exptime == 0.:
                    self.exptime = 1.0
            #
            # Extract photometric transformation keywords
            #    If they do not exist, use default values of 0 and 1
            #
            plam = fileutil.getKeyword(expname,'PHOTPLAM',handle=handle)
            # PHOTPLAM is given in Angstroms; convert to nm, falling back
            # to the 555 nm default when the keyword is missing.
            self.plam = 555. if plam is None else float(plam) / 10.
            photzpt = fileutil.getKeyword(expname,'PHOTZPT',handle=handle)
            self.photzpt = 0.0 if photzpt is None else float(photzpt)
            photflam = fileutil.getKeyword(expname,'PHOTFLAM',handle=handle)
            self.photflam = 1.0 if photflam is None else float(photflam)

            # Read in date-obs from primary header
            if _header:
                if 'date-obs' in _header:
                    self.dateobs = _header['date-obs']
                elif 'date_obs' in _header:
                    self.dateobs = _header['date_obs']
                else:
                    self.dateobs = None
            else:
                self.dateobs = None

            # Initialize the value of BUNIT based on the header information, if
            # the header has the keyword
            if 'BUNIT' in _header and _header['BUNIT'].find('ergs') < 0:
                self.bunit = _header['BUNIT']
            else:
                self.bunit = 'ELECTRONS'

        else:
            _chip = 1
            _header = None
            self.chip = str(_chip)
            # Set a default value for pivot wavelength
            self.plam = 555.
            self.photzpt = 0.0
            self.photflam = 1.0
            self.dateobs = None
            if self.exptime is None:
                self.exptime = 1.

        self.parity = parity
        self.header = _header
        self.extver = extver

        # Create a pointer to the mask file's data array
        # and the name of the original input DQ file
        self.maskname = None
        self.singlemaskname = None
        self.masklist = None
        if mask is not None:
            # Specifies filenames to be used if created.
            self.maskname = mask[0]
            self.singlemaskname = mask[1]
            self.masklist = mask[2]

        self.dqname = dqname

        # Remember the name of the coeffs file generated for this chip
        self.coeffs = self.buildCoeffsName()

        # Read the name of idcfile from image header if not explicitly
        # provided by user.
        if idckey is not None and idckey.lower() != 'wcs':
            _indx = expname.find('[')
            if  _indx > -1:
                _idc_fname = expname[:_indx]+'[0]'
            else: _idc_fname = expname+'[0]'

            idcfile, idctype = drutil.getIDCFile(self.header,keyword=idckey,
                                        directory=idcdir)
        else:
            idcfile = None
            idctype = None

        if idckey is not None and idckey.lower() == 'header':
            idckey = idctype

        # Get distortion model and WCS info.
        self.geometry = ObsGeometry(expname, idcfile, idckey=idckey,
                chip=_chip, new=new, header=self.header,
                pa_key=pa_key, rot=rot, date=self.dateobs,
                ref_pscale=ref_pscale, binned=binned, mt_wcs=mt_wcs)

        # Remember the name and type of the IDC file used...
        self.idcfile = idcfile
        self.idctype = idctype

        # Remember the names of the filters used for the exposure
        self.filters = self.geometry.filter1+','+self.geometry.filter2

        # Define shape here...
        # nx,ny,pixel scale
        #
        if wcs is not None:
            # We have been passed a WCS to use
            self.geometry.wcs = wcs
            self.geometry.model.pscale = wcs.pscale
            if expname is not None:
                self.geometry.wcs.rootname = expname

        self.naxis1 = self.geometry.wcs.naxis1
        self.naxis2 = self.geometry.wcs.naxis2
        self.pscale = self.geometry.wcs.pscale
        self.shape = (self.naxis1,self.naxis2,self.pscale)

        # Keep track of the positions of the corners of the exposure
        # both for the RAW image and the
        # distortion-corrected, unscaled, unrotated image
        self.corners = {'raw': np.zeros((4, 2), dtype=np.float64),
                        'corrected': np.zeros((4, 2), dtype=np.float64)}
        self.setCorners()

        # Generate BLOT output name specific to this Exposure
        _blot_extn = '_sci'+repr(extver)+'_blt.fits'
        self.outblot = fileutil.buildNewRootname(self.name,extn=_blot_extn)

        # Keep track of undistorted frame's position relative to metachip
        # Zero-point offset for chip relative to meta-chip product
        # These values get computed using 'setSingleOffsets' from 'writeCoeffs'
        # to ensure that the final XDELTA/YDELTA values have been computed.
        self.product_wcs = self.geometry.wcslin
        self.xzero = 0.
        self.yzero = 0.
        self.chip_shape = (0.,0.)
        self.xsh2 = 0.
        self.ysh2 = 0.

        if _open:
            handle.close()
            del handle
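
A minimal standalone sketch of the SCI-extension detection performed in the constructor above, assuming astropy.io.fits; the helper name find_sci_extension is ours, not part of this module:

from astropy.io import fits

def find_sci_extension(filename):
    # Hypothetical helper illustrating the detection logic above:
    # return 0 if image data lives in the primary HDU (simple FITS),
    # 1 if it lives in the first extension (MEF); raise IOError otherwise.
    with fits.open(filename) as hdul:
        if hdul[0].data is not None:
            return 0
        if len(hdul) > 1 and hdul[1].data is not None:
            return 1
    raise IOError("No valid image data in %s" % filename)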
Example #25
def process(inFile,force=False,newpath=None, inmemory=False, num_cores=None,
            headerlets=True):
    """ Run astrodrizzle on input file/ASN table
        using default values for astrodrizzle parameters.
    """
    # We only need to import this package if a user runs the task
    import drizzlepac
    from drizzlepac import processInput # used for creating new ASNs for _flc inputs
    import stwcs

    if headerlets:
        from stwcs.wcsutil import headerlet

    # Open the input file
    try:
        # Make sure given filename is complete and exists...
        inFilename = fileutil.buildRootname(inFile,ext=['.fits'])
        if not os.path.exists(inFilename):
            print("ERROR: Input file - %s - does not exist." % inFilename)
            return
    except TypeError:
        print("ERROR: Inappropriate input file.")
        return

    # If newpath was specified, move all files to that directory for processing
    if newpath:
        orig_processing_dir = os.getcwd()
        new_processing_dir = _createWorkingDir(newpath,inFilename)
        _copyToNewWorkingDir(new_processing_dir,inFilename)
        os.chdir(new_processing_dir)

    # Initialize for later use...
    _mname = None
    _new_asn = None
    _calfiles = []

    # Identify WFPC2 inputs to account for differences in WFPC2 inputs
    wfpc2_input = fits.getval(inFilename, 'instrume') == 'WFPC2'
    cal_ext = None

    # Check input file to see if [DRIZ/DITH]CORR is set to PERFORM
    if '_asn' in inFilename:
        # We are working with an ASN table.
        # Use asnutil code to extract filename
        inFilename = _lowerAsn(inFilename)
        _new_asn = [inFilename]
        _asndict = asnutil.readASNTable(inFilename,None,prodonly=False)
        _cal_prodname = _asndict['output'].lower()
        _fname = fileutil.buildRootname(_cal_prodname,ext=['_drz.fits'])

        # Retrieve the first member's rootname for possible use later
        _fimg = fits.open(inFilename, memmap=False)
        for name in _fimg[1].data.field('MEMNAME'):
            if name[-1] != '*':
                _mname = name.split('\0', 1)[0].lower()
                break
        _fimg.close()
        del _fimg

    else:
        # Check to see if input is a _RAW file
        # If it is, strip off the _raw.fits extension...
        _indx = inFilename.find('_raw')
        if _indx < 0: _indx = len(inFilename)
        # ... and build the CALXXX product rootname.
        if wfpc2_input:
            # force code to define _c0m file as calibrated product to be used
            cal_ext = ['_c0m.fits']
        _mname = fileutil.buildRootname(inFilename[:_indx], ext=cal_ext)

        _cal_prodname = inFilename[:_indx]
        # Reset inFilename to correspond to appropriate input for
        # drizzle: calibrated product name.
        inFilename = _mname

        if _mname is None:
            errorMsg = 'Could not find calibrated product!'
            raise Exception(errorMsg)

    # Create trailer filenames based on ASN output filename or
    # on input name for single exposures
    if '_raw' in inFile:
        # Output trailer file to RAW file's trailer
        _trlroot = inFile[:inFile.find('_raw')]
    elif '_asn' in inFile:
        # Output trailer file to ASN file's trailer, not product's trailer
        _trlroot = inFile[:inFile.find('_asn')]
    else:
        # Default: trim off the last suffix of the input filename
        # and replace it with '.tra'
        _indx = inFile.rfind('_')
        if _indx > 0:
            _trlroot = inFile[:_indx]
        else:
            _trlroot = inFile

    _trlfile = _trlroot + '.tra'

    # Open product and read keyword value
    # Check to see if product already exists...
    dkey = 'DRIZCORR'
    # ...if product does NOT exist, interrogate input file
    # to find out whether 'dcorr' has been set to PERFORM
    # Check if user wants to process again regardless of DRIZCORR keyword value
    if force:
        dcorr = 'PERFORM'
    else:
        if _mname:
            _fimg = fits.open(fileutil.buildRootname(_mname,ext=['_raw.fits']), memmap=False)
            _phdr = _fimg['PRIMARY'].header
            if dkey in _phdr:
                dcorr = _phdr[dkey]
            else:
                dcorr = None
            _fimg.close()
            del _fimg
        else:
            dcorr = None

    time_str = _getTime()
    _tmptrl = _trlroot + '_tmp.tra'
    _drizfile = _trlroot + '_pydriz'
    _drizlog = _drizfile + ".log" # the '.log' gets added automatically by astrodrizzle
    if dcorr == 'PERFORM':
        if '_asn.fits' not in inFilename:
            # Working with a singleton
            # However, we always want to make sure we use
            # a calibrated product as input, if available.
            _infile = fileutil.buildRootname(_cal_prodname, ext=cal_ext)
            _infile_flc = fileutil.buildRootname(_cal_prodname,ext=['_flc.fits'])

            _cal_prodname = _infile
            _inlist = _calfiles = [_infile]

            # Add CTE corrected filename as additional input if present
            if os.path.exists(_infile_flc) and _infile_flc != _infile:
                _inlist.append(_infile_flc)

        else:
            # Working with an ASN table...
            _infile = inFilename
            flist,duplist = processInput.checkForDuplicateInputs(_asndict['order'])
            _calfiles = flist
            if len(duplist) > 0:
                origasn = processInput.changeSuffixinASN(inFilename,'flt')
                dupasn = processInput.changeSuffixinASN(inFilename,'flc')
                _inlist = [origasn,dupasn]
            else:
                _inlist = [_infile]
            # We want to keep the original specification of the calibration
            # product name, though, not a lower-case version...
            _cal_prodname = inFilename
            _new_asn.extend(_inlist) # kept so we can delete it when finished


        # Run astrodrizzle and send its processing statements to _trlfile
        _pyver = drizzlepac.astrodrizzle.__version__

        for _infile in _inlist: # Run astrodrizzle for all inputs
            # Create trailer marker message for start of astrodrizzle processing
            _trlmsg = _timestamp('astrodrizzle started ')
            _trlmsg = _trlmsg+ __trlmarker__
            _trlmsg = _trlmsg + '%s: Processing %s with astrodrizzle Version %s\n' % (time_str,_infile,_pyver)
            print(_trlmsg)

            # Write out trailer comments to trailer file...
            ftmp = open(_tmptrl,'w')
            ftmp.writelines(_trlmsg)
            ftmp.close()
            _appendTrlFile(_trlfile,_tmptrl)

            _pyd_err = _trlroot+'_pydriz.stderr'

            try:
                b = drizzlepac.astrodrizzle.AstroDrizzle(input=_infile,runfile=_drizfile,
                                            configobj='defaults',in_memory=inmemory,
                                            num_cores=num_cores, **pipeline_pars)
            except Exception as errorobj:
                _appendTrlFile(_trlfile,_drizlog)
                _appendTrlFile(_trlfile,_pyd_err)
                _ftrl = open(_trlfile,'a')
                _ftrl.write('ERROR: Could not complete astrodrizzle processing of %s.\n' % _infile)
                _ftrl.write(str(sys.exc_info()[0])+': ')
                _ftrl.writelines(str(errorobj))
                _ftrl.write('\n')
                _ftrl.close()
                print('ERROR: Could not complete astrodrizzle processing of %s.' % _infile)
                raise Exception(str(errorobj))

            # Now, append comments created by PyDrizzle to CALXXX trailer file
            print('Updating trailer file %s with astrodrizzle comments.' % _trlfile)
            _appendTrlFile(_trlfile,_drizlog)

        # Save this for when astropy.io.fits can modify a file 'in-place'
        # Update calibration switch
        _fimg = fits.open(_cal_prodname, mode='update', memmap=False)
        _fimg['PRIMARY'].header[dkey] = 'COMPLETE'
        _fimg.close()
        del _fimg

        # Enforce pipeline convention of all lower-case product
        # names
        _prodlist = glob.glob('*drz.fits')
        for _prodname in _prodlist:
            _plower = _prodname.lower()
            if _prodname != _plower:  os.rename(_prodname,_plower)

    else:
        # Create default trailer file messages when astrodrizzle is not
        # run on a file.  This will typically apply only to BIAS,DARK
        # and other reference images.
        # Start by building up the message...
        _trlmsg = _timestamp('astrodrizzle skipped ')
        _trlmsg = _trlmsg + __trlmarker__
        _trlmsg = _trlmsg + '%s: astrodrizzle processing not requested for %s.\n' % (time_str,inFilename)
        _trlmsg = _trlmsg + '       astrodrizzle will not be run at this time.\n'
        print(_trlmsg)

        # Write message out to temp file and append it to full trailer file
        ftmp = open(_tmptrl,'w')
        ftmp.writelines(_trlmsg)
        ftmp.close()
        _appendTrlFile(_trlfile,_tmptrl)

    _fmsg = None
    # Append final timestamp to trailer file...
    _final_msg = '%s: Finished processing %s \n' % (time_str,inFilename)
    _final_msg += _timestamp('astrodrizzle completed ')
    _trlmsg += _final_msg
    ftmp = open(_tmptrl,'w')
    ftmp.writelines(_trlmsg)
    ftmp.close()
    _appendTrlFile(_trlfile,_tmptrl)

    # If we created a new ASN table, we need to remove it
    if _new_asn is not None:
        for _name in _new_asn: fileutil.removeFile(_name)

    # Clean up any generated OrIg_files directory
    if os.path.exists("OrIg_files"):
        # check to see whether this directory is empty
        flist = glob.glob('OrIg_files/*.fits')
        if len(flist) == 0:
            os.rmdir("OrIg_files")
        else:
            print('OrIg_files directory NOT removed as it still contained images...')
    if headerlets:
        # Generate headerlets for each updated FLT image
        hlet_msg = _timestamp("Writing Headerlets started")
        for fname in _calfiles:
            frootname = fileutil.buildNewRootname(fname)
            hname = "%s_flt_hlet.fits"%frootname
            hlet_msg += "Created Headerlet file %s \n"%hname
            try:
                headerlet.write_headerlet(fname,'OPUS',output='flt', wcskey='PRIMARY',
                    author="OPUS",descrip="Default WCS from Pipeline Calibration",
                    attach=False,clobber=True,logging=False)
            except ValueError:
                hlet_msg += _timestamp("SKIPPED: Headerlet not created for %s \n"%fname)
                # update trailer file to log creation of headerlet files
        hlet_msg += _timestamp("Writing Headerlets completed")
        ftrl = open(_trlfile,'a')
        ftrl.write(hlet_msg)
        ftrl.close()

    # If processing was done in a temp working dir, restore results to original
    # processing directory, return to original working dir and remove temp dir
    if newpath:
        _restoreResults(new_processing_dir,orig_processing_dir)
        os.chdir(orig_processing_dir)
        _removeWorkingDir(new_processing_dir)

    # Provide feedback to user
    print(_final_msg)
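
A hedged usage sketch for process() above; the ASN filename is a placeholder, and the call assumes the supporting helpers (_timestamp, _appendTrlFile, etc.) are available alongside it:

# Usage sketch; the ASN filename is a placeholder.
# force=True drizzles even when DRIZCORR is not set to PERFORM.
process('j8cw03010_asn.fits', force=True, headerlets=False)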
Example #26
def buildEmptyDRZ(input, output):
    """
    Create an empty DRZ file.

    This module creates an empty DRZ file in a valid FITS format so that the HST
    pipeline can handle the Multidrizzle zero exposure time exception
    where all data has been excluded from processing.

    Parameters
    ----------
    input : str
        filename of the initial input to process_input
    output : str
        filename of the default empty _drz.fits file to be generated

    """

    # Identify the first input image
    inputfile = parseinput.parseinput(input)[0]
    if not inputfile:
        print('\n******* ERROR *******', file=sys.stderr)
        print(
              'No input file found!  Check specification of parameter '
              '"input". ', file=sys.stderr)
        print('Quitting...',  file=sys.stderr)
        print('******* ***** *******\n',  file=sys.stderr)
        return # raise IOError, "No input file found!"

    # Set up output file here...
    if output is None:
        if len(input) == 1:
            oname = fileutil.buildNewRootname(input[0])
        else:
            oname = 'final'
        output = fileutil.buildNewRootname(oname, extn='_drz.fits')
    else:
        if 'drz' not in output:
            output = fileutil.buildNewRootname(output, extn='_drz.fits')

    log.info('Setting up output name: %s' % output)

    # Open the first image (of the excludedFileList?) to use as a template to build
    # the DRZ file.
    try:
        log.info('Building empty DRZ file from %s' % inputfile[0])
        img = fits.open(inputfile[0], memmap=False)
    except Exception:
        raise IOError('Unable to open file %s \n' % inputfile)

    # Create the fitsobject
    fitsobj = fits.HDUList()
    # Copy the primary header
    hdu = img[0].copy()
    fitsobj.append(hdu)

    # Modify the 'NEXTEND' keyword of the primary header to 3 for the
    #'sci, wht, and ctx' extensions of the newly created file.
    fitsobj[0].header['NEXTEND'] = 3

    # Create the 'SCI' extension
    hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
    hdu.header['EXTNAME'] = 'SCI'
    fitsobj.append(hdu)

    # Create the 'WHT' extension
    hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
    hdu.header['EXTNAME'] = 'WHT'
    fitsobj.append(hdu)

    # Create the 'CTX' extension
    hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
    hdu.header['EXTNAME'] = 'CTX'
    fitsobj.append(hdu)

    # Add HISTORY comments explaining the creation of this file.
    fitsobj[0].header.add_history("** AstroDrizzle has created this empty "
                                  "DRZ product because**")
    fitsobj[0].header.add_history("** all input images were excluded from "
                                  "processing.**")


    # Change the filename in the primary header to reflect the name of the output
    # filename.
    fitsobj[0].header['FILENAME'] = str(output)  # +"_drz.fits"

    # Change the ROOTNAME keyword to the ROOTNAME of the output PRODUCT
    fitsobj[0].header['ROOTNAME'] = str(output.split('_drz.fits')[0])
    # Modify the ASN_MTYP keyword to contain "PROD-DTH" so it can be properly
    # ingested into the archive catalog.

    # STIS has this keyword in the [1] header, so direct the code to look
    # first in the primary header, then in the first extension.
    try:
        fitsobj[0].header['ASN_MTYP'] = 'PROD-DTH'
    except KeyError:
        fitsobj[1].header['ASN_MTYP'] = 'PROD-DTH'

    # If the file is already on disk delete it and replace it with the
    # new file
    dirfiles = os.listdir(os.curdir)
    if dirfiles.count(output) > 0:
        os.remove(output)
        log.info("       Replacing %s..." % output)

    # Write out the empty DRZ file
    fitsobj.writeto(output)

    print(textutil.textbox(
        'ERROR:\nAstroDrizzle has created an empty DRZ product because all '
        'input images were excluded from processing or a user requested the '
        'program to stop.') + '\n', file=sys.stderr)

    return
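
buildEmptyDRZ() is normally triggered by the pipeline when every input has been excluded; a minimal direct call might look like this, with both filenames as placeholders:

# Direct-call sketch; both filenames are placeholders.
buildEmptyDRZ('j8cw03f6q_flt.fits', 'j8cw03010_drz.fits')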
Example #27
def processFilenames(input=None,output=None,infilesOnly=False):
    """Process the input string which contains the input file information and
       return a filelist,output
    """
    ivmlist = None
    oldasndict = None

    if input is None:
        print("No input files provided to processInput")
        raise ValueError("No input files provided to processInput")

    if not isinstance(input, list) and ('_asn' in input or '_asc' in input):
        # Input is an association table
        # Get the input files, and run makewcs on them
        oldasndict = asnutil.readASNTable(input, prodonly=infilesOnly)

        if not infilesOnly:
            if output in ["",None,"None"]:
                output = oldasndict['output'].lower() # ensure output name is lower case

        asnhdr = fits.getheader(input, memmap=False)
        # Only perform duplication check if not already completed...
        dupcheck = asnhdr.get('DUPCHECK',default="PERFORM") == "PERFORM"

        #filelist = [fileutil.buildRootname(fname) for fname in oldasndict['order']]
        filelist = buildASNList(oldasndict['order'],input,check_for_duplicates=dupcheck)

    elif not isinstance(input, list) and input[0] == '@':
        # input is an @ file
        f = open(input[1:])
        # Read the first line in order to determine whether
        # IVM files have been specified in a second column...
        line = f.readline()
        f.close()
        # Parse the @-file with irafglob to extract the input filename
        filelist = irafglob.irafglob(input, atfile=util.atfile_sci)
        # If there is a second column...
        if len(line.split()) == 2:
            # ...parse out the names of the IVM files as well
            ivmlist = irafglob.irafglob(input, atfile=util.atfile_ivm)
        if output in ['',None,"None"]:
            if len(filelist) == 1:
                output = fileutil.buildNewRootname(filelist[0])
            else:
                output = 'final'
    else:
        #input is a string or a python list
        try:
            filelist, output = parseinput.parseinput(input, outputname=output)
            if output in ['',None,"None"]:
                if len(filelist) == 1:
                    output = fileutil.buildNewRootname(filelist[0])
                else:
                    output = 'final'
            if not isinstance(input, list):
                filelist.sort()
        except IOError:
            raise

    # Sort the list of input files.
    # This ensures the list of input files has the same order on all platforms;
    # it can have a different order because listdir() uses inode order, not unix order.
    #filelist.sort()

    return filelist, output, ivmlist, oldasndict
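
processFilenames() accepts an ASN/ASC table, an '@'-file, or a plain string/list of filenames. A usage sketch for the plain-string form; the wildcard pattern is a placeholder:

# Usage sketch; the pattern is a placeholder.
filelist, output, ivmlist, oldasndict = processFilenames('*_flt.fits')
# With several matches output defaults to 'final'; with one, to its rootname.
print(filelist, output)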
def splitStis(stisfile, sci_count):
    """
    Purpose
    =======

    Split a STIS association file into multiple imset MEF files.
    Split the corresponding spt file if present into single spt files.
    If an spt file can't be split or is missing, a warning is printed.

    Output: a list with the names of the new flt files.
    """
    newfiles = []

    f = pyfits.open(stisfile)
    hdu0 = f[0].copy()


    for count in range(1,sci_count+1):
        #newfilename = rootname+str(count)+'.fits'
        fitsobj = pyfits.HDUList()
        fitsobj.append(hdu0)
        hdu = f['sci',count].copy()
        fitsobj.append(hdu)
        rootname = hdu.header['EXPNAME']
        newfilename = fileutil.buildNewRootname(rootname, extn='_flt.fits')
        try:
            # Verify error array exists
            if f['err',count].data is None:
                raise ValueError
            # Verify dq array exists
            if f['dq',count].data is None:
                raise ValueError
            # Copy the err extension
            hdu = f['err',count].copy()
            fitsobj.append(hdu)
            # Copy the dq extension
            hdu = f['dq',count].copy()
            fitsobj.append(hdu)
        except (KeyError, ValueError):
            errorstr =  "\n###############################\n"
            errorstr += "#                             #\n"
            errorstr += "# ERROR:                      #\n"
            errorstr += "#  The input image:           #\n"
            errorstr += "      " + str(stisfile) +"\n"
            errorstr += "#  does not contain required  #\n"
            errorstr += "#  image extensions.  Each    #\n"
            errorstr += "#  must contain populated sci,#\n"
            errorstr += "#  dq, and err arrays.        #\n"
            errorstr += "#                             #\n"
            errorstr += "###############################\n"
            raise ValueError(errorstr)


        # Update the 'EXTVER' keyword to indicate the new extension number
        # for the single exposure files.
        fitsobj[1].header['EXTVER'] = 1
        fitsobj[2].header['EXTVER'] = 1
        fitsobj[3].header['EXTVER'] = 1

        # Determine if the file you wish to create already exists on the disk.
        # If the file does exist, replace it.
        if os.path.exists(newfilename):
            os.remove(newfilename)
            print("       Replacing "+newfilename+"...")

        # Write out the new file
        fitsobj.writeto(newfilename)
        newfiles.append(newfilename)
    f.close()

    sptfilename = fileutil.buildNewRootname(stisfile, extn='_spt.fits')
    try:
        sptfile = pyfits.open(sptfilename)
    except IOError:
        print('SPT file not found %s \n' % sptfilename)
        sptfile=None

    if sptfile:
        hdu0 = sptfile[0].copy()
        try:
            for count in range(1,sci_count+1):
                fitsobj = pyfits.HDUList()
                fitsobj.append(hdu0)
                hdu = sptfile[count].copy()
                fitsobj.append(hdu)
                rootname = hdu.header['EXPNAME']
                newfilename = fileutil.buildNewRootname(rootname, extn='_spt.fits')
                fitsobj[1].header['EXTVER'] = 1
                if (os.path.exists(newfilename)):
                    os.remove(newfilename)
                    print("       Replacing "+newfilename+"...")

                # Write out the new file
                fitsobj.writeto(newfilename)
        except Exception:
            print("Warning: Unable to split spt file %s " % sptfilename)
        sptfile.close()

    return newfiles
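
A sketch of driving splitStis(): count the SCI imsets first, then split. The STIS filename is a placeholder, and astropy.io.fits stands in for pyfits:

# Driver sketch; the filename is a placeholder.
from astropy.io import fits
with fits.open('o8k401010_flt.fits') as _f:
    sci_count = sum(1 for hdu in _f if hdu.header.get('EXTNAME') == 'SCI')
newfiles = splitStis('o8k401010_flt.fits', sci_count)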
Example #29
def run(input, quiet=yes, restore=no, prepend='O', tddcorr=True):

    print("+ MAKEWCS Version %s" % __version__)

    _prepend = prepend

    files = parseinput.parseinput(input)[0]
    newfiles = []
    if not files:
        print("No valid input files found.\n")
        raise IOError("No valid input files found.")

    for image in files:
        #find out what the input is
        imgfits,imgtype = fileutil.isFits(image)

        # Check for waiver FITS input; if found, convert it to MEF rather
        # than quitting (the original abort is preserved, disabled, below).
        if imgfits and imgtype == 'waiver':
            """
            errormsg = '\n\nPyDrizzle does not support waiver fits format.\n'
            errormsg += 'Convert the input files to GEIS or multiextension FITS.\n\n'
            raise ValueError, errormsg
            """
            newfilename = fileutil.buildNewRootname(image, extn='_c0h.fits')
            # Convert GEIS image to MEF file
            newimage = fileutil.openImage(image,writefits=True,fitsname=newfilename,clobber=True)
            del newimage
            # Work with new file
            image = newfilename
            newfiles.append(image)
        # If a GEIS image is provided as input, create a new MEF file with
        # a name generated using 'buildFITSName()' and update that new MEF file.
        if not imgfits:
            # Create standardized name for MEF file
            newfilename = fileutil.buildFITSName(image)
            # Convert GEIS image to MEF file
            newimage = fileutil.openImage(image,writefits=True,fitsname=newfilename,clobber=True)
            del newimage
            # Work with new file
            image = newfilename
            newfiles.append(image)

        if not quiet:
            print("Input files: ",files)

        # First get the name of the IDC table
        #idctab = drutil.getIDCFile(_files[0][0],keyword='idctab')[0]
        idctab = drutil.getIDCFile(image,keyword='idctab')[0]
        _found = fileutil.findFile(idctab)
        if idctab is None or idctab == '':
            print('#\n No IDCTAB specified.  No correction can be done for file %s. Quitting makewcs.\n' % image)
            #raise ValueError
            continue
        elif not _found:
            print('#\n IDCTAB: ',idctab,' could not be found. \n')
            print('WCS keywords for file %s will not be updated.\n' %image)
            #raise IOError
            continue

        _phdu = image + '[0]'
        _instrument = fileutil.getKeyword(_phdu,keyword='INSTRUME')
        if _instrument == 'WFPC2':
            Nrefchip, Nrefext = getNrefchip(image)
        else:
            Nrefchip = None
            Nrefext = None
        if _instrument not in NUM_PER_EXTN:
            raise ValueError("Instrument %s not supported yet. Exiting..."
                             % _instrument)

        _detector = fileutil.getKeyword(_phdu, keyword='DETECTOR')
        _nimsets = get_numsci(image)

        for i in range(_nimsets):
            if image.find('.fits') > 0:
                _img = image+'[sci,'+repr(i+1)+']'
            else:
                _img = image+'['+repr(i+1)+']'
            if not restore:
                if not quiet:
                    print('Updating image: ', _img)

                _update(_img,idctab, _nimsets, apply_tdd=False,
                        quiet=quiet,instrument=_instrument,prepend=_prepend,
                        nrchip=Nrefchip, nrext = Nrefext)
                if _instrument == 'ACS' and _detector == 'WFC':
                    tddswitch = fileutil.getKeyword(_phdu,keyword='TDDCORR')
                    # This logic requires that TDDCORR be in the primary header
                    # and set to PERFORM in order to turn this on at all. It can
                    # be turned off by setting either tddcorr=False or setting
                    # the keyword to anything but PERFORM or by deleting the
                    # keyword altogether. PyDrizzle will rely simply on the
                    # values of alpha and beta as computed here to apply the
                    # correction to the coefficients.
                    if (tddcorr and tddswitch != 'OMIT'):
                        print('Applying time-dependent distortion corrections...')
                        _update(_img,idctab, _nimsets, apply_tdd=True, \
                                quiet=quiet,instrument=_instrument,prepend=_prepend, nrchip=Nrefchip, nrext = Nrefext)
            else:
                if not quiet:
                    print('Restoring original WCS values for',_img)
                restoreCD(_img,_prepend)

        #fimg = fileutil.openImage(image,mode='update')
        #if 'TDDCORR' in fimg[0].header and fimg[0].header['TDDCORR'] == 'PERFORM':
        #    fimg[0].header['TDDCORR'] = 'COMPLETE'
        #fimg.close()

    if newfiles == []:
        return files
    else:
        return newfiles
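
A hedged usage sketch for the makewcs run() above; the input pattern is a placeholder. GEIS or waiver-FITS inputs are returned as the names of the converted MEF files:

# Usage sketch; the input pattern is a placeholder.
updated = run('*_c0h.fits', tddcorr=True)
print(updated)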
def process(inFile,
            force=False,
            newpath=None,
            inmemory=False,
            num_cores=None,
            headerlets=True):
    """ Run astrodrizzle on input file/ASN table
        using default values for astrodrizzle parameters.
    """
    # We only need to import this package if a user runs the task
    import drizzlepac
    from drizzlepac import processInput  # used for creating new ASNs for _flc inputs
    import stwcs

    if headerlets:
        from stwcs.wcsutil import headerlet

    # Open the input file
    try:
        # Make sure given filename is complete and exists...
        inFilename = fileutil.buildRootname(inFile, ext=['.fits'])
        if not os.path.exists(inFilename):
            print("ERROR: Input file - %s - does not exist." % inFilename)
            return
    except TypeError:
        print("ERROR: Inappropriate input file.")
        return

    # If newpath was specified, move all files to that directory for processing
    if newpath:
        orig_processing_dir = os.getcwd()
        new_processing_dir = _createWorkingDir(newpath, inFilename)
        _copyToNewWorkingDir(new_processing_dir, inFilename)
        os.chdir(new_processing_dir)

    # Initialize for later use...
    _mname = None
    _new_asn = None
    _calfiles = []

    # Check input file to see if [DRIZ/DITH]CORR is set to PERFORM
    if '_asn' in inFilename:
        # We are working with an ASN table.
        # Use asnutil code to extract filename
        inFilename = _lowerAsn(inFilename)
        _new_asn = [inFilename]
        _asndict = asnutil.readASNTable(inFilename, None, prodonly=False)
        _cal_prodname = _asndict['output'].lower()
        _fname = fileutil.buildRootname(_cal_prodname, ext=['_drz.fits'])

        # Retrieve the first member's rootname for possible use later
        _fimg = fits.open(inFilename)
        for name in _fimg[1].data.field('MEMNAME'):
            if name[-1] != '*':
                _mname = name.split('\0', 1)[0].lower()
                break
        _fimg.close()
        del _fimg

    else:
        # Check to see if input is a _RAW file
        # If it is, strip off the _raw.fits extension...
        _indx = inFilename.find('_raw')
        if _indx < 0: _indx = len(inFilename)
        # ... and build the CALXXX product rootname.
        _mname = fileutil.buildRootname(inFilename[:_indx])
        _cal_prodname = inFilename[:_indx]
        # Reset inFilename to correspond to appropriate input for
        # drizzle: calibrated product name.
        inFilename = _mname

        if _mname is None:
            errorMsg = 'Could not find calibrated product!'
            raise Exception(errorMsg)

    # Create trailer filenames based on ASN output filename or
    # on input name for single exposures
    if '_raw' in inFile:
        # Output trailer file to RAW file's trailer
        _trlroot = inFile[:inFile.find('_raw')]
    elif '_asn' in inFile:
        # Output trailer file to ASN file's trailer, not product's trailer
        _trlroot = inFile[:inFile.find('_asn')]
    else:
        # Default: trim off the last suffix of the input filename
        # and replace it with '.tra'
        _indx = inFile.rfind('_')
        if _indx > 0:
            _trlroot = inFile[:_indx]
        else:
            _trlroot = inFile

    _trlfile = _trlroot + '.tra'

    # Open product and read keyword value
    # Check to see if product already exists...
    dkey = 'DRIZCORR'
    # ...if product does NOT exist, interrogate input file
    # to find out whether 'dcorr' has been set to PERFORM
    # Check if user wants to process again regardless of DRIZCORR keyword value
    if force: dcorr = 'PERFORM'
    else:
        if _mname:
            _fimg = fits.open(fileutil.buildRootname(_mname,
                                                     ext=['_raw.fits']))
            _phdr = _fimg['PRIMARY'].header
            if dkey in _phdr:
                dcorr = _phdr[dkey]
            else:
                dcorr = None
            _fimg.close()
            del _fimg
        else:
            dcorr = None

    time_str = _getTime()
    _tmptrl = _trlroot + '_tmp.tra'
    _drizfile = _trlroot + '_pydriz'
    _drizlog = _drizfile + ".log"  # the '.log' gets added automatically by astrodrizzle
    if dcorr == 'PERFORM':
        if '_asn.fits' not in inFilename:
            # Working with a singleton
            # However, we always want to make sure we use
            # a calibrated product as input, if available.
            _infile = fileutil.buildRootname(_cal_prodname)
            _infile_flc = fileutil.buildRootname(_cal_prodname,
                                                 ext=['_flc.fits'])

            _cal_prodname = _infile
            _inlist = _calfiles = [_infile]

            # Add CTE corrected filename as additional input if present
            if os.path.exists(_infile_flc) and _infile_flc != _infile:
                _inlist.append(_infile_flc)

        else:
            # Working with an ASN table...
            _infile = inFilename
            flist, duplist = processInput.checkForDuplicateInputs(
                _asndict['order'])
            _calfiles = flist
            if len(duplist) > 0:
                origasn = processInput.changeSuffixinASN(inFilename, 'flt')
                dupasn = processInput.changeSuffixinASN(inFilename, 'flc')
                _inlist = [origasn, dupasn]
            else:
                _inlist = [_infile]
            # We want to keep the original specification of the calibration
            # product name, though, not a lower-case version...
            _cal_prodname = inFilename
            _new_asn.extend(_inlist)  # kept so we can delete it when finished

        # Run astrodrizzle and send its processing statements to _trlfile
        _pyver = drizzlepac.astrodrizzle.__version__

        for _infile in _inlist:  # Run astrodrizzle for all inputs
            # Create trailer marker message for start of astrodrizzle processing
            _trlmsg = _timestamp('astrodrizzle started ')
            _trlmsg = _trlmsg + __trlmarker__
            _trlmsg = _trlmsg + '%s: Processing %s with astrodrizzle Version %s\n' % (
                time_str, _infile, _pyver)
            print(_trlmsg)

            # Write out trailer comments to trailer file...
            ftmp = open(_tmptrl, 'w')
            ftmp.writelines(_trlmsg)
            ftmp.close()
            _appendTrlFile(_trlfile, _tmptrl)

            _pyd_err = _trlroot + '_pydriz.stderr'

            try:
                b = drizzlepac.astrodrizzle.AstroDrizzle(input=_infile,
                                                         runfile=_drizfile,
                                                         configobj='defaults',
                                                         in_memory=inmemory,
                                                         num_cores=num_cores,
                                                         **pipeline_pars)
            except Exception as errorobj:
                _appendTrlFile(_trlfile, _drizlog)
                _appendTrlFile(_trlfile, _pyd_err)
                _ftrl = open(_trlfile, 'a')
                _ftrl.write(
                    'ERROR: Could not complete astrodrizzle processing of %s.\n'
                    % _infile)
                _ftrl.write(str(sys.exc_info()[0]) + ': ')
                _ftrl.writelines(str(errorobj))
                _ftrl.write('\n')
                _ftrl.close()
                print(
                    'ERROR: Could not complete astrodrizzle processing of %s.'
                    % _infile)
                raise Exception(str(errorobj))

            # Now, append comments created by PyDrizzle to CALXXX trailer file
            print('Updating trailer file %s with astrodrizzle comments.' %
                  _trlfile)
            _appendTrlFile(_trlfile, _drizlog)

        # Save this for when astropy.io.fits can modify a file 'in-place'
        # Update calibration switch
        _fimg = fits.open(_cal_prodname, mode='update')
        _fimg['PRIMARY'].header[dkey] = 'COMPLETE'
        _fimg.close()
        del _fimg

        # Enforce pipeline convention of all lower-case product
        # names
        _prodlist = glob.glob('*drz.fits')
        for _prodname in _prodlist:
            _plower = _prodname.lower()
            if _prodname != _plower: os.rename(_prodname, _plower)

    else:
        # Create default trailer file messages when astrodrizzle is not
        # run on a file.  This will typically apply only to BIAS,DARK
        # and other reference images.
        # Start by building up the message...
        _trlmsg = _timestamp('astrodrizzle skipped ')
        _trlmsg = _trlmsg + __trlmarker__
        _trlmsg = _trlmsg + '%s: astrodrizzle processing not requested for %s.\n' % (
            time_str, inFilename)
        _trlmsg = _trlmsg + '       astrodrizzle will not be run at this time.\n'
        print(_trlmsg)

        # Write message out to temp file and append it to full trailer file
        ftmp = open(_tmptrl, 'w')
        ftmp.writelines(_trlmsg)
        ftmp.close()
        _appendTrlFile(_trlfile, _tmptrl)

    _fmsg = None
    # Append final timestamp to trailer file...
    _final_msg = '%s: Finished processing %s \n' % (time_str, inFilename)
    _final_msg += _timestamp('astrodrizzle completed ')
    _trlmsg += _final_msg
    ftmp = open(_tmptrl, 'w')
    ftmp.writelines(_trlmsg)
    ftmp.close()
    _appendTrlFile(_trlfile, _tmptrl)

    # If we created a new ASN table, we need to remove it
    if _new_asn is not None:
        for _name in _new_asn:
            fileutil.removeFile(_name)

    # Clean up any generated OrIg_files directory
    if os.path.exists("OrIg_files"):
        # check to see whether this directory is empty
        flist = glob.glob('OrIg_files/*.fits')
        if len(flist) == 0:
            os.rmdir("OrIg_files")
        else:
            print(
                'OrIg_files directory NOT removed as it still contained images...'
            )
    if headerlets:
        # Generate headerlets for each updated FLT image
        hlet_msg = _timestamp("Writing Headerlets started")
        for fname in _calfiles:
            frootname = fileutil.buildNewRootname(fname)
            hname = "%s_flt_hlet.fits" % frootname
            hlet_msg += "Created Headerlet file %s \n" % hname
            try:
                headerlet.write_headerlet(
                    fname,
                    'OPUS',
                    output='flt',
                    wcskey='PRIMARY',
                    author="OPUS",
                    descrip="Default WCS from Pipeline Calibration",
                    attach=False,
                    clobber=True,
                    logging=False)
            except ValueError:
                hlet_msg += _timestamp(
                    "SKIPPED: Headerlet not created for %s \n" % fname)
                # update trailer file to log creation of headerlet files
        hlet_msg += _timestamp("Writing Headerlets completed")
        ftrl = open(_trlfile, 'a')
        ftrl.write(hlet_msg)
        ftrl.close()

    # If processing was done in a temp working dir, restore results to original
    # processing directory, return to original working dir and remove temp dir
    if newpath:
        _restoreResults(new_processing_dir, orig_processing_dir)
        os.chdir(orig_processing_dir)
        _removeWorkingDir(new_processing_dir)

    # Provide feedback to user
    print(_final_msg)
Example #31
def processFilenames(input=None, output=None, infilesOnly=False):
    """Process the input string which contains the input file information and
       return a filelist,output
    """
    ivmlist = None
    oldasndict = None

    if input is None:
        print("No input files provided to processInput")
        raise ValueError("No input files provided to processInput")

    if not isinstance(input, list) and ('_asn' in input or '_asc' in input):
        # Input is an association table
        # Get the input files, and run makewcs on them
        oldasndict = asnutil.readASNTable(input, prodonly=infilesOnly)

        if not infilesOnly:
            if output in ["", None, "None"]:
                # Ensure the output name is lower case
                output = oldasndict['output'].lower()

        asnhdr = fits.getheader(input, memmap=False)
        # Only perform duplication check if not already completed...
        dupcheck = asnhdr.get('DUPCHECK', default="PERFORM") == "PERFORM"

        #filelist = [fileutil.buildRootname(fname) for fname in oldasndict['order']]
        filelist = buildASNList(oldasndict['order'],
                                input,
                                check_for_duplicates=dupcheck)

    elif not isinstance(input, list) and input[0] == '@':
        # input is an @ file
        f = open(input[1:])
        # Read the first line in order to determine whether
        # IVM files have been specified in a second column...
        line = f.readline()
        f.close()
        # Parse the @-file with irafglob to extract the input filename
        filelist = irafglob.irafglob(input, atfile=util.atfile_sci)
        # If there is a second column...
        if len(line.split()) == 2:
            # ...parse out the names of the IVM files as well
            ivmlist = irafglob.irafglob(input, atfile=util.atfile_ivm)
        if output in ['', None, "None"]:
            if len(filelist) == 1:
                output = fileutil.buildNewRootname(filelist[0])
            else:
                output = 'final'
    else:
        #input is a string or a python list
        try:
            filelist, output = parseinput.parseinput(input, outputname=output)
            if output in ['', None, "None"]:
                if len(filelist) == 1:
                    output = fileutil.buildNewRootname(filelist[0])
                else:
                    output = 'final'
            if not isinstance(input, list):
                filelist.sort()
        except IOError:
            raise

    # Sort the list of input files.
    # This ensures the list of input files has the same order on all platforms;
    # it can have a different order because listdir() uses inode order, not unix order.
    #filelist.sort()

    return filelist, output, ivmlist, oldasndict
Example #32
def buildEmptyDRZ(input, output):
    """
    Create an empty DRZ file.

    This module creates an empty DRZ file in a valid FITS format so that the HST
    pipeline can handle the Multidrizzle zero exposure time exception
    where all data has been excluded from processing.

    Parameters
    ----------
    input : str
        filename of the initial input to process_input
    output : str
        filename of the default empty _drz.fits file to be generated

    """

    # Identify the first input image
    inputfile = parseinput.parseinput(input)[0]
    if not inputfile:
        print('\n******* ERROR *******', file=sys.stderr)
        print(
            'No input file found!  Check specification of parameter '
            '"input". ',
            file=sys.stderr)
        print('Quitting...', file=sys.stderr)
        print('******* ***** *******\n', file=sys.stderr)
        return  # raise IOError, "No input file found!"

    # Set up output file here...
    if output is None:
        if len(input) == 1:
            oname = fileutil.buildNewRootname(input[0])
        else:
            oname = 'final'
        output = fileutil.buildNewRootname(oname, extn='_drz.fits')
    else:
        if '_drz' not in output:
            output = fileutil.buildNewRootname(output, extn='_drz.fits')

    print('Building empty DRZ file with output name: %s' % output)

    # Open the first image (of the excludedFileList?) to use as a template to build
    # the DRZ file.
    try:
        log.info('Building empty DRZ file from %s' % inputfile[0])
        img = fits.open(inputfile[0], memmap=False)
    except Exception:
        raise IOError('Unable to open file %s \n' % inputfile)

    # Create the fitsobject
    fitsobj = fits.HDUList()
    # Copy the primary header
    hdu = img[0].copy()
    fitsobj.append(hdu)

    # Modify the 'NEXTEND' keyword of the primary header to 3 for the
    #'sci, wht, and ctx' extensions of the newly created file.
    fitsobj[0].header['NEXTEND'] = 3

    # Create the 'SCI' extension
    hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
    hdu.header['EXTNAME'] = 'SCI'
    fitsobj.append(hdu)

    # Create the 'WHT' extension
    hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
    hdu.header['EXTNAME'] = 'WHT'
    fitsobj.append(hdu)

    # Create the 'CTX' extension
    hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
    hdu.header['EXTNAME'] = 'CTX'
    fitsobj.append(hdu)

    # Add HISTORY comments explaining the creation of this file.
    fitsobj[0].header.add_history("** AstroDrizzle has created this empty "
                                  "DRZ product because**")
    fitsobj[0].header.add_history("** all input images were excluded from "
                                  "processing.**")

    # Change the filename in the primary header to reflect the name of the output
    # filename.
    fitsobj[0].header['FILENAME'] = str(output)  # +"_drz.fits"

    # Change the ROOTNAME keyword to the ROOTNAME of the output PRODUCT
    fitsobj[0].header['ROOTNAME'] = str(output.split('_drz.fits')[0])
    # Modify the ASN_MTYP keyword to contain "PROD-DTH" so it can be properly
    # ingested into the archive catalog.

    # STIS has this keyword in the [1] header, so direct the code to look
    # first in the primary header, then in the first extension.
    try:
        fitsobj[0].header['ASN_MTYP'] = 'PROD-DTH'
    except KeyError:
        fitsobj[1].header['ASN_MTYP'] = 'PROD-DTH'

    # If the file is already on disk delete it and replace it with the
    # new file
    dirfiles = os.listdir(os.curdir)
    if dirfiles.count(output) > 0:
        os.remove(output)
        log.info("       Replacing %s..." % output)

    # Write out the empty DRZ file
    fitsobj.writeto(output)

    print(textutil.textbox(
        'ERROR:\nAstroDrizzle has created an empty DRZ product because all '
        'input images were excluded from processing or a user requested the '
        'program to stop.') + '\n',
          file=sys.stderr)

    return
def buildEmptyDRZ(input, output):
    """

    METHOD  : _buildEmptyDRZ
    PURPOSE : Create an empty DRZ file in a valid FITS format so that the HST
              pipeline can handle the Multidrizzle zero exposure time exception
              where all data has been excluded from processing.
    INPUT   : None
    OUTPUT  : DRZ file on disk

    """
    if output is None:
        if len(input) == 1:
            oname = fileutil.buildNewRootname(input[0])
        else:
            oname = 'final'
        _drzextn = '_drz.fits'
        if '_flc.fits' in input[0]:
            _drzextn = '_drc.fits'
        output = fileutil.buildNewRootname(oname,extn=_drzextn)
    else:
        if 'drz' not in output:
            output = fileutil.buildNewRootname(output,extn='_drz.fits')

    print('Setting up output name: ',output)

    # Open the first image of the excludedFileList to use as a template to build
    # the DRZ file.
    inputfile = parseinput.parseinput(input)[0]
    try:
        img = pyfits.open(inputfile[0])
    except Exception:
        raise IOError('Unable to open file %s \n' %inputfile)

    # Create the fitsobject
    fitsobj = pyfits.HDUList()
    # Copy the primary header
    hdu = img[0].copy()
    fitsobj.append(hdu)

    # Modify the 'NEXTEND' keyword of the primary header to 3 for the
    #'sci, wht, and ctx' extensions of the newly created file.
    fitsobj[0].header['NEXTEND'] = 3

    # Create the 'SCI' extension
    hdu = pyfits.ImageHDU(header=img['sci',1].header.copy(),data=None)
    hdu.header['EXTNAME'] = 'SCI'
    fitsobj.append(hdu)

    # Create the 'WHT' extension
    hdu = pyfits.ImageHDU(header=img['sci',1].header.copy(),data=None)
    hdu.header['EXTNAME'] = 'WHT'
    fitsobj.append(hdu)

    # Create the 'CTX' extension
    hdu = pyfits.ImageHDU(header=img['sci',1].header.copy(),data=None)
    hdu.header['EXTNAME'] = 'CTX'
    fitsobj.append(hdu)

    # Add HISTORY comments explaining the creation of this file.
    fitsobj[0].header.add_history("** Multidrizzle has created this empty DRZ **")
    fitsobj[0].header.add_history("** product because all input images were   **")
    fitsobj[0].header.add_history("** excluded from processing because their  **")
    fitsobj[0].header.add_history("** header EXPTIME values were 0.0.  If you **")
    fitsobj[0].header.add_history("** still wish to use this data make the    **")
    fitsobj[0].header.add_history("** EXPTIME values in the header non-zero.  **")

    # Change the filename in the primary header to reflect the name of the output
    # filename.
    fitsobj[0].header['FILENAME'] = str(output) #+"_drz.fits"

    # Change the ROOTNAME keyword to the ROOTNAME of the output PRODUCT
    _drzsuffix = 'drz'
    if 'drc' in output:
        _drzsuffix = 'drc'
    fitsobj[0].header['ROOTNAME'] = str(output.split('_%s.fits'%_drzsuffix)[0])
    print('self.output', output)
    # Modify the ASN_MTYP keyword to contain "PROD-DTH" so it can be properly
    # ingested into the archive catalog.
    fitsobj[0].header['ASN_MTYP'] = 'PROD-DTH'

    errstr =  "#############################################\n"
    errstr += "#                                           #\n"
    errstr += "# ERROR:                                    #\n"
    errstr += "#  Multidrizzle has created this empty DRZ  #\n"
    errstr += "#  product because all input images were    #\n"
    errstr += "#  excluded from processing because their   #\n"
    errstr += "#  header EXPTIME values were 0.0.  If you  #\n"
    errstr += "#  still wish to use this data make the     #\n"
    errstr += "#  EXPTIME values in the header non-zero.   #\n"
    errstr += "#                                           #\n"
    errstr += "#############################################\n\n"
    print(errstr)

    # If the file is already on disk delete it and replace it with the
    # new file
    dirfiles = os.listdir(os.curdir)
    if (dirfiles.count(output) > 0):
        os.remove(output)
        print("       Replacing "+output+"...")

    # Write out the empty DRZ file
    fitsobj.writeto(output)
    return
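
This older variant picks a '_drc' suffix when the first input is a CTE-corrected '_flc' file. A usage sketch; the filename is a placeholder:

# Usage sketch; with an _flc input and output=None the empty product
# is written with a _drc suffix. The filename is a placeholder.
buildEmptyDRZ(['j8cw03f6q_flc.fits'], None)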
Example #34
def run(scifile, dgeofile=None, output=False, match_sci=False, update=True,
        vmin=None, vmax=None, plot_offset=0, plot_samp=32):
    """
        This routine compares how well the sub-sampled DGEOFILE (generated
        using the 'makesmall' module) corrects the input science image as
        opposed to the full-size DGEOFILE.

        SYNTAX:
            import test_small_dgeo
            test_small_dgeo.run(scifile,dgeofile=None,output=False)

        where:
            scifile   - name of science image
            dgeofile  - name of full-sized DGEOFILE if not in DGEOFILE keyword
            output    - if True, write out differences to FITS file(s)

        The user can either specify the full-size DGEOFILE reference filename
        as the 'dgeofile' parameter or the code will look for the 'DGEOFILE'
        keyword in the primary header for the name of the full-sized reference
        file.

        The primary output will be a series of plots showing the difference images
        with the mean and stddev of the differences in the label of the image display.

        If the 'output' parameter is set to True, these differences
        will then be written out to FITS files based on the input science image
        rootname. Both the DX and DY differences for a single chip will be written
        out to the same file, with a separate file for each chip.

    """
    from stsci.tools import fileutil
    from stwcs import updatewcs

    if update:
        # update input SCI file to be consistent with reference files in header
        print('Updating input file ',scifile,' to be consistent with reference files listed in header...')
        updatewcs.updatewcs(scifile)
    # Now, get the original NPOLFILE and overwrite the data in the scifile
    # WCSDVARR extensions to remove the scaling by the linear terms imposed by
    # the SIP convention
    npolfile = fileutil.osfn(pyfits.getval(scifile,'NPOLFILE'))
    npolroot = os.path.split(npolfile)[1]
    dxextns = []
    for extn in pyfits.open(npolfile):
        if 'extname' in extn.header and extn.header['extname'] in ['DX','DY']:
            dxextns.append([extn.header['extname'],extn.header['extver']])
    #dxextns = [['dx',1],['dy',1],['dx',2],['dy',2]]
    ndxextns = len(dxextns)
    # Update input file with NPOLFILE arrays now
    print('Updating input file ',scifile,' with original ')
    print('    NPOLFILE arrays from ',npolfile)
    fsci = pyfits.open(scifile, mode='update')
    try:
        # Verify that the WCSDVARR extensions are present
        fsci.index_of(('wcsdvarr', 1))
    except KeyError:
        fsci.close()
        print('=====')
        print('ERROR: No WCSDVARR extensions found!')
        print('       Please make sure NPOLFILE is specified and run this task with "update=True".')
        print('=====')
        return
    # Replace WCSDVARR arrays here...
    for dxe,wextn in zip(dxextns,list(range(1,ndxextns+1))):
        fsci['wcsdvarr',wextn].data = pyfits.getdata(npolfile,dxe[0],dxe[1])
    # Now replace the NPOLEXT keyword value with a new one so that it will automatically
    # update with the correct file next time updatewcs is run.
    fsci['sci',1].header['npolext'] = npolroot
    print('Updated NPOLEXT with ',npolroot)
    fsci.close()
    print('\n=====')
    print('WARNING: Updated file ',scifile,' NO LONGER conforms to SIP convention!')
    print('         This file will need to be updated with updatewcs before using with MultiDrizzle.')
    print('=====\n')

    # Get info on full-size DGEOFILE
    if dgeofile is None:
        # read in full dgeofile from header
        fulldgeofile = pyfits.getval(scifile,'DGEOFILE')
    else:
        fulldgeofile = dgeofile

    print('Opening full-size DGEOFILE ',fulldgeofile,' for comparison.')
    fulldgeofile = fileutil.osfn(fulldgeofile)
    full_shape = [pyfits.getval(fulldgeofile,'NAXIS2','DX',1),pyfits.getval(fulldgeofile,'NAXIS1','DX',1)]

    filter_names = fileutil.getFilterNames(pyfits.getheader(scifile))

    detector = pyfits.getval(fulldgeofile,'DETECTOR')
    # count the number of chips in DGEOFILE
    xyfile = pyfits.open(scifile)
    numchips = 0
    ccdchip = []
    extname = xyfile[1].header['EXTNAME']
    for extn in xyfile:
        if 'extname' in extn.header and extn.header['extname'] == extname:
            numchips += 1
            if 'ccdchip' in extn.header:
                ccdchip.append(extn.header['ccdchip'])
            else:
                ccdchip.append(1)
    if not match_sci:
        ltv1 = 0
        ltv2 = 0
        nx = full_shape[1]
        ny = full_shape[0]
    else:
        nx = xyfile['sci',1].header['NAXIS1']
        ny = xyfile['sci',1].header['NAXIS2']
        ltv1 = xyfile['sci',1].header['ltv1']
        ltv2 = xyfile['sci',1].header['ltv2']

    grid = [nx,ny,1]
    print('grid of : ',nx,ny)
    xyfile.close()

    xarr,yarr = build_grid_arrays(nx,ny,1)
    xgarr = xarr.reshape(grid[1],grid[0])
    ygarr = yarr.reshape(grid[1],grid[0])

    # initialize plot here
    if has_matplotlib:
        pl.clf()
        pl.gray()

    for chip,det in zip(list(range(1,numchips+1)),ccdchip):

        xout,yout = transform_d2im_dgeo(scifile,chip,xarr,yarr)

        # Find the DGEOFILE extension whose CCDCHIP matches this chip
        dgeochip = 1
        dgeo = pyfits.open(fulldgeofile)
        for e in dgeo:
            if 'ccdchip' in e.header and e.header['ccdchip'] == det:
                dgeochip = e.header['extver']
                break
        dgeo.close()

        print('Matching sci,', chip, ' with DX,', dgeochip)
        dx = (xout-xarr).reshape(grid[1],grid[0])
        fulldatax = pyfits.getdata(fulldgeofile,'DX',dgeochip)
        diffx=(dx-fulldatax[-ltv2:-ltv2+ny,-ltv1:-ltv1+nx]).astype(np.float32)

        if has_matplotlib:
            pl.imshow(diffx, vmin=vmin, vmax=vmax)
            pl.title('dx-full_x: %s %s(DX,%d) with %g +/- %g' %
                     (filter_names, detector, dgeochip, diffx.mean(),
                       diffx.std()))
            pl.colorbar()

            if sys.version_info[0] < 3:
                raw_input("Press 'ENTER' to close figure and plot DY...")
            else:
                input("Press 'ENTER' to close figure and plot DY...")

            pl.close()

        dy = (yout-yarr).reshape(grid[1],grid[0])
        fulldatay = pyfits.getdata(fulldgeofile,'DY',dgeochip)
        diffy=(dy-fulldatay[-ltv2:-ltv2+ny,-ltv1:-ltv1+nx]).astype(np.float32)

        if has_matplotlib:
            pl.imshow(diffy,vmin=vmin,vmax=vmax)
            pl.title('dy-full_y: %s %s(DY,%d) with %g +/- %g ' %
                     (filter_names, detector, dgeochip, diffy.mean(),
                      diffy.std()))
            pl.colorbar()

            if sys.version_info[0] < 3:
                raw_input("Press 'ENTER' to close figure and show next chip...")
            else:
                input("Press 'ENTER' to close figure and show next chip...")

            pl.close()

        if output:
            # parse out rootname from input file if user wants results written to file
            outroot = fileutil.buildNewRootname(scifile)
            #
            # setup DGEOFILE ref file as template for each chip's output results
            # we only need dx,1 and dy,1 since each chip will be written out
            # to a separate file and since we will use this template for
            # writing out 2 different results files
            #
            fhdulist = pyfits.open(fulldgeofile)
            hdulist = pyfits.HDUList()
            hdulist.append(fhdulist[0])
            hdulist.append(fhdulist['dx',1])
            hdulist.append(fhdulist['dy',1])
            fhdulist.close()

            outname = outroot+'_sci'+str(chip)+'_dgeo_diffxy.match'
            if os.path.exists(outname): os.remove(outname)
            dxgarr = xgarr+diffx
            dygarr = ygarr+diffy
            wtraxyutils.write_xy_file(outname,[xgarr[plot_offset::plot_samp,plot_offset::plot_samp].flatten(),
                                                ygarr[plot_offset::plot_samp,plot_offset::plot_samp].flatten(),
                                                dxgarr[plot_offset::plot_samp,plot_offset::plot_samp].flatten(),
                                                dygarr[plot_offset::plot_samp,plot_offset::plot_samp].flatten()],format="%20.8f",append=True)

            outname = outroot+'_sci'+str(chip)+'_newfull_dxy.fits'
            if os.path.exists(outname): os.remove(outname)

            hdulist['dx',1].data = dx
            hdulist['dy',1].data = dy
            hdulist.writeto(outname)

            outname = outroot+'_sci'+str(chip)+'_diff_dxy.fits'
            if os.path.exists(outname): os.remove(outname)
            hdulist['dx',1].data = diffx
            hdulist['dy',1].data = diffy
            hdulist.writeto(outname)
            print('Created output file with differences named: ',outname)

        del dx,dy,diffx,diffy

    if output:
        hdulist.close()
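
# Editor's sketch: typical interactive use of run() above, following the
# SYNTAX block in its docstring. The filenames are placeholders for a
# calibrated exposure and its full-size DGEOFILE reference; both must
# exist on disk for the call to succeed.
import test_small_dgeo

test_small_dgeo.run('j8c0d1bwq_flt.fits',           # placeholder science image
                    dgeofile='qbu1641sj_dxy.fits',  # placeholder full-size DGEOFILE
                    output=True)                    # also write the difference FITS files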
Beispiel #35
0
def splitStis(stisfile, sci_count):
    """
    :Purpose: Split a STIS association file into multiple imset MEF files.

    Split the corresponding SPT file, if present, into single-imset SPT files.
    If the SPT file cannot be split or is missing, a warning is printed.

    Returns
    -------
    names: list
        a list of the names of the new flt files.

    """
    newfiles = []

    f = fits.open(stisfile)
    hdu0 = f[0].copy()


    for count in range(1,sci_count+1):
        fitsobj = fits.HDUList()
        fitsobj.append(hdu0)
        hdu = f[('sci',count)].copy()
        fitsobj.append(hdu)
        rootname = hdu.header['EXPNAME']
        newfilename = fileutil.buildNewRootname(rootname, extn='_flt.fits')
        try:
            # Verify error array exists
            if f[('err', count)].data is None:
                raise ValueError
            # Verify dq array exists
            if f[('dq', count)].data is None:
                raise ValueError
            # Copy the err extension
            hdu = f[('err',count)].copy()
            fitsobj.append(hdu)
            # Copy the dq extension
            hdu = f[('dq',count)].copy()
            fitsobj.append(hdu)
            fitsobj[1].header['EXTVER'] = 1
            fitsobj[2].header['EXTVER'] = 1
            fitsobj[3].header['EXTVER'] = 1
        except ValueError:
            print('\nWarning:')
            print('Extension version %d of the input file %s does not' % (count, stisfile))
            print('contain all required image extensions. Each imset must')
            print('contain populated SCI, ERR and DQ arrays.')

            continue


        # If the file to be created already exists on disk, replace it.
        if os.path.exists(newfilename):
            os.remove(newfilename)
            print("       Replacing " + newfilename + "...")

        # Write out the new file
        fitsobj.writeto(newfilename)
        newfiles.append(newfilename)
    f.close()

    sptfilename = fileutil.buildNewRootname(stisfile, extn='_spt.fits')
    try:
        sptfile = fits.open(sptfilename)
    except IOError:
        print('SPT file %s not found\n' % sptfilename)
        return newfiles

    if sptfile:
        hdu0 = sptfile[0].copy()
        try:
            for count in range(1,sci_count+1):
                fitsobj = fits.HDUList()
                fitsobj.append(hdu0)
                hdu = sptfile[count].copy()
                fitsobj.append(hdu)
                rootname = hdu.header['EXPNAME']
                newfilename = fileutil.buildNewRootname(rootname, extn='_spt.fits')
                fitsobj[1].header['EXTVER'] = 1
                if os.path.exists(newfilename):
                    os.remove(newfilename)
                    print("       Replacing "+newfilename+"...")

                # Write out the new file
                fitsobj.writeto(newfilename)
        except Exception:
            print("Warning: Unable to split spt file %s " % sptfilename)
        sptfile.close()

    return newfiles
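
# Editor's sketch: driving splitStis() by counting the SCI imsets first.
# count_sci_extensions() is a hypothetical helper written for illustration,
# and 'o8l7snmaq_flt.fits' is a placeholder STIS association file.
from astropy.io import fits

def count_sci_extensions(filename):
    # Count the extensions named SCI to get the number of imsets.
    with fits.open(filename) as f:
        return sum(1 for hdu in f
                   if hdu.header.get('EXTNAME', '').upper() == 'SCI')

stisfile = 'o8l7snmaq_flt.fits'
new_flts = splitStis(stisfile, count_sci_extensions(stisfile))
print('Created:', new_flts)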
Beispiel #36
0
def find_gsc_offset(obsname, refframe="ICRS"):
    """Find the GSC to GAIA offset based on guide star coordinates

    Parameters
    ----------
    obsname : str
        Full filename or (preferably) `astropy.io.fits.HDUList` object of
        image to be processed.

    refframe : str
        Reference frame for the guide star coordinates.
        Supported options: GSC1, ICRS(default)

    NOTES
    ------
    The default transform is GSC2-GAIA. The other options exist primarily for
    transforming individual objects from the catalogs and are only sparsely
    documented. The ipppssoot input is a special case: it pulls the gsids,
    epoch and refframe from the DMS databases and overrides the transform using this logic::

        REFFRAME=GSC1 sets GSC1-GAIA
        REFFRAME=ICRS and EPOCH < 2017.75 sets GSC2-GAIA
        REFFRAME=ICRS and EPOCH > 2017.75 sets no-offset since it's already in GAIA frame

    Returns
    -------
    deltas : dict
        Dict of offset, roll and scale in decimal degrees and pixels for image
        based on correction to guide star coordinates relative to GAIA.
        Keys: delta_x, delta_y, delta_ra, delta_dec, roll, scale, expwcs, catalog
    """
    # Check to see whether a URL has been specified as an
    # environment variable.
    if gsss_url_envvar in os.environ:
        gsss_serviceLocation = os.environ[gsss_url_envvar]
    else:
        gsss_serviceLocation = gsss_url

    # Initialize variables for cases where no offsets are available.
    delta_ra = delta_dec = 0.0
    delta_roll = 0.0
    delta_scale = 1.0
    dGSinputRA = dGSoutputRA = 0.0
    dGSinputDEC = dGSoutputDEC = 0.0
    outputCatalog = None

    # Ensure input is a fits.HDUList object if it was originally provided as a filename (str)
    close_obj = False
    if isinstance(obsname, str):
        obsname = fits.open(obsname)
        close_obj = True

    if 'rootname' in obsname[0].header:
        ippssoot = obsname[0].header['rootname'].upper()
    else:
        ippssoot = fileutil.buildNewRootname(obsname).upper()

    # Define what service needs to be used to get the offsets
    serviceType = "GSCConvert/GSCconvert.aspx"
    spec_str = "REFFRAME={}&IPPPSSOOT={}"
    spec = spec_str.format(refframe, ippssoot)
    serviceUrl = "{}/{}?{}".format(gsss_serviceLocation, serviceType, spec)
    rawcat = requests.get(serviceUrl)
    if not rawcat.ok:
        logger.warning(
            "Problem accessing service with:\n{}".format(serviceUrl))
        logger.warning("  No offset found! ")

    if rawcat.status_code == requests.codes.ok:
        logger.info("gsReference service retrieved {}".format(ippssoot))
        refXMLtree = etree.fromstring(rawcat.content)

        delta_ra = float(refXMLtree.findtext('deltaRA'))
        delta_dec = float(refXMLtree.findtext('deltaDEC'))
        delta_roll = float(refXMLtree.findtext('deltaROLL'))
        delta_scale = float(refXMLtree.findtext('deltaSCALE'))
        dGSinputRA = float(refXMLtree.findtext('dGSinputRA'))
        dGSinputDEC = float(refXMLtree.findtext('dGSinputDEC'))
        dGSoutputRA = float(refXMLtree.findtext('dGSoutputRA'))
        dGSoutputDEC = float(refXMLtree.findtext('dGSoutputDEC'))
        outputCatalog = refXMLtree.findtext('outputCatalog')

    # Use GS coordinate as reference point
    old_gs = (dGSinputRA, dGSinputDEC)
    new_gs = (dGSoutputRA, dGSoutputDEC)

    expwcs = build_reference_wcs(obsname)

    if delta_ra != 0.0 and delta_dec != 0.0:
        # Compute tangent plane for this observation
        wcsframe = expwcs.wcs.radesys.lower()

        # Use WCS to compute offset in pixels of shift applied to WCS Reference pixel
        # RA,Dec of ref pixel in decimal degrees
        crval = SkyCoord(expwcs.wcs.crval[0],
                         expwcs.wcs.crval[1],
                         unit='deg',
                         frame=wcsframe)

        # Define SkyCoord for Guide Star using old/original coordinates used to
        # originally compute WCS for exposure
        old_gs_coord = SkyCoord(old_gs[0],
                                old_gs[1],
                                unit='deg',
                                frame=wcsframe)
        sof_old = old_gs_coord.skyoffset_frame()
        # Define new SkyOffsetFrame based on new GS coords
        new_gs_coord = SkyCoord(new_gs[0],
                                new_gs[1],
                                unit='deg',
                                frame=wcsframe)
        # Determine offset from old GS position to the new GS position
        sof_new = new_gs_coord.transform_to(sof_old)
        # Compute new CRVAL position as old CRVAL+GS offset (sof_new)
        new_crval_coord = SkyCoord(sof_new.lon.arcsec,
                                   sof_new.lat.arcsec,
                                   unit='arcsecond',
                                   frame=crval.skyoffset_frame())
        # Return RA/Dec for new/updated CRVAL position
        new_crval = new_crval_coord.icrs

        # Compute offset in pixels for new CRVAL
        newpix = expwcs.all_world2pix(new_crval.ra.value, new_crval.dec.value,
                                      1)
        deltaxy = expwcs.wcs.crpix - newpix  # offset from ref pixel position

    else:
        deltaxy = (0., 0.)

    offsets = {
        'delta_x': deltaxy[0],
        'delta_y': deltaxy[1],
        'roll': delta_roll,
        'scale': delta_scale,
        'delta_ra': delta_ra,
        'delta_dec': delta_dec,
        'expwcs': expwcs,
        'catalog': outputCatalog
    }
    if close_obj:
        obsname.close()

    return offsets
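
# Editor's sketch: the guide-star correction above reduces to "shift CRVAL by
# the same sky offset the guide star moved". A self-contained version of that
# step, assuming only astropy; shift_crval() is a hypothetical helper name.
from astropy.coordinates import SkyCoord

def shift_crval(crval, old_gs, new_gs, frame='icrs'):
    """Return (ra, dec) in deg of CRVAL shifted by the old->new GS offset.

    crval, old_gs, new_gs : (ra_deg, dec_deg) tuples.
    """
    crval_c = SkyCoord(*crval, unit='deg', frame=frame)
    old_c = SkyCoord(*old_gs, unit='deg', frame=frame)
    new_c = SkyCoord(*new_gs, unit='deg', frame=frame)
    # Express the new GS position in a frame centered on the old GS position;
    # its (lon, lat) components are then the pure offset on the sky.
    offset = new_c.transform_to(old_c.skyoffset_frame())
    # Apply that offset in a frame centered on CRVAL and convert back to ICRS.
    shifted = SkyCoord(offset.lon, offset.lat,
                       frame=crval_c.skyoffset_frame()).icrs
    return shifted.ra.deg, shifted.dec.deg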
Beispiel #37
0
def process_input(input, output=None, ivmlist=None, updatewcs=True, prodonly=False, shiftfile=None):

    ivmlist = None
    oldasndict = None

    if not isinstance(input, list) and \
       ('_asn' in input or '_asc' in input):
        # Input is an association table
        # Get the input files, and run makewcs on them
        oldasndict = asnutil.readASNTable(input, prodonly=prodonly)
        if not output:
            output = oldasndict['output']

        filelist = [fileutil.buildRootname(fname) for fname in oldasndict['order']]

    elif not isinstance(input, list) and \
         input[0] == '@':
        # input is an @ file
        f = open(input[1:])
        # Read the first line in order to determine whether
        # IVM files have been specified in a second column...
        line = f.readline()
        f.close()
        # Parse the @-file with irafglob to extract the input filename
        filelist = irafglob.irafglob(input, atfile=atfile_sci)
        # If there is a second column...
        if len(line.split()) == 2:
            # ...parse out the names of the IVM files as well
            ivmlist = irafglob.irafglob(input, atfile=atfile_ivm)
    else:
        # input is a string or a python list
        filelist, output = parseinput.parseinput(input, outputname=output)

    # Sort the list of input files. This ensures the list has the same order
    # on all platforms; otherwise it can vary because listdir() returns
    # entries in inode order, not alphabetical order.
    filelist.sort()
    newfilelist, ivmlist = checkFiles(filelist, ivmlist)


    if not newfilelist:
        buildEmptyDRZ(input,output)
        return None, None, output

    #make an asn table at the end
    if updatewcs:
        pydr_input = runmakewcs(newfilelist)
    else:
        pydr_input = newfilelist

    # AsnTable will handle the case when output==None
    if not oldasndict:
        oldasndict = asnutil.ASNTable(pydr_input, output=output)
        oldasndict.create()

    if shiftfile:
        oldasndict.update(shiftfile=shiftfile)

    asndict = update_member_names(oldasndict, pydr_input)

    # Build output filename
    drz_extn = '_drz.fits'
    for img in newfilelist:
        # special case logic to automatically recognize when _flc.fits files
        # are provided as input and produce a _drc.fits file instead
        if '_flc.fits' in img:
            drz_extn = '_drc.fits'
            break

    if output in [None,'']:
        output = fileutil.buildNewRootname(asndict['output'],
                                           extn=drz_extn)
    else:
        if '.fits' in output.lower():
            pass
        elif drz_extn[:4] not in output.lower():
            output = fileutil.buildNewRootname(output, extn=drz_extn)

    print('Setting up output name: ',output)

    return asndict, ivmlist, output
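
# Editor's sketch: the three input forms process_input() accepts, shown with
# placeholder file names. Each call returns (asndict, ivmlist, output).
asndict, ivmlist, output = process_input('j8c0d1010_asn.fits')             # association table
asndict, ivmlist, output = process_input('@input_files.list')              # @-file, optional IVM column
asndict, ivmlist, output = process_input('*_flt.fits', output='mymosaic')  # wildcard string or list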
Beispiel #38
0
    def __init__(self,filename,group=None,inmemory=False):
        baseImageObject.__init__(self,filename)

        # fileutil.openImage returns a fits object
        try:
            self._image=fileutil.openImage(filename,clobber=False,memmap=0)

        except IOError:
            raise IOError("Unable to open file: %s" % filename)

        #populate the global attributes which are good for all the chips in the file
        #self._rootname=self._image['PRIMARY'].header["ROOTNAME"]
        self._rootname=fileutil.buildNewRootname(filename)
        self.outputNames=self._setOutputNames(self._rootname)

        # flag to indicate whether or not to write out intermediate products
        # to disk (default) or keep everything in memory
        self.inmemory = inmemory
        self._initVirtualOutputs()

        # exptime should be set by the image subclass code since it is kept
        # in different places for different instruments
        self._exptime = 1.  # default to avoid divide-by-zero

        #this is the number of science chips to be processed in the file
        self._numchips=self._countEXT(extname=self.scienceExt)

        self.proc_unit = None

        #self._nextend=self._image["PRIMARY"].header["NEXTEND"]
        self._nextend = self._countEXT(extname=None)

        if self._numchips == 0:
            #the simple fits image contains the data in the primary extension,
            #this will help us deal with the rest of the code that looks
            #and acts on chips :)
            #self._nextend=1
            self._numchips=1
            self.scienceExt="PRIMARY"
            self.maskExt=None
            self._image["PRIMARY"].header["EXTNAME"] = "PRIMARY"
            self._image["PRIMARY"].header["EXTVER"] = 1
            self._image["PRIMARY"].extnum = 0

        self._isSimpleFits = False

        # Clean out any stray MDRIZSKY keywords from PRIMARY headers
        fimg = fileutil.openImage(filename,mode='update')
        if 'MDRIZSKY' in fimg['PRIMARY'].header:
            del fimg['PRIMARY'].header['MDRIZSKY']
        fimg.close()
        del fimg

        if group not in [None,'']:
            # Only use selected chip
            if ',' in group:
                group_id = group.split(',')
                if group_id[0].isalpha(): # user specified a specific extname,extver
                    self.group = [int(group_id[1])]
                else: # user specified a list of extension numbers to process
                    self.group = []
                    for grp in group_id:
                        # find extname/extver which corresponds to this extension number
                        group_extname = self._image[int(grp)].header['EXTNAME']
                        group_extver = self._image[int(grp)].header['EXTVER']
                        self.group.append(group_extver)
            else:
                # find extname/extver which corresponds to this extension number
                group_extver = self._image[int(group)].header['EXTVER']
                self.group = [int(group_extver)]
        else:
            # Use all chips
            self.group = None

        if not self._isSimpleFits:

            #assign chip specific information
            for chip in range(1,self._numchips+1,1):

                self._assignRootname(chip)
                sci_chip = self._image[self.scienceExt,chip]

                # Set a flag to indicate whether this chip should be included
                # or not, based on user input from the 'group' parameter.
                if self.group is None or chip in self.group:
                    sci_chip.group_member = True
                    self._nmembers += 1
                else:
                    sci_chip.group_member = False

                sci_chip.signature = None

                sci_chip.dqname = None
                sci_chip.dqmaskname = None

                sci_chip.dqfile,sci_chip.dq_extn = self.find_DQ_extension()
                #self.maskExt = sci_chip.dq_extn
                if sci_chip.dqfile is not None:
                    sci_chip.dqname = sci_chip.dqfile +'['+sci_chip.dq_extn+','+str(chip)+']'

                # build up HSTWCS object for each chip, which will be necessary for drizzling operations
                sci_chip.wcs=wcs_functions.get_hstwcs(self._filename,self._image,sci_chip.extnum)
                sci_chip.detnum,sci_chip.binned = util.get_detnum(sci_chip.wcs,self._filename,chip)
                sci_chip.wcslin_pscale = 1.0

                # do not assume all chips in the file have the same dimensions
                sci_chip._naxis1=sci_chip.header["NAXIS1"]
                sci_chip._naxis2=sci_chip.header["NAXIS2"]

                # record the exptime values for this chip so that it can be
                # easily used to generate the composite value for the final output image
                sci_chip._expstart,sci_chip._expend = util.get_expstart(sci_chip.header,self._image['PRIMARY'].header)

                sci_chip.outputNames=self._setChipOutputNames(sci_chip.rootname,chip).copy() #this is a dictionary
                # Set the units: both bunit and in_units
                self.set_units(chip)

                #initialize gain, readnoise, and exptime attributes
                # the actual values will be set by each instrument based on
                # keyword names specific to that instrument by 'setInstrumentParamters()'
                sci_chip._headergain = 1 # gain value read from header
                sci_chip._gain = 1.0     # calibrated gain value
                sci_chip._rdnoise = 1.0  # calibrated readnoise
                sci_chip._exptime = 1.0
                sci_chip._effGain = 1.0
                sci_chip._conversionFactor = 1.0
                sci_chip._wtscl = 1.0

                # Keep track of the sky value that should be subtracted from this chip
                # Read in value from image header, in case user has already
                # determined the sky level
                if "MDRIZSKY" in sci_chip.header:
                    subsky = sci_chip.header['MDRIZSKY']
                    log.info('Reading in MDRIZSKY of %s' % subsky)
                else:
                    subsky = 0.0
                # .computedSky:   value to be applied by the
                #                 adrizzle/ablot steps.
                # .subtractedSky: value already (or will be by adrizzle/ablot)
                #                 subtracted from the image
                sci_chip.subtractedSky = subsky
                sci_chip.computedSky = subsky

                sci_chip.darkcurrent = 0.0

                # The following attributes are used when working with sub-arrays
                # and get reference file arrays for auto-generation of IVM masks
                try:
                    sci_chip.ltv1 = sci_chip.header['LTV1'] * -1
                    sci_chip.ltv2 = sci_chip.header['LTV2'] * -1
                except KeyError:
                    sci_chip.ltv1 = 0
                    sci_chip.ltv2 = 0
                if sci_chip.ltv1 < 0:
                    sci_chip.ltv1 = 0
                if sci_chip.ltv2 < 0:
                    sci_chip.ltv2 = 0
                sci_chip.size1 = sci_chip.header['NAXIS1'] + np.round(sci_chip.ltv1)
                sci_chip.size2 = sci_chip.header['NAXIS2'] + np.round(sci_chip.ltv2)
                #sci_chip.image_shape = (sci_chip.size2,sci_chip.size1)
                sci_chip.image_shape = (sci_chip.header['NAXIS2'],sci_chip.header['NAXIS1'])

                # Interpret the array dtype by translating the IRAF BITPIX value
                for dtype in IRAF_DTYPES.keys():
                    if sci_chip.header['BITPIX'] == IRAF_DTYPES[dtype]:
                        sci_chip.image_dtype = dtype
                        break

                if self.inmemory:
                    # read image data array into memory
                    shape = sci_chip.data.shape
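
# Editor's sketch: how the 'group' argument of the constructor above is
# interpreted, pulled out as a standalone helper for clarity. parse_group()
# is a hypothetical name; hdulist is any opened fits.HDUList.
def parse_group(group, hdulist):
    """Return a list of EXTVER numbers to process, or None for all chips."""
    if group in (None, ''):
        return None
    if ',' in group:
        parts = group.split(',')
        if parts[0].isalpha():
            # 'sci,2' style: a specific extname,extver pair
            return [int(parts[1])]
        # '1,4' style: extension numbers; map each to its EXTVER
        return [hdulist[int(p)].header['EXTVER'] for p in parts]
    # a single extension number
    return [int(hdulist[int(group)].header['EXTVER'])]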
Beispiel #39
0
def _update(image,idctab,nimsets,apply_tdd=False,
            quiet=None,instrument=None,prepend=None,nrchip=None, nrext=None):

    tdd_xyref = {1: [2048, 3072], 2:[2048, 1024]}
    _prepend = prepend
    _dqname = None
    # Make a copy of the header for keyword access
    # This copy includes both Primary header and
    # extension header
    hdr = fileutil.getHeader(image)

    # Get the instrument from the header (overriding any value passed in)
    instrument = readKeyword(hdr,'INSTRUME')

    binned = 1
    # Read in any specified OFFTAB, if present (WFPC2)
    offtab = readKeyword(hdr,'OFFTAB')
    dateobs = readKeyword(hdr,'DATE-OBS')
    if not quiet:
        print("OFFTAB, DATE-OBS: ",offtab,dateobs)

    print("-Updating image ",image)

    if not quiet:
        print("-Reading IDCTAB file ",idctab)

    # Get telescope orientation from image header
    # If PA_V3 is not present in the header, try to get it from the SPT file
    pvt = readKeyword(hdr,'PA_V3')
    if pvt is None:
        sptfile = fileutil.buildNewRootname(image, extn='_spt.fits')
        if os.path.exists(sptfile):
            spthdr = fileutil.getHeader(sptfile)
            pvt = readKeyword(spthdr,'PA_V3')
    if pvt is not None:
        pvt = float(pvt)
    else:
        print('PA_V3 keyword not found, WCS cannot be updated. Quitting ...')
        raise ValueError('PA_V3 keyword not found; WCS cannot be updated.')

    # Find out about instrument, detector & filters
    detector = readKeyword(hdr,'DETECTOR')

    Nrefchip=1
    if instrument == 'WFPC2':
        filter1 = readKeyword(hdr,'FILTNAM1')
        filter2 = readKeyword(hdr,'FILTNAM2')
        mode = readKeyword(hdr,'MODE')
        if os.path.exists(fileutil.buildNewRootname(image, extn='_c1h.fits')):
            _dqname = fileutil.buildNewRootname(image, extn='_c1h.fits')
            dqhdr = pyfits.getheader(_dqname,1)
            dqext = readKeyword(dqhdr, 'EXTNAME')
        if mode == 'AREA':
            binned = 2
        Nrefchip=nrchip
    elif instrument == 'NICMOS':
        filter1 = readKeyword(hdr,'FILTER')
        filter2 = None
    elif instrument == 'WFC3':
        filter1 = readKeyword(hdr,'FILTER')
        filter2 = None
        # use value of 'BINAXIS' keyword to set binning value for WFC3 data
        binned = readKeyword(hdr,'BINAXIS1')
    else:
        filter1 = readKeyword(hdr,'FILTER1')
        filter2 = readKeyword(hdr,'FILTER2')

    if filter1 is None or filter1.strip() == '': filter1 = 'CLEAR'
    else: filter1 = filter1.strip()
    if filter2 is None or filter2.strip() == '': filter2 = 'CLEAR'
    else: filter2 = filter2.strip()

    if filter1.find('CLEAR') == 0: filter1 = 'CLEAR'
    if filter2.find('CLEAR') == 0: filter2 = 'CLEAR'

    # Set up parity matrix for chip
    if instrument == 'WFPC2' or instrument =='STIS' or instrument == 'NICMOS':
        parity = PARITY[instrument]
    elif detector in PARITY:
        parity = PARITY[detector]
    else:
        raise ValueError('Detector %s not supported at this time. Exiting...'
                         % detector)

    # Get the VAFACTOR keyword if it exists, otherwise set it to 1.0
    _va_key = readKeyword(hdr,'VAFACTOR')
    if _va_key is not None:
        VA_fac = float(_va_key)
    else:
        VA_fac = 1.0

    if not quiet:
        print('VA factor: ',VA_fac)

    #ra_targ = float(readKeyword(hdr,'RA_TARG'))
    #dec_targ = float(readKeyword(hdr,'DEC_TARG'))

    # Get the chip number
    _c = readKeyword(hdr,'CAMERA')
    _s = readKeyword(hdr,'CCDCHIP')
    _d = readKeyword(hdr,'DETECTOR')
    if _c is not None and str(_c).isdigit():
        chip = int(_c)
    elif _s is None and _d is None:
        chip = 1
    else:
        if _s:
            chip = int(_s)
        elif str(_d).isdigit():
            chip = int(_d)
        else:
            chip = 1
    # For the ACS/WFC case the chip number doesn't match the image
    # extension
    nr = 1
    if (instrument == 'ACS' and detector == 'WFC') or (instrument == 'WFC3' and detector == 'UVIS'):
        if nimsets > 1:
            Nrefchip = 2
        else:
            Nrefchip = chip
    elif instrument == 'NICMOS':
        Nrefchip = readKeyword(hdr,'CAMERA')
    elif instrument == 'WFPC2':
        nr = nrext
    else:
        if nimsets > 1:
            nr = Nrefchip

    if not quiet:
        print("-PA_V3 : ",pvt," CHIP #",chip)


    # Extract the appropriate information from the IDCTAB
    #fx,fy,refpix,order=fileutil.readIDCtab(idctab,chip=chip,direction='forward',
    #            filter1=filter1,filter2=filter2,offtab=offtab,date=dateobs)
    idcmodel = models.IDCModel(idctab,
                               chip=chip, direction='forward', date=dateobs,
                               filter1=filter1, filter2=filter2, offtab=offtab, binned=binned,
                               tddcorr=apply_tdd)
    fx = idcmodel.cx
    fy = idcmodel.cy
    refpix = idcmodel.refpix
    order = idcmodel.norder

    # Determine whether to perform the time-dependent correction and
    # construct the matrices needed to correct the zero points for TDD
    if apply_tdd:
        #alpha,beta = mutil.compute_wfc_tdd_coeffs(dateobs,skew_coeffs)
        alpha = refpix['TDDALPHA']
        beta = refpix['TDDBETA']
        tdd = N.array([[beta, alpha], [alpha, -beta]])
        mrotp = fileutil.buildRotMatrix(2.234529)/2048.

    else:
        alpha = 0.0
        beta = 0.0

    # Get the original image WCS
    Old=wcsutil.WCSObject(image,prefix=_prepend)

    # Reset the WCS keywords to original archived values.
    Old.restore()

    #
    # Look for any subarray offset
    #
    ltv1,ltv2 = drutil.getLTVOffsets(image)
    #
    # If reference point is not centered on distortion model
    # shift coefficients to be applied relative to observation
    # reference position
    #
    offsetx = Old.crpix1 - ltv1 - refpix['XREF']
    offsety = Old.crpix2 - ltv2 - refpix['YREF']
    shiftx = refpix['XREF'] + ltv1
    shifty = refpix['YREF'] + ltv2
    if ltv1 != 0. or ltv2 != 0.:
        ltvoffx = ltv1 + offsetx
        ltvoffy = ltv2 + offsety
        offshiftx = offsetx + shiftx
        offshifty = offsety + shifty
    else:
        ltvoffx = 0.
        ltvoffy = 0.
        offshiftx = 0.
        offshifty = 0.

    if ltv1 != 0. or ltv2 != 0.:
        fx,fy = idcmodel.shift(idcmodel.cx,idcmodel.cy,offsetx,offsety)

    # Extract the appropriate information for reference chip

    ridcmodel = models.IDCModel(idctab,
                                chip=Nrefchip, direction='forward', date=dateobs,
                                filter1=filter1, filter2=filter2, offtab=offtab, binned=binned,
                                tddcorr=apply_tdd)
    rfx = ridcmodel.cx
    rfy = ridcmodel.cy
    rrefpix = ridcmodel.refpix
    rorder = ridcmodel.norder
    """
    rfx,rfy,rrefpix,rorder=mutil.readIDCtab(idctab,chip=Nrefchip,
        direction='forward', filter1=filter1,filter2=filter2,offtab=offtab,
        date=dateobs,tddcorr=apply_tdd)
    """
    # Create the reference image name
    rimage = image.split('[')[0]+"[sci,%d]" % nr
    if not quiet:
        print("Reference image: ",rimage)

    # Create the tangent plane WCS on which the images are defined
    # This is close to that of the reference chip
    R=wcsutil.WCSObject(rimage)
    R.write_archive(rimage)
    R.restore()

    # Read in the declination of the target (for computing orientation at aperture)
    # Note that this is from the reference image
    #dec = float(fileutil.getKeyword(rimage,'CRVAL2'))
    #crval1 = float(fileutil.getKeyword(rimage,'CRVAL1'))
    #crval1 = float(R.crval1)
    #crval2 = dec
    dec = float(R.crval2)

    # Get an approximate reference position on the sky
    rref = (rrefpix['XREF']+ltvoffx, rrefpix['YREF']+ltvoffy)

    crval1,crval2=R.xy2rd(rref)

    if apply_tdd:
        # Correct zero points for TDD
        tddscale = (R.pscale/fx[1][1])
        rxy0 = N.array([[tdd_xyref[Nrefchip][0]-2048.],[ tdd_xyref[Nrefchip][1]-2048.]])
        xy0 = N.array([[tdd_xyref[chip][0]-2048.], [tdd_xyref[chip][1]-2048.]])
        rv23_corr = N.dot(mrotp,N.dot(tdd,rxy0))*tddscale
        v23_corr = N.dot(mrotp,N.dot(tdd,xy0))*tddscale
    else:
        rv23_corr = N.array([[0],[0]])
        v23_corr = N.array([[0],[0]])

    # Convert the PA_V3 orientation to the orientation at the aperture
    # This is for the reference chip only - we use this for the
    # reference tangent plane definition
    # It has the same orientation as the reference chip
    v2ref = rrefpix['V2REF'] +  rv23_corr[0][0]*0.05
    v3ref = rrefpix['V3REF'] - rv23_corr[1][0]*0.05
    v2 = refpix['V2REF'] + v23_corr[0][0]*0.05
    v3 = refpix['V3REF'] - v23_corr[1][0] *0.05

    pv = wcsutil.troll(pvt,dec,v2ref,v3ref)

    # Add the chip rotation angle
    if rrefpix['THETA']:
        pv += rrefpix['THETA']


    # Set values for the rest of the reference WCS
    R.crval1=crval1
    R.crval2=crval2
    R.crpix1=0.0 + offshiftx
    R.crpix2=0.0 + offshifty

    R_scale=rrefpix['PSCALE']/3600.0
    R.cd11=parity[0][0] *  cos(pv*pi/180.0)*R_scale
    R.cd12=parity[0][0] * -sin(pv*pi/180.0)*R_scale
    R.cd21=parity[1][1] *  sin(pv*pi/180.0)*R_scale
    R.cd22=parity[1][1] *  cos(pv*pi/180.0)*R_scale

    ##print R
    R_cdmat = N.array([[R.cd11,R.cd12],[R.cd21,R.cd22]])

    if not quiet:
        print("  Reference Chip Scale (arcsec/pix): ",rrefpix['PSCALE'])

    # Offset and angle in V2/V3 from reference chip to
    # new chip(s) - converted to reference image pixels

    off = sqrt((v2-v2ref)**2 + (v3-v3ref)**2)/(R_scale*3600.0)

    # Here we must include the PARITY
    if v3 == v3ref:
        theta=0.0
    else:
        theta = atan2(parity[0][0]*(v2-v2ref),parity[1][1]*(v3-v3ref))

    if rrefpix['THETA']: theta += rrefpix['THETA']*pi/180.0

    dX=(off*sin(theta)) + offshiftx
    dY=(off*cos(theta)) + offshifty

    # Check to see whether we are working with GEIS or FITS input
    _fname,_iextn = fileutil.parseFilename(image)

    if _fname.find('.fits') < 0:
        # Input image is NOT a FITS file, so
        #     build a FITS name for its copy.
        _fitsname = fileutil.buildFITSName(_fname)
    else:
        _fitsname = None
    # Create a new instance of a WCS
    if _fitsname is None:
        _new_name = image
    else:
        _new_name = _fitsname+'['+str(_iextn)+']'

    #New=wcsutil.WCSObject(_new_name,new=yes)
    New = Old.copy()

    # Calculate new CRVALs and CRPIXs
    New.crval1,New.crval2=R.xy2rd((dX,dY))
    New.crpix1=refpix['XREF'] + ltvoffx
    New.crpix2=refpix['YREF'] + ltvoffy

    # Account for subarray offset
    # Angle of chip relative to chip
    if refpix['THETA']:
        dtheta = refpix['THETA'] - rrefpix['THETA']
    else:
        dtheta = 0.0

    # Create a small vector, in reference image pixel scale
    # There is no parity effect here ???
    delXX=fx[1,1]/R_scale/3600.
    delYX=fy[1,1]/R_scale/3600.
    delXY=fx[1,0]/R_scale/3600.
    delYY=fy[1,0]/R_scale/3600.

    # Convert to radians
    rr=dtheta*pi/180.0

    # Rotate the vectors
    dXX= cos(rr)*delXX - sin(rr)*delYX
    dYX= sin(rr)*delXX + cos(rr)*delYX

    dXY= cos(rr)*delXY - sin(rr)*delYY
    dYY= sin(rr)*delXY + cos(rr)*delYY

    # Transform to sky coordinates
    a,b=R.xy2rd((dX+dXX,dY+dYX))
    c,d=R.xy2rd((dX+dXY,dY+dYY))

    # Calculate the new CDs and convert to degrees
    New.cd11=diff_angles(a,New.crval1)*cos(New.crval2*pi/180.0)
    New.cd12=diff_angles(c,New.crval1)*cos(New.crval2*pi/180.0)
    New.cd21=diff_angles(b,New.crval2)
    New.cd22=diff_angles(d,New.crval2)

    # Apply the velocity aberration effect if applicable
    if VA_fac != 1.0:

        # First shift the CRVALs apart
#       New.crval1 = ra_targ + VA_fac*(New.crval1 - ra_targ)
#       New.crval2 = dec_targ + VA_fac*(New.crval2 - dec_targ)
        # First shift the CRVALs apart
        # This is now relative to the reference chip, not the
        # target position.
        New.crval1 = R.crval1 + VA_fac*diff_angles(New.crval1, R.crval1)
        New.crval2 = R.crval2 + VA_fac*diff_angles(New.crval2, R.crval2)

        # and scale the CDs
        New.cd11 = New.cd11*VA_fac
        New.cd12 = New.cd12*VA_fac
        New.cd21 = New.cd21*VA_fac
        New.cd22 = New.cd22*VA_fac

    New_cdmat = N.array([[New.cd11,New.cd12],[New.cd21,New.cd22]])

    # Store new one
    # archive=yes specifies to also write out archived WCS keywords
    # overwrite=no specifies do not overwrite any pre-existing archived keywords

    New.write(fitsname=_new_name,overwrite=no,quiet=quiet,archive=yes)
    if _dqname:
        _dq_iextn = _iextn.replace('sci', dqext.lower())
        _new_dqname = _dqname +'['+_dq_iextn+']'
        dqwcs = wcsutil.WCSObject(_new_dqname)
        dqwcs.write(fitsname=_new_dqname, wcs=New,overwrite=no,quiet=quiet, archive=yes)

    """ Convert distortion coefficients into SIP style
        values and write out to image (assumed to be FITS).
    """
    #First the CD matrix:
    f = refpix['PSCALE']/3600.0
    a = fx[1,1]/3600.0
    b = fx[1,0]/3600.0
    c = fy[1,1]/3600.0
    d = fy[1,0]/3600.0
    det = (a*d - b*c)*refpix['PSCALE']

    # Write to header
    fimg = fileutil.openImage(_new_name,mode='update')
    _new_root,_nextn = fileutil.parseFilename(_new_name)
    _new_extn = fileutil.getExtn(fimg,_nextn)


    # Transform the higher-order coefficients
    for n in range(order+1):
        for m in range(order+1):
            if n >= m and n>=2:

                # Form SIP-style keyword names
                Akey="A_%d_%d" % (m,n-m)
                Bkey="B_%d_%d" % (m,n-m)

                # Assign them values
                Aval= f*(d*fx[n,m]-b*fy[n,m])/det
                Bval= f*(a*fy[n,m]-c*fx[n,m])/det

                _new_extn.header.update(Akey,Aval)
                _new_extn.header.update(Bkey,Bval)

    # Update the SIP flag keywords as well
    #iraf.hedit(image,"CTYPE1","RA---TAN-SIP",verify=no,show=no)
    #iraf.hedit(image,"CTYPE2","DEC--TAN-SIP",verify=no,show=no)
    _new_extn.header.update("CTYPE1","RA---TAN-SIP")
    _new_extn.header.update("CTYPE2","DEC--TAN-SIP")

    # Finally we also need the order
    #iraf.hedit(image,"A_ORDER","%d" % order,add=yes,verify=no,show=no)
    #iraf.hedit(image,"B_ORDER","%d" % order,add=yes,verify=no,show=no)
    _new_extn.header.update("A_ORDER",order)
    _new_extn.header.update("B_ORDER",order)

    # Update header with additional keywords required for proper
    # interpretation of SIP coefficients by PyDrizzle.

    _new_extn.header.update("IDCSCALE",refpix['PSCALE'])
    _new_extn.header.update("IDCV2REF",refpix['V2REF'])
    _new_extn.header.update("IDCV3REF",refpix['V3REF'])
    _new_extn.header.update("IDCTHETA",refpix['THETA'])
    _new_extn.header.update("OCX10",fx[1][0])
    _new_extn.header.update("OCX11",fx[1][1])
    _new_extn.header.update("OCY10",fy[1][0])
    _new_extn.header.update("OCY11",fy[1][1])
    #_new_extn.header.update("TDDXOFF",rv23_corr[0][0] - v23_corr[0][0])
    #_new_extn.header.update("TDDYOFF",-(rv23_corr[1][0] - v23_corr[1][0]))

    # Report time-dependent coeffs, if computed
    if instrument == 'ACS' and detector == 'WFC':
        _new_extn.header.update("TDDALPHA",alpha)
        _new_extn.header.update("TDDBETA",beta)


    # Close image now
    fimg.close()
    del fimg
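
# Editor's sketch: the IDC -> SIP coefficient transform performed above,
# isolated as a pure function. sip_from_idc() is a hypothetical name; fx and
# fy are the (order+1, order+1) IDC coefficient numpy arrays in arcsec, and
# pscale is the reference pixel scale in arcsec/pix.
def sip_from_idc(fx, fy, pscale, order):
    """Return dicts of SIP A_p_q / B_p_q keyword values from IDC coefficients."""
    f = pscale / 3600.0
    a, b = fx[1, 1] / 3600.0, fx[1, 0] / 3600.0
    c, d = fy[1, 1] / 3600.0, fy[1, 0] / 3600.0
    det = (a * d - b * c) * pscale
    A, B = {}, {}
    for n in range(order + 1):
        for m in range(order + 1):
            if n >= m and n >= 2:
                # Same linear combinations used in the header-update loop above
                A['A_%d_%d' % (m, n - m)] = f * (d * fx[n, m] - b * fy[n, m]) / det
                B['B_%d_%d' % (m, n - m)] = f * (a * fy[n, m] - c * fx[n, m]) / det
    return A, B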