コード例 #1
0
def applyNpolCorr(fname, unpolcorr):
    """
    Determine whether non-polynomial distortion lookup tables should be added
    as extensions to the science file, based on the 'NPOLFILE' keyword in the
    primary header and the 'NPOLEXT' keyword in the first extension.

    This is a default correction and will always run in the pipeline.
    The file used to generate the extensions is recorded in the NPOLEXT
    keyword in the first science extension. If 'NPOLFILE' in the primary
    header is different from 'NPOLEXT' in the extension header and the file
    exists on disk and is a 'new type' npolfile, then the lookup tables will
    be updated as 'WCSDVARR' extensions.

    Parameters
    ----------
    fname : `~astropy.io.fits.HDUList`
        Input FITS file object.
    unpolcorr : bool
        User switch; the correction is applied only when this is also True.

    Returns
    -------
    bool
        True if the NPOL correction should be applied.

    Raises
    ------
    IOError
        If NPOLFILE is named in the primary header but not found on disk.
    """
    applyNPOLCorr = True
    try:
        # get NPOLFILE kw from primary header
        fnpol0 = fname[0].header['NPOLFILE']
        if fnpol0 == 'N/A':
            utils.remove_distortion(fname, "NPOLFILE")
            return False
        fnpol0 = fileutil.osfn(fnpol0)
        if not fileutil.findFile(fnpol0):
            # BUG FIX: the message was previously split into two statements,
            # so the formatted second half was discarded and only the
            # unformatted first half was logged.  Build one message.
            msg = ('"NPOLFILE" exists in primary header but file {0} not found. '
                   'Non-polynomial distortion correction will not be '
                   'applied.'.format(fnpol0))
            logger.critical(msg)
            raise IOError("NPOLFILE {0} not found".format(fnpol0))
        try:
            # get NPOLEXT kw from first extension header
            fnpol1 = fname[1].header['NPOLEXT']
            fnpol1 = fileutil.osfn(fnpol1)
            if fnpol1 and fileutil.findFile(fnpol1):
                if fnpol0 != fnpol1:
                    applyNPOLCorr = True
                else:
                    msg = """\n\tNPOLEXT with the same value as NPOLFILE found in first extension.
                             NPOL correction will not be applied."""
                    logger.info(msg)
                    applyNPOLCorr = False
            else:
                # npl file defined in first extension may not be found
                # but if a valid kw exists in the primary header, non-polynomial
                # distortion correction should be applied.
                applyNPOLCorr = True
        except KeyError:
            # the case of "NPOLFILE" kw present in primary header but "NPOLEXT" missing
            # in first extension header
            applyNPOLCorr = True
    except KeyError:
        logger.info('\n\t"NPOLFILE" keyword not found in primary header')
        return False

    if isOldStyleDGEO(fname, fnpol0):
        applyNPOLCorr = False
    return (applyNPOLCorr and unpolcorr)
コード例 #2
0
def applyNpolCorr(fname, unpolcorr):
    """
    Decide whether non-polynomial distortion lookup tables should be
    attached to the science file as 'WCSDVARR' extensions.

    The decision is based on the 'NPOLFILE' keyword in the primary header
    and the 'NPOLEXT' keyword in the first extension.  This is a default
    correction and always runs in the pipeline; the file used to generate
    the extensions is recorded in NPOLEXT in the first science extension.
    The tables are updated when NPOLFILE differs from NPOLEXT, the file
    exists on disk, and it is a 'new type' npolfile.
    """
    apply_corr = True
    try:
        # NPOLFILE in the primary header drives the whole decision.
        fnpol0 = fits.getval(fname, 'NPOLFILE')
        if fnpol0 == 'N/A':
            utils.remove_distortion(fname, "NPOLFILE")
            return False
        fnpol0 = fileutil.osfn(fnpol0)
        if not fileutil.findFile(fnpol0):
            msg = """\n\tKw "NPOLFILE" exists in primary header but file %s not found
                      Non-polynomial distortion correction will not be applied\n
                    """ % fnpol0
            logger.critical(msg)
            raise IOError("NPOLFILE {0} not found".format(fnpol0))
        try:
            # NPOLEXT in the first extension records the file used previously.
            fnpol1 = fileutil.osfn(fits.getval(fname, 'NPOLEXT', ext=1))
            # Skip only when the recorded file exists and matches NPOLFILE;
            # a missing or different NPOLEXT means the tables need updating.
            already_applied = (fnpol1 and fileutil.findFile(fnpol1)
                               and fnpol0 == fnpol1)
            if already_applied:
                msg = """\n\tNPOLEXT with the same value as NPOLFILE found in first extension.
                             NPOL correction will not be applied."""
                logger.info(msg)
                apply_corr = False
        except KeyError:
            # "NPOLFILE" present in primary header but "NPOLEXT" missing
            # from the first extension header: apply the correction.
            apply_corr = True
    except KeyError:
        logger.info('\n\t"NPOLFILE" keyword not found in primary header')
        return False

    if isOldStyleDGEO(fname, fnpol0):
        apply_corr = False
    return (apply_corr and unpolcorr)
コード例 #3
0
ファイル: __init__.py プロジェクト: brechmos-stsci/stwcs
def newIDCTAB(fname):
    """
    Report whether the IDCTAB named in the primary header differs from the
    one recorded in the first extension.

    When this is called we already know there's a kw IDCTAB in the header.

    Parameters
    ----------
    fname : str
        Name of the FITS file to inspect.

    Returns
    -------
    bool
        True if the primary-header IDCTAB differs from the first-extension
        one; False if they match or the extension has no IDCTAB keyword.
    """
    # BUG FIX: the HDUList was previously left open; use a context manager
    # so the file handle is always released.
    with fits.open(fname) as hdul:
        idctab = fileutil.osfn(hdul[0].header['IDCTAB'])
        try:
            # check for the presence of IDCTAB in the first extension
            oldidctab = fileutil.osfn(hdul[1].header['IDCTAB'])
        except KeyError:
            return False
    return idctab != oldidctab
コード例 #4
0
ファイル: __init__.py プロジェクト: jhunkeler/stwcs
def newIDCTAB(fname):
    """
    Check whether the primary-header IDCTAB and the first-extension IDCTAB
    disagree, i.e. whether the distortion table has been changed.

    When this is called we already know there's a kw IDCTAB in the header.

    Parameters
    ----------
    fname : str
        Name of the FITS file to inspect.

    Returns
    -------
    bool
        True when the two IDCTAB values differ; False when they match or
        the first extension carries no IDCTAB keyword.
    """
    # BUG FIX: the file handle returned by fits.open() was never closed;
    # a context manager guarantees it is released on every path.
    with fits.open(fname) as hdul:
        idctab = fileutil.osfn(hdul[0].header['IDCTAB'])
        try:
            # check for the presence of IDCTAB in the first extension
            oldidctab = fileutil.osfn(hdul[1].header['IDCTAB'])
        except KeyError:
            return False
    return idctab != oldidctab
コード例 #5
0
ファイル: hstwcs.py プロジェクト: brechmos-stsci/stwcs
    def readModel(self, update=False, header=None):
        """
        Read the distortion model from IDCTAB.

        If IDCTAB is not usable ('N/A', "", or not found on disk), and SIP
        coefficients plus first-order IDCTAB coefficients are present in the
        header, the idcmodel is restored from the header; otherwise
        self.idcmodel is set to None.

        Parameters
        ----------
        header : `astropy.io.fits.Header`
            fits extension header
        update : bool (False)
            if True - record the following IDCTAB quantities as header keywords:
            CX10, CX11, CY10, CY11, IDCSCALE, IDCTHETA, IDCXREF, IDCYREF,
            IDCV2REF, IDCV3REF
        """
        # 'IDCSCALE' marks a model previously serialized into the header.
        header_has_model = header is not None and 'IDCSCALE' in header

        if self.idctab in [None, '', ' ', 'N/A']:
            # No usable IDCTAB keyword - fall back to header coefficients.
            if header_has_model:
                self._readModelFromHeader(header)
            else:
                print("Distortion model is not available: IDCTAB=None\n")
                self.idcmodel = None
        elif not os.path.exists(fileutil.osfn(self.idctab)):
            # IDCTAB is named but the file is missing on disk.
            if header_has_model:
                self._readModelFromHeader(header)
            else:
                print('Distortion model is not available: IDCTAB file %s not found\n' % self.idctab)
                self.idcmodel = None
        else:
            self.readModelFromIDCTAB(header=header, update=update)
コード例 #6
0
ファイル: mdzhandler.py プロジェクト: bsugerman/drizzlepac
def getMdriztabParameters(files):
    """ Gets entry in MDRIZTAB where task parameters live.

        The MDRIZTAB table name is read from the primary header of the
        first file in `files`; no consistency checks across files are
        performed.  A row is selected by filter name (falling back to the
        'ANY' filter) and then by the number of input images.

        Parameters
        ----------
        files : list of str
            Input image filenames.

        Returns
        -------
        dict
            Interpreted task parameters from the selected MDRIZTAB row,
            with 'staticfile' removed.

        Raises
        ------
        KeyError
            If the primary header has no MDRIZTAB keyword.
        IOError
            If the MDRIZTAB file (or its directory) is missing or invalid.
    """
    _fileName = files[0]
    _header = fileutil.getHeader(_fileName)
    if 'MDRIZTAB' in _header:
        _tableName = _header['MDRIZTAB']
    else:
        raise KeyError("No MDRIZTAB found in file " + _fileName)

    _tableName = fileutil.osfn(_tableName)

    # Now get the filters from the primary header.
    _filters = fileutil.getFilterNames(_header)

    # Specifically check to see whether the MDRIZTAB file can be found
    mtab_path = os.path.split(_tableName)[0]  # protect against no path given for _tableName
    if mtab_path and not os.path.exists(mtab_path):  # check path first, if given
        raise IOError("Directory for MDRIZTAB '%s' could not be accessed!" % mtab_path)
    if not os.path.exists(_tableName):  # then check for the table itself
        raise IOError("MDRIZTAB table '%s' could not be found!" % _tableName)

    # Open MDRIZTAB file.
    try:
        _mdriztab = fits.open(_tableName, memmap=False)
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed SystemExit
        # and KeyboardInterrupt.
        raise IOError("MDRIZTAB table '%s' not valid!" % _tableName)

    try:
        # Look for matching rows based on filter name. If no
        # match, pick up rows for the default filter.
        _rows = _getRowsByFilter(_mdriztab, _filters)
        if not _rows:
            _rows = _getRowsByFilter(_mdriztab, 'ANY')

        # Now look for the row that matches the number of images.
        # The logic below assumes that rows for a given filter
        # are arranged in ascending order of the 'numimage' field.
        _nimages = len(files)
        _row = 0
        for i in _rows:
            _numimages = _mdriztab[1].data.field('numimages')[i]
            if _nimages >= _numimages:
                _row = i
        print('- MDRIZTAB: AstroDrizzle parameters read from row %s.' % (_row + 1))

        mpars = _mdriztab[1].data[_row]
    finally:
        # Close the reference table even if row selection fails.
        _mdriztab.close()

    interpreted = _interpretMdriztabPars(mpars)

    # 'staticfile' is handled elsewhere; drop it if present.
    interpreted.pop("staticfile", None)

    return interpreted
コード例 #7
0
ファイル: hstwcs.py プロジェクト: sean-lockwood/stwcs
    def readModel(self, update=False, header=None):
        """
        Reads distortion model from IDCTAB.

        If IDCTAB is not found ('N/A', "", or not found on disk), then
        if SIP coefficients and first order IDCTAB coefficients are present
        in the header, restore the idcmodel from the header.
        If not - assign None to self.idcmodel.

        Parameters
        ----------
        header : `astropy.io.fits.Header`
            fits extension header
        update : bool (False)
            if True - record the following IDCTAB quantities as header keywords:
            CX10, CX11, CY10, CY11, IDCSCALE, IDCTHETA, IDCXREF, IDCYREF,
            IDCV2REF, IDCV3REF
        """
        if self.idctab in [None, '', ' ', 'N/A']:
            # Keyword idctab is not present in header - check for sip coefficients
            # ('IDCSCALE' marks a model previously stored in the header).
            if header is not None and 'IDCSCALE' in header:
                self._readModelFromHeader(header)
            else:
                print("Distortion model is not available: IDCTAB=None\n")
                self.idcmodel = None
        elif not os.path.exists(fileutil.osfn(self.idctab)):
            # IDCTAB is named in the header but the file is not on disk;
            # fall back to the header-stored model if one is present.
            if header is not None and 'IDCSCALE' in header:
                self._readModelFromHeader(header)
            else:
                print(
                    'Distortion model is not available: IDCTAB file %s not found\n'
                    % self.idctab)
                self.idcmodel = None
        else:
            # IDCTAB file exists - read the full model from it.
            self.readModelFromIDCTAB(header=header, update=update)
コード例 #8
0
def isSupportedFilter(hdr):
    """
    Check whether the filter combination in a primary header is supported
    by the IDCTAB reference table that the header names.

    Parameters
    ----------
    hdr : `astropy.io.fits.Header`
        Primary header containing 'IDCTAB', 'FILTNAM1' and 'FILTNAM2'.

    Returns
    -------
    bool
        True if the IDCTAB supports this observation's filter mode.

    Raises
    ------
    IOError
        If the IDCTAB file cannot be opened.
    """
    idc = hdr['idctab']
    idcname = fileutil.osfn(idc)
    filter1 = hdr['FILTNAM1']
    filter2 = hdr['FILTNAM2']

    try:
        idctab = fileutil.openImage(idcname)
    except Exception:
        # BUG FIX: was a bare 'except: raise IOError', which discarded the
        # original failure and raised an IOError with no message.  Implicit
        # exception chaining preserves the cause for debugging.
        raise IOError("Could not open IDCTAB file '{0}'".format(idcname))

    try:
        colnames = idctab[1].columns.names
        if 'FILTER1' in colnames and 'FILTER2' in colnames:
            # 2 filter IDCTAB, all filter modes should be supported
            val = True
        else:
            # 1 filter IDCTAB, check to see whether it is a supported filter and
            # that input is not a 2 filter observation
            filters = idctab[1].data.field('FILTER')
            val = filter1 in filters and not filter2.strip()
    finally:
        # Close the table even if the column/data access above fails.
        idctab.close()

    return val
コード例 #9
0
def foundIDCTAB(fname):
    """
    Look for an "IDCTAB" keyword in the primary header.

    Parameters
    ----------
    fname : `~astropy.io.fits.HDUList`
        Input FITS file object.

    Returns
    -------
    status : bool
        If False : MakeWCS, CompSIP and TDDCorr should not be applied.
        If True : there's no restriction on corrections, they all should be applied.

    Raises
    ------
    IOError : If IDCTAB file not found on disk.
    """
    header = fname[0].header
    if 'IDCTAB' not in header:
        # No IDCTAB keyword at all.
        return False

    idctab = header['IDCTAB'].strip()
    if idctab in ('N/A', ''):
        # Table deliberately unset - same as missing.
        return False

    idctab = fileutil.osfn(idctab)
    if not os.path.exists(idctab):
        raise IOError("IDCTAB file {0} not found".format(idctab))
    return True
コード例 #10
0
def foundIDCTAB(fname):
    """
    This functions looks for an "IDCTAB" keyword in the primary header.

    Parameters
    ----------
    fname : `~astropy.io.fits.HDUList`
        Input FITS file object.

    Returns
    -------
    status : bool
        If False : MakeWCS, CompSIP and TDDCorr should not be applied.
        If True : there's no restriction on corrections, they all should be applied.

    Raises
    ------
    IOError : If IDCTAB file not found on disk.
    """

    try:
        # 'N/A' or an empty value means the table was deliberately left
        # unspecified - treat it the same as a missing keyword.
        idctab = fname[0].header['IDCTAB'].strip()
        if idctab == 'N/A' or idctab == "":
            return False
    except KeyError:
        # No IDCTAB keyword in the primary header at all.
        return False
    # Expand any IRAF/environment variables in the file name.
    idctab = fileutil.osfn(idctab)
    if os.path.exists(idctab):
        return True
    else:
        raise IOError("IDCTAB file {0} not found".format(idctab))
コード例 #11
0
def getMdriztabParameters(files):
    """ Gets entry in MDRIZTAB where task parameters live.

        The MDRIZTAB table name is read from the primary header of the
        first file in `files`; no consistency checks across files are
        performed.  A row is selected by filter name (falling back to the
        'ANY' filter) and then by the number of input images.

        Parameters
        ----------
        files : list of str
            Input image filenames.

        Returns
        -------
        dict
            Interpreted task parameters from the selected MDRIZTAB row,
            with 'staticfile' removed.

        Raises
        ------
        KeyError
            If the primary header has no MDRIZTAB keyword.
        IOError
            If the MDRIZTAB file (or its directory) is missing or invalid.
    """
    _fileName = files[0]
    _header = fileutil.getHeader(_fileName)
    if 'MDRIZTAB' in _header:
        _tableName = _header['MDRIZTAB']
    else:
        raise KeyError("No MDRIZTAB found in file " + _fileName)

    _tableName = fileutil.osfn(_tableName)

    # Now get the filters from the primary header.
    _filters = fileutil.getFilterNames(_header)

    # Specifically check to see whether the MDRIZTAB file can be found
    mtab_path = os.path.split(_tableName)[0]  # protect against no path given for _tableName
    if mtab_path and not os.path.exists(mtab_path):  # check path first, if given
        raise IOError("Directory for MDRIZTAB '%s' could not be accessed!" % mtab_path)
    if not os.path.exists(_tableName):  # then check for the table itself
        raise IOError("MDRIZTAB table '%s' could not be found!" % _tableName)

    # Open MDRIZTAB file.
    try:
        # memmap=False matches the sibling implementation of this function
        # and keeps row data valid after the table is closed below.
        _mdriztab = fits.open(_tableName, memmap=False)
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed SystemExit
        # and KeyboardInterrupt.
        raise IOError("MDRIZTAB table '%s' not valid!" % _tableName)

    try:
        # Look for matching rows based on filter name. If no
        # match, pick up rows for the default filter.
        _rows = _getRowsByFilter(_mdriztab, _filters)
        if not _rows:
            _rows = _getRowsByFilter(_mdriztab, 'ANY')

        # Now look for the row that matches the number of images.
        # The logic below assumes that rows for a given filter
        # are arranged in ascending order of the 'numimage' field.
        _nimages = len(files)
        _row = 0
        for i in _rows:
            _numimages = _mdriztab[1].data.field('numimages')[i]
            if _nimages >= _numimages:
                _row = i
        print('- MDRIZTAB: AstroDrizzle parameters read from row %s.' % (_row + 1))

        mpars = _mdriztab[1].data[_row]
    finally:
        # Close the reference table even if row selection fails.
        _mdriztab.close()

    interpreted = _interpretMdriztabPars(mpars)

    # 'staticfile' is handled elsewhere; drop it if present.
    interpreted.pop("staticfile", None)

    return interpreted
コード例 #12
0
def applyTDDCorr(fname, utddcorr):
    """
    The default value of tddcorr for all ACS images is True.
    This correction will be performed if all conditions below are True:
    - the user did not turn it off on the command line
    - the detector is WFC
    - the idc table specified in the primary header is available.

    Parameters
    ----------
    fname : str
        Name of the FITS file whose primary header is examined.
    utddcorr : bool
        User switch for the time-dependent distortion correction.

    Returns
    -------
    bool
        True if the TDD correction should be applied.
    """
    phdr = fits.getheader(fname)
    instrument = phdr['INSTRUME']
    try:
        detector = phdr['DETECTOR']
    except KeyError:
        detector = None
    try:
        tddswitch = phdr['TDDCORR']
    except KeyError:
        # Absent switch defaults to performing the correction.
        tddswitch = 'PERFORM'

    if not (instrument == 'ACS' and detector == 'WFC' and
            utddcorr == True and tddswitch == 'PERFORM'):
        return False

    try:
        idctab = phdr['IDCTAB']
    except KeyError:
        # BUG FIX: previously execution fell through to
        # os.path.exists(fileutil.osfn(idctab)) with 'idctab' unbound,
        # raising NameError instead of returning False.
        return False

    # Apply the correction only if the IDCTAB file actually exists on disk.
    return os.path.exists(fileutil.osfn(idctab))
コード例 #13
0
def applyTDDCorr(fname, utddcorr):
    """
    Decide whether the time-dependent distortion (TDD) correction applies.

    The default value of tddcorr for all ACS images is True.
    This correction will be performed if all conditions below are True:
    - the user did not turn it off on the command line
    - the detector is WFC
    - the idc table specified in the primary header is available.

    Parameters
    ----------
    fname : str
        Name of the FITS file whose primary header is examined.
    utddcorr : bool
        User switch for the TDD correction.

    Returns
    -------
    bool
        True if the TDD correction should be applied.
    """
    phdr = fits.getheader(fname)
    instrument = phdr['INSTRUME']
    try:
        detector = phdr['DETECTOR']
    except KeyError:
        detector = None
    try:
        tddswitch = phdr['TDDCORR']
    except KeyError:
        # Absent switch defaults to performing the correction.
        tddswitch = 'PERFORM'

    if not (instrument == 'ACS' and detector == 'WFC' and
            utddcorr == True and tddswitch == 'PERFORM'):
        return False

    try:
        idctab = phdr['IDCTAB']
    except KeyError:
        # BUG FIX: without this early return, 'idctab' was referenced
        # below while unbound, raising NameError instead of returning False.
        return False

    # Apply the correction only if the IDCTAB file actually exists on disk.
    return os.path.exists(fileutil.osfn(idctab))
コード例 #14
0
ファイル: tweakutils.py プロジェクト: bsugerman/drizzlepac
def parse_exclusions(exclusions):
    """ Read in exclusion definitions from file named by 'exclusions'
        and return a list of positions and distances
    """
    fname = fileutil.osfn(exclusions)
    if not os.path.exists(fname):
        print('No valid exclusions file "',fname,'" could be found!')
        print('Skipping application of exclusions files to source catalogs.')
        return None

    with open(fname) as fobj:
        flines = fobj.readlines()

    # Parse out lines which can be interpreted as positions and distances
    exclusion_list = []
    units = None
    for raw in flines:
        if raw[0] == '#' or 'global' in raw[:6]:
            continue
        # Keep only the part of the line that precedes any inline comment.
        line = raw.split('#')[0].rstrip() if '#' in raw else raw

        if units is None:
            # The first interpretable line declares the coordinate system;
            # it is consumed here and not parsed as a position.
            units = 'pixels'
            if line[:3] in ('fk4', 'fk5', 'sky'):
                units = 'sky'
            elif line[:5] in ('image', 'physi', 'pixel'):
                units = 'pixels'
            continue

        if 'circle(' in line:
            # ds9-style region line: circle(x, y, radius)
            body = line.replace('circle(', '').replace(')', '').replace('"', '')
            vals = body.split(',')
            if ':' in vals[0]:
                # Sexagesimal coordinates stay as a single string.
                posval = vals[0] + ' ' + vals[1]
            else:
                posval = (float(vals[0]), float(vals[1]))
        else:
            # Try to interpret unformatted line
            sep = ',' if ',' in line else ' '
            vals = line.split(sep)
            if len(vals) != 3:
                continue
            if ':' in vals[0]:
                posval = vals[0] + ' ' + vals[1]
            else:
                posval = (float(vals[0]), float(vals[1]))

        exclusion_list.append({'pos': posval, 'distance': float(vals[2]),
                               'units': units})
    return exclusion_list
コード例 #15
0
def parse_exclusions(exclusions):
    """ Read in exclusion definitions from file named by 'exclusions'
        and return a list of positions and distances

        Each returned entry is a dict with keys 'pos' (an (x, y) tuple or
        a sky-coordinate string), 'distance' (float) and 'units'
        ('pixels' or 'sky').  Returns None if the file cannot be found.
    """
    fname = fileutil.osfn(exclusions)
    if os.path.exists(fname):
        fobj = open(fname)
        flines = fobj.readlines()
        fobj.close()
    else:
        print('No valid exclusions file "',fname,'" could be found!')
        print('Skipping application of exclusions files to source catalogs.')
        return None

    # Parse out lines which can be interpreted as positions and distances
    exclusion_list = []
    units = None
    for line in flines:
        if line[0] == '#' or 'global' in line[:6]:
            continue
        # Only interpret the part of the line prior to the comment
        # if a comment has been attached to the line
        if '#' in line:
            line = line.split('#')[0].rstrip()

        if units is None:
            # The first interpretable line declares the coordinate system
            # (region-file style); it is consumed and not parsed further.
            units='pixels'
            if line[:3] in ['fk4','fk5','sky']:
                units = 'sky'
            if line[:5] in ['image','physi','pixel']:
                units = 'pixels'
            continue

        if 'circle(' in line:
            # ds9-style region line: circle(x, y, radius)
            nline = line.replace('circle(','')
            nline = nline.replace(')','')
            nline = nline.replace('"','')
            vals = nline.split(',')
            if ':' in vals[0]:
                # Sexagesimal coordinates - keep as a single string.
                posval = vals[0]+' '+vals[1]
            else:
                posval = (float(vals[0]),float(vals[1]))
        else:
            # Try to interpret unformatted line
            if ',' in line:
                split_tok = ','
            else:
                split_tok=' '
            vals = line.split(split_tok)
            if len(vals) == 3:
                if ':' in vals[0]:
                    posval = vals[0]+' '+vals[1]
                else:
                    posval = (float(vals[0]),float(vals[1]))
            else:
                # Not a recognizable position/distance line - skip it.
                continue
        exclusion_list.append({'pos':posval,'distance':float(vals[2]),
                                    'units':units})
    return exclusion_list
コード例 #16
0
ファイル: updatehdr.py プロジェクト: geoffcfchen/drizzlepac
def update_from_shiftfile(shiftfile,wcsname=None,force=False):
    """
    Update headers of all images specified in shiftfile with shifts
    from shiftfile.

    Parameters
    ----------
    shiftfile : str
        Filename of shiftfile.

    wcsname : str
        Label to give to new WCS solution being created by this fit. If
        a value of None is given, it will automatically use 'TWEAK' as the
        label. [Default =None]

    force : bool
        Update header even though WCS already exists with this solution or
        wcsname? [Default=False]

    """
    f = open(fileutil.osfn(shiftfile))
    shift_lines = [x.strip() for x in f.readlines()]
    f.close()

    # interpret header of shift file
    # NOTE(review): if no 'refimage'/'reference' line is present, 'refimage'
    # remains unbound and the call below raises NameError - the format
    # appears to require such a header line; confirm against the callers.
    # NOTE(review): when '[wcs]' is absent, find() returns -1 and the slice
    # drops the last character of the name - presumably '[wcs]' is always
    # written by the shiftfile producer; verify.
    for line in shift_lines:
        if 'refimage' in line or 'reference' in line:
            refimage = line.split(':')[-1]
            refimage = refimage[:refimage.find('[wcs]')].lstrip()
            break

    # Determine the max length in the first column (filenames)
    # so the structured dtype below can hold the longest name.
    fnames = []
    for row in shift_lines:
        if row[0] == '#': continue
        fnames.append(len(row.split(' ')[0]))
    fname_fmt = 'S{0}'.format(max(fnames))

    # Now read in numerical values from shiftfile
    # First try the 7-column layout (with xrms/yrms); on IndexError fall
    # back to the 5-column layout and zero-fill the missing rms columns.
    type_list = {'names':('fnames','xsh','ysh','rot','scale','xrms','yrms'),
                 'formats':(fname_fmt,'f4','f4','f4','f4','f4','f4')}
    try:
        sdict = np.loadtxt(shiftfile,dtype=type_list,unpack=False)
    except IndexError:
        tlist = {'names':('fnames','xsh','ysh','rot','scale'),
                     'formats':(fname_fmt,'f4','f4','f4','f4')}
        s = np.loadtxt(shiftfile,dtype=tlist,unpack=False)
        sdict = np.zeros([s['fnames'].shape[0],],dtype=type_list)
        for sname in s.dtype.names:
            sdict[sname] = s[sname]

    # Apply each row's shift to the corresponding image header.
    for img in sdict:
        updatewcs_with_shift(img['fnames'], refimage, wcsname=wcsname,
                rot=img['rot'], scale=img['scale'],
                xsh=img['xsh'], ysh=img['ysh'],
                xrms=img['xrms'], yrms=img['yrms'],
                force=force)
コード例 #17
0
ファイル: updatehdr.py プロジェクト: srodney/drizzlepac
def update_from_shiftfile(shiftfile, wcsname=None, force=False):
    """
    Update headers of all images specified in shiftfile with shifts
    from shiftfile.

    Parameters
    ----------
    shiftfile : str
        Filename of shiftfile.

    wcsname : str
        Label to give to new WCS solution being created by this fit. If
        a value of None is given, it will automatically use 'TWEAK' as the
        label. [Default =None]

    force : bool
        Update header even though WCS already exists with this solution or
        wcsname? [Default=False]

    Raises
    ------
    ValueError
        If a data line does not have 5 or 7 columns.
    """
    with open(fileutil.osfn(shiftfile)) as f:
        lines = f.readlines()

    refimage = None
    shift_info = {}

    for line in lines:
        line = line.strip()
        if not line or line.startswith('#'):
            continue

        # BUG FIX: this previously tested 'refimage is not None', which can
        # never be true before the reference line has been seen, so the
        # reference image was never extracted from the file.
        if refimage is None and ('refimage' in line
                                 or 'reference' in line):
            refimage = (line.split(':')[-1]).strip()
            idx = refimage.find('[wcs]')
            if idx >= 0:
                refimage = refimage[:idx].lstrip()
            continue

        cols = list(map(str.strip, line.split()))

        if len(cols) not in [5, 7]:
            raise ValueError("Unsupported shift file format: invalid number "
                             "of columns.")

        # Map the numeric columns onto their names for this image.
        shift_info[cols[0]] = {
            k: float(v)
            for k, v in zip(_SHIFT_COLNAMES, cols[1:])
        }

    # BUG FIX: iterating the dict directly yields only the keys and fails
    # to unpack into (filename, pars); iterate over items() instead.
    for filename, pars in shift_info.items():
        updatewcs_with_shift(filename,
                             refimage,
                             wcsname=wcsname,
                             force=force,
                             **pars)
コード例 #18
0
ファイル: stisData.py プロジェクト: sosey/drizzlepac
    def getflat(self, chip):
        """
        Method for retrieving a detector's flat field. For STIS there are three.
        This method will return an array the same shape as the image.

        Parameters
        ----------
        chip : int
            Chip/extension number of the science data.

        Returns
        -------
        numpy.ndarray
            Product of the low-order (LFLTFILE) and pixel-to-pixel
            (PFLTFILE) flats; unity where a reference file is missing.
        """
        sci_chip = self._image[self.scienceExt, chip]
        # Reference-file extensions are addressed as '<errExt>,<chip>'.
        exten = self.errExt + ',' + str(chip)

        # The keyword for STIS flat fields in the primary header of the flt

        lflatfile = fileutil.osfn(self._image["PRIMARY"].header['LFLTFILE'])
        pflatfile = fileutil.osfn(self._image["PRIMARY"].header['PFLTFILE'])

        # Try to open the file in the location specified by LFLTFILE.
        try:
            handle = fileutil.openImage(lflatfile,
                                        mode='readonly',
                                        memmap=False)
            hdu = fileutil.getExtn(handle, extn=exten)
            lfltdata = hdu.data
            # Presumably the low-order flat can be stored binned/smaller
            # than the science array - expand it to full size if needed.
            if lfltdata.shape != self.full_shape:
                lfltdata = interp2d.expand2d(lfltdata, self.full_shape)
        except IOError:
            # Missing reference file: fall back to a unity flat.
            lfltdata = np.ones(self.full_shape, dtype=sci_chip.data.dtype)
            print("Cannot find file '{:s}'. Treating flatfield constant value "
                  "of '1'.\n".format(lflatfile))

        # Try to open the file in the location specified by PFLTFILE.
        try:
            handle = fileutil.openImage(pflatfile,
                                        mode='readonly',
                                        memmap=False)
            hdu = fileutil.getExtn(handle, extn=exten)
            pfltdata = hdu.data
        except IOError:
            # Missing reference file: fall back to a unity flat.
            pfltdata = np.ones(self.full_shape, dtype=sci_chip.data.dtype)
            print("Cannot find file '{:s}'. Treating flatfield constant value "
                  "of '1'.\n".format(pflatfile))

        # Combined flat is the elementwise product of the two components.
        flat = lfltdata * pfltdata

        return flat
コード例 #19
0
ファイル: drutil.py プロジェクト: spacetelescope/pydrizzle
def getIDCFile(image, keyword="", directory=None):
    """
    Determine the IDC (distortion coefficients) table for an image.

    Parameters
    ----------
    image : str or header object
        Image file name, or an already-opened primary/extension header
        from which the IDCTAB information can be read.
    keyword : str, optional
        Header keyword holding the IDCTAB name, or 'HEADER' if the IDCTAB
        name needs to be built from scratch (as for WFPC2 data), or '' for
        no table at all.
    directory : str, optional
        Directory with the default drizzle coefficients tables
        (default: 'drizzle$coeffs').

    Returns
    -------
    (idcfile, idctype) : tuple
        Fully expanded file name (or None when unavailable) and its type.
    """
    # Idiom fixes: isinstance() instead of type comparison, and
    # 'is None' instead of '== None'; behavior is unchanged.
    if isinstance(image, str):
        # We were provided an image name, so read in the header.
        header = fileutil.getHeader(image)
    else:
        # Otherwise we were given a header object we can work with directly.
        header = image

    if keyword.lower() == 'header':
        idcfile, idctype = __getIDCTAB(header)
        if idcfile is None:
            idcfile, idctype = __buildIDCTAB(header, directory)

    elif keyword.lower() == 'idctab':
        # Keyword specifies the header keyword with the IDCTAB name.
        idcfile, idctype = __getIDCTAB(header)

    elif keyword == '':
        idcfile = None
        idctype = None
    else:
        # Need to build the IDCTAB file name from scratch.
        idcfile, idctype = __buildIDCTAB(header, directory, kw=keyword)

    # Account for possible absence of the IDCTAB name in the header.
    if idcfile == 'N/A':
        idcfile = None

    if idcfile:
        # Recursively expand any IRAF symbols to full paths.
        idcfile = fileutil.osfn(idcfile)

    if idcfile is None:
        print('WARNING: No valid distortion coefficients available!')
        print('Using default unshifted, unscaled, unrotated model.')

    return idcfile, idctype
コード例 #20
0
ファイル: stisData.py プロジェクト: brechmos-stsci/drizzlepac
    def getflat(self, chip):
        """
        Method for retrieving a detector's flat field. For STIS there are three.
        This method will return an array the same shape as the image.

        Parameters
        ----------
        chip : int
            Chip number (FITS ``EXTVER``).

        Returns
        -------
        numpy.ndarray
            Product of the low-order (LFLTFILE) and pixel-to-pixel
            (PFLTFILE) flats; missing files are replaced by a unit flat.
        """
        sci_chip = self._image[self.scienceExt, chip]
        exten = self.errExt + ',' + str(chip)

        # The keywords for STIS flat fields are in the primary header of
        # the flt file; osfn expands IRAF-style environment prefixes.
        lflatfile = fileutil.osfn(self._image["PRIMARY"].header['LFLTFILE'])
        pflatfile = fileutil.osfn(self._image["PRIMARY"].header['PFLTFILE'])

        # Try to open the file in the location specified by LFLTFILE.
        try:
            handle = fileutil.openImage(lflatfile, mode='readonly',
                                        memmap=False)
            hdu = fileutil.getExtn(handle, extn=exten)
            lfltdata = hdu.data
            if lfltdata.shape != self.full_shape:
                lfltdata = interp2d.expand2d(lfltdata, self.full_shape)
        except IOError:
            # BUG FIX: the original referenced the undefined name
            # 'filename' (NameError inside the handler), shadowed the
            # builtin 'str', and used a bare 'except'.
            lfltdata = np.ones(self.full_shape, dtype=sci_chip.image_dtype)
            print("Cannot find file " + lflatfile +
                  ".  Treating flatfield constant value of '1'.\n")

        # Try to open the file in the location specified by PFLTFILE.
        try:
            handle = fileutil.openImage(pflatfile, mode='readonly',
                                        memmap=False)
            hdu = fileutil.getExtn(handle, extn=exten)
            pfltdata = hdu.data
        except IOError:
            # BUG FIX: use full_shape (was image_shape) so the fallback
            # matches lfltdata's shape in the multiplication below.
            pfltdata = np.ones(self.full_shape, dtype=sci_chip.image_dtype)
            print("Cannot find file " + pflatfile +
                  ".  Treating flatfield constant value of '1'.\n")

        print("lfltdata shape: ", lfltdata.shape)
        print("pfltdata shape: ", pfltdata.shape)
        flat = lfltdata * pfltdata

        return flat
コード例 #21
0
    def getflat(self, chip, flat_file=None, flat_ext=None):
        """
        Method for retrieving a detector's flat field.

        Parameters
        ----------
        chip : int
            Chip number. Same as FITS ``EXTVER``.

        flat_file : str, None
            Flat field file name. If not specified, it will be determined
            automatically from image header.

        flat_ext : str, None
            Flat field extension name (same as FITS ``EXTNAME``). Specifies
            extension name containing flat field data.

        Returns
        -------
        flat : numpy.ndarray
            The flat-field array in the same shape as the input image.

        """
        # For the WFPC2 flat we need to invert
        # for use in Multidrizzle
        if flat_file is None:
            # Resolve the flat named by self.flatkey in the primary header;
            # osfn expands IRAF-style prefixes (e.g. uref$) in the name.
            filename = fileutil.osfn(
                self._image["PRIMARY"].header[self.flatkey])
            # Class-level cache: avoid re-opening the same reference file
            # for every chip/image that shares it.
            if filename in WFPC2InputImage.flat_file_map:
                flat_file, mef_flat_ext = WFPC2InputImage.flat_file_map[
                    filename]
            else:
                h = fileutil.openImage(filename, mode='readonly', memmap=False)
                flat_file = h.filename()
                # Prefer EXTNAME of the first extension; fall back to the
                # primary header's FILETYPE (may be '' if neither present).
                mef_flat_ext = h[0].header.get('FILETYPE', '')
                mef_flat_ext = h[1].header.get('EXTNAME', mef_flat_ext)
                h.close()
                WFPC2InputImage.flat_file_map[filename] = (flat_file,
                                                           mef_flat_ext)
            if flat_ext is None:
                flat_ext = mef_flat_ext

        elif flat_ext is None:
            # Caller supplied a file but no extension: determine the
            # extension name the same way (uncached path).
            h = fileutil.openImage(flat_file,
                                   mode='readonly',
                                   memmap=False,
                                   writefits=False)
            flat_ext = h[0].header.get('FILETYPE', '')
            flat_ext = h[1].header.get('EXTNAME', flat_ext)
            h.close()

        # WFPC2 flats are stored inverted relative to what the base class
        # returns, hence the reciprocal.
        flat = 1.0 / super().getflat(chip, flat_file, flat_ext)
        return flat
コード例 #22
0
 def __init__(self, file, kwinfo):
     """
     Store observation keyword values and open the spectral trace table.

     Parameters
     ----------
     file : str
         Input file name (kept for interface compatibility).
     kwinfo : dict
         Keyword info with 'opt_elem', 'cenwave', 'sporder' and 'sptrctab'.
     """
     # Grating, central wavelength and spectral order select the rows of
     # interest in the trace table.
     self._opt_elem = kwinfo['opt_elem']
     self._cenwave = kwinfo['cenwave']
     self._sporder = kwinfo['sporder']
     # Trace-table quantities are populated later; start them all unset.
     for attr in ('_nelem', '_a2displ', '_a1center',
                  '_a2center', '_snr_thresh', '_pedigree'):
         setattr(self, attr, None)
     self.sptrctabname = kwinfo['sptrctab']
     # fu.osfn expands IRAF environment prefixes in the reference-file name.
     self.sptrctab = self.openTraceFile(fu.osfn(self.sptrctabname))
コード例 #23
0
ファイル: mktrace.py プロジェクト: spacetelescope/stistools
 def __init__(self, file, kwinfo):
     """
     Initialize trace information from a keyword-info dictionary.

     Parameters
     ----------
     file : str
         Input file name (kept for interface compatibility).
     kwinfo : dict
         Must provide 'opt_elem', 'cenwave', 'sporder' and 'sptrctab'.
     """
     # Grating/cenwave/order identify the trace-table rows of interest.
     self._opt_elem = kwinfo['opt_elem']
     self._cenwave = kwinfo['cenwave']
     self._sporder = kwinfo['sporder']
     # Trace-table quantities, filled in after the table is read.
     self._nelem = None
     self._a2displ = None
     self._a1center = None
     self._a2center = None
     self._snr_thresh = None
     self._pedigree = None
     self.sptrctabname = kwinfo['sptrctab']
     # fu.osfn expands IRAF environment prefixes in the reference-file name.
     self.sptrctab = self.openTraceFile(fu.osfn(self.sptrctabname))
コード例 #24
0
ファイル: updatewcs.py プロジェクト: brechmos-stsci/stwcs
def run(configObj=None):
    """
    TEAL entry point for running ``updatewcs`` on a set of input files.

    Parameters
    ----------
    configObj : configObject
        TEAL configuration object holding 'input', 'extname' and the
        per-step switches.

    Raises
    ------
    ValueError
        If any input file's IDCTAB distortion reference file is missing.
    """
    # Create a dictionary of the remaining parameters, deleting extraneous
    # ones such as TEAL bookkeeping entries and the explicitly handled keys.
    cdict = configObj.dict()
    if "_RULES_" in cdict:
        del cdict['_RULES_']
    del cdict['_task_name_']
    del cdict['input']
    del cdict['extname']

    # Expand the input specification into a list of file names.
    files, altfiles = parseinput.parseinput(configObj['input'])

    # Insure that all input files have a valid distortion reference file
    # before any of them is processed.
    for fname in files:
        idctab = fits.getval(fname, 'idctab')
        if not os.path.exists(fileutil.osfn(idctab)):
            print('No valid distortion reference file ', idctab, ' found in ',
                  fname, '!')
            raise ValueError(
                "No valid IDCTAB {0} found in {1}".format(idctab, fname))

    # Re-define the parameter dict to only have switches for steps supported
    # by each file's instrument, as defined by
    # updatewcs.apply_corrections.allowed_corrections.
    for fname in files:
        instr = fits.getval(fname, 'INSTRUME')
        # Make a copy of the input parameters dict for this file.
        fdict = cdict.copy()
        for step in allowed_corr_dict:
            if (allowed_corr_dict[step] not in
                    updatewcs.apply_corrections.allowed_corrections[instr]):
                # BUG FIX: the original evaluated 'fdict[step]' without
                # deleting it, so unsupported steps were still passed on.
                del fdict[step]
        # Call 'updatewcs' on the correctly archived file.
        updatewcs.updatewcs(fname, **fdict)
コード例 #25
0
    def applyNPOLCorr(cls, fobj):
        """
        For each science extension in a fits file object:
            - create a WCSDVARR extension
            - update science header
            - add/update NPOLEXT keyword
        """
        nplfile = fileutil.osfn(fobj[0].header['NPOLFILE'])
        # Map WCSDVARR EXTVER numbers to FITS extension numbers.
        wcsdvarr_ind = cls.getWCSIndex(fobj)
        for ext in fobj:
            # Skip extensions without an EXTNAME or that are not 'SCI'.
            if ext.header.get('EXTNAME', '').lower() != 'sci':
                continue
            extversion = ext.header['EXTVER']
            ccdchip = cls.get_ccdchip(fobj,
                                      extname='SCI',
                                      extver=extversion)
            header = ext.header
            # Get the data arrays from the reference file and transform
            # them for use with SIP.
            dx, dy = cls.getData(nplfile, ccdchip)
            idccoeffs = cls.getIDCCoeffs(header)
            if idccoeffs is not None:
                dx, dy = cls.transformData(dx, dy, idccoeffs)

            # EXTVERs for the WCSDVARR extensions are derived from the SCI
            # EXTVER: odd for DX, even for DY (populates DPj.EXTVER).
            for npol_extname, wdvarr_ver, arr in (
                    ('DX', 2 * extversion - 1, dx),
                    ('DY', 2 * extversion, dy)):
                cls.addSciExtKw(header,
                                wdvarr_ver=wdvarr_ver,
                                npol_extname=npol_extname,
                                error_val=arr.max())
                hdu = cls.createNpolHDU(header,
                                        npolfile=nplfile,
                                        wdvarr_ver=wdvarr_ver,
                                        npl_extname=npol_extname,
                                        data=arr,
                                        ccdchip=ccdchip)
                # Replace an existing WCSDVARR extension, else append.
                if wcsdvarr_ind:
                    fobj[wcsdvarr_ind[wdvarr_ver]] = hdu
                else:
                    fobj.append(hdu)
コード例 #26
0
def foundIDCTAB(idctab):
    """
    Return True if *idctab* resolves to an existing IDCTAB reference file.

    Parameters
    ----------
    idctab : str
        Reference-file specification, possibly with an IRAF environment
        prefix (e.g. ``jref$``).

    Returns
    -------
    bool
        False when the name is 'N/A' or empty, the environment prefix
        cannot be expanded, or the expanded path does not exist.
    """
    try:
        idctab = fileutil.osfn(idctab)
    except KeyError:
        # Undefined environment variable in the file specification.
        return False
    if idctab in ('N/A', ''):
        return False
    return os.path.exists(idctab)
コード例 #27
0
ファイル: utils.py プロジェクト: brechmos-stsci/stwcs
def foundIDCTAB(idctab):
    """Return True when *idctab* expands to a path that exists on disk."""
    try:
        path = fileutil.osfn(idctab)
        # NOTE(review): as in the original, the final answer is simply
        # whether the expanded path exists; 'N/A'/'' never exist on disk.
        found = os.path.exists(path)
    except KeyError:
        # Undefined environment variable in the specification.
        found = False
    return found
コード例 #28
0
ファイル: imageObject.py プロジェクト: srodney/drizzlepac
    def getflat(self, chip):
        """
        Retrieve the detector's flat field for *chip*.

        Returns
        -------
        flat: array
            Array the same shape as the image, in **units of electrons**.
        """
        sci_chip = self._image[self.scienceExt, chip]
        # The reference flat named by self.flatkey in the primary header is
        # already in the required units of electrons; osfn expands any
        # environment variable (such as jref$) in the specification.
        filename = fileutil.osfn(self._image["PRIMARY"].header[self.flatkey])
        hdulist = None
        try:
            hdulist = fileutil.openImage(filename,
                                         mode='readonly',
                                         memmap=False)
            data = hdulist[(self.scienceExt, chip)].data

            shape = sci_chip.image_shape
            # When the science image is a subarray of the full-frame flat,
            # offset by the rounded LTV values; otherwise use it as-is.
            ltv2 = int(np.round(sci_chip.ltv2)) if data.shape[0] != shape[0] else 0
            ltv1 = int(np.round(sci_chip.ltv1)) if data.shape[1] != shape[1] else 0
            flat = data[ltv2:shape[0] + ltv2, ltv1:shape[1] + ltv1]

        except FileNotFoundError:
            # Best-effort fallback: a unit flat leaves the data unchanged.
            flat = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
            log.warning("Cannot find flat field file '{}'".format(filename))
            log.warning("Treating flatfield as a constant value of '1'.")

        finally:
            # Close the reference file whether or not the read succeeded.
            if hdulist is not None:
                hdulist.close()

        return flat
コード例 #29
0
def applyD2ImCorr(fname, d2imcorr):
    """
    Determine whether the detector-to-image (D2IM) correction should be
    applied, based on the D2IMFILE keyword in the primary header and the
    D2IMEXT keyword in the first extension.

    Parameters
    ----------
    fname : str
        Input FITS file name.
    d2imcorr : bool
        User switch (currently unused; retained for interface
        compatibility).

    Returns
    -------
    bool
        True when the correction should be (re)applied.

    Raises
    ------
    IOError
        If D2IMFILE names a file that does not exist on disk.
    """
    applyD2IMCorr = True
    try:
        # Get the D2IMFILE kw from the primary header.
        fd2im0 = fits.getval(fname, 'D2IMFILE')
        if fd2im0 == 'N/A':
            utils.remove_distortion(fname, "D2IMFILE")
            return False
        fd2im0 = fileutil.osfn(fd2im0)
        if not fileutil.findFile(fd2im0):
            msg = """\n\tKw D2IMFILE exists in primary header but file %s not found\n
                     Detector to image correction will not be applied\n""" % fd2im0
            logger.critical(msg)
            print(msg)
            raise IOError("D2IMFILE {0} not found".format(fd2im0))
        try:
            # Get the D2IMEXT kw from the first extension header.
            fd2imext = fits.getval(fname, 'D2IMEXT', ext=1)
            fd2imext = fileutil.osfn(fd2imext)
            if fd2imext and fileutil.findFile(fd2imext):
                # Re-apply only when a different reference file is named.
                applyD2IMCorr = fd2im0 != fd2imext
            else:
                # The D2IM file defined in the first extension may not be
                # found, but if a valid kw exists in the primary header the
                # correction should be applied.
                applyD2IMCorr = True
        except KeyError:
            # D2IMFILE kw present in primary header but D2IMEXT missing
            # in the first extension header.
            applyD2IMCorr = True
    except KeyError:
        print('D2IMFILE keyword not found in primary header')
        applyD2IMCorr = False
    # BUG FIX: return on every path. Previously the only return was inside
    # the 'except KeyError' handler, so the success path returned None.
    return applyD2IMCorr
コード例 #30
0
def applyD2ImCorr(fname, d2imcorr):
    """
    Decide whether detector-to-image (D2IM) correction applies, comparing
    the primary-header D2IMFILE keyword with the first extension's D2IMEXT.

    Parameters
    ----------
    fname : str
        Input FITS file name.
    d2imcorr : bool
        User switch (currently unused; kept for interface compatibility).

    Returns
    -------
    bool
        True when the correction should be (re)applied.

    Raises
    ------
    IOError
        If D2IMFILE names a non-existent file.
    """
    applyD2IMCorr = True
    try:
        # Get D2IMFILE kw from primary header.
        fd2im0 = fits.getval(fname, 'D2IMFILE')
        if fd2im0 == 'N/A':
            utils.remove_distortion(fname, "D2IMFILE")
            return False
        fd2im0 = fileutil.osfn(fd2im0)
        if not fileutil.findFile(fd2im0):
            msg = """\n\tKw D2IMFILE exists in primary header but file %s not found\n
                     Detector to image correction will not be applied\n""" % fd2im0
            logger.critical(msg)
            print(msg)
            raise IOError("D2IMFILE {0} not found".format(fd2im0))
        try:
            # Get D2IMEXT kw from first extension header.
            fd2imext = fits.getval(fname, 'D2IMEXT', ext=1)
            fd2imext = fileutil.osfn(fd2imext)
            if fd2imext and fileutil.findFile(fd2imext):
                if fd2im0 != fd2imext:
                    applyD2IMCorr = True
                else:
                    applyD2IMCorr = False
            else:
                # The D2IM file defined in the first extension may not be
                # found, but a valid primary-header kw means the correction
                # should still be applied.
                applyD2IMCorr = True
        except KeyError:
            # D2IMFILE present in primary header but D2IMEXT missing in the
            # first extension header.
            applyD2IMCorr = True
    except KeyError:
        print('D2IMFILE keyword not found in primary header')
        applyD2IMCorr = False
    # BUG FIX: previously this return lived inside the except handler and
    # the normal path fell off the end, returning None instead of a bool.
    return applyD2IMCorr
コード例 #31
0
ファイル: imageObject.py プロジェクト: jhunkeler/drizzlepac
    def getflat(self, chip):
        """
        Method for retrieving a detector's flat field.

        Parameters
        ----------
        chip : int
            Chip number (FITS ``EXTVER``) identifying the science extension.

        Returns
        -------
        flat: array
            This method will return an array the same shape as the image in
            **units of electrons**.

        """
        sci_chip = self._image[self.scienceExt, chip]
        # The keyword for ACS flat fields in the primary header of the flt
        # file is pfltfile.  This flat file is already in the required
        # units of electrons.

        # The use of fileutil.osfn interprets any environment variable, such as
        # jref$, used in the specification of the reference filename
        filename = fileutil.osfn(self._image["PRIMARY"].header[self.flatkey])
        hdulist = None
        try:
            hdulist = fileutil.openImage(filename, mode='readonly',
                                         memmap=False)
            data = hdulist[(self.scienceExt, chip)].data

            # If the flat is full-frame but the science image is a subarray,
            # offset by the rounded LTV values to cut out the matching
            # region; otherwise use the flat unshifted.
            if data.shape[0] != sci_chip.image_shape[0]:
                ltv2 = int(np.round(sci_chip.ltv2))
            else:
                ltv2 = 0
            size2 = sci_chip.image_shape[0] + ltv2

            if data.shape[1] != sci_chip.image_shape[1]:
                ltv1 = int(np.round(sci_chip.ltv1))
            else:
                ltv1 = 0
            size1 = sci_chip.image_shape[1] + ltv1

            flat = data[ltv2:size2, ltv1:size1]

        except FileNotFoundError:
            # Best-effort fallback: a unit flat leaves the data unchanged.
            flat = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
            log.warning("Cannot find flat field file '{}'".format(filename))
            log.warning("Treating flatfield as a constant value of '1'.")

        finally:
            # Close the reference file whether or not the read succeeded.
            if hdulist is not None:
                hdulist.close()

        return flat
コード例 #32
0
    def getflat(self, chip):
        """
        Method for retrieving a detector's flat field.

        Returns
        -------
        flat: array
            This method will return an array the same shape as the image in
            **units of electrons**.

        """
        sci_chip = self._image[self.scienceExt, chip]
        exten = '%s,%d' % (self.scienceExt, chip)
        # The keyword for ACS flat fields in the primary header of the flt
        # file is pfltfile.  This flat file is already in the required
        # units of electrons.

        # fileutil.osfn interprets any environment variable, such as jref$,
        # used in the specification of the reference filename.
        filename = fileutil.osfn(self._image["PRIMARY"].header[self.flatkey])

        handle = None
        try:
            handle = fileutil.openImage(filename,
                                        mode='readonly',
                                        memmap=False)
            hdu = fileutil.getExtn(handle, extn=exten)
            # BUG FIX: cast the LTV offsets to int — np.round returns a
            # float, which is invalid as an array index; the failure was
            # previously masked by the bare 'except', silently returning a
            # unit flat.
            if hdu.data.shape[0] != sci_chip.image_shape[0]:
                _ltv2 = int(np.round(sci_chip.ltv2))
            else:
                _ltv2 = 0
            _size2 = sci_chip.image_shape[0] + _ltv2
            if hdu.data.shape[1] != sci_chip.image_shape[1]:
                _ltv1 = int(np.round(sci_chip.ltv1))
            else:
                _ltv1 = 0
            _size1 = sci_chip.image_shape[1] + _ltv1

            data = hdu.data[_ltv2:_size2, _ltv1:_size1]
        except Exception:
            # Narrowed from a bare 'except' (which also caught system-exit
            # exceptions); keep the best-effort unit-flat fallback.
            data = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
            log.warning("Cannot find file %s.\n    Treating flatfield "
                        "constant value of '1'." % filename)
        finally:
            # BUG FIX: the handle previously leaked when getExtn or the
            # slicing raised; always close it.
            if handle is not None:
                handle.close()
        flat = data
        return flat
コード例 #33
0
 def applyDet2ImCorr(cls, fobj):
     """
     For each science extension in a fits file object:
         - create a WCSDVARR extension
         - update science header
         - add/update D2IMEXT keyword
     """
     d2imfile = fileutil.osfn(fobj[0].header['D2IMFILE'])
     # Map D2IMARR EXTVER numbers to FITS extension numbers.
     wcsdvarr_ind = cls.getWCSIndex(fobj)
     d2im_num_ext = 1
     for ext in fobj:
         # Skip extensions without an EXTNAME or that are not 'SCI'.
         if ext.header.get('EXTNAME', '').lower() != 'sci':
             continue
         extversion = ext.header['EXTVER']
         ccdchip = cls.get_ccdchip(fobj, extname='SCI', extver=extversion)
         header = ext.header
         # Get the data arrays from the reference file.
         dx, dy = cls.getData(d2imfile, ccdchip)
         # EXTVER for each D2IMARR extension comes from the running
         # counter below (populates the DPj.EXTVER kw).
         for d2im_extname, arr in (('DX', dx), ('DY', dy)):
             if arr is None:
                 continue
             cls.addSciExtKw(header,
                             wdvarr_ver=d2im_num_ext,
                             d2im_extname=d2im_extname,
                             error_val=arr.max())
             hdu = cls.createD2ImHDU(header,
                                     d2imfile=d2imfile,
                                     wdvarr_ver=d2im_num_ext,
                                     d2im_extname=d2im_extname,
                                     data=arr,
                                     ccdchip=ccdchip)
             # Replace an existing D2IMARR extension, else append.
             if wcsdvarr_ind and d2im_num_ext in wcsdvarr_ind:
                 fobj[wcsdvarr_ind[d2im_num_ext]] = hdu
             else:
                 fobj.append(hdu)
             d2im_num_ext += 1
コード例 #34
0
ファイル: util.py プロジェクト: bsugerman/drizzlepac
def verifyFilePermissions(filelist, chmod=True):
    """ Verify that images specified in 'filelist' can be updated.

    A message will be printed reporting the names of any images which
    do not have write-permission, then quit.
    """
    badfiles = []
    archive_dir = False
    for img in filelist:
        fname = fileutil.osfn(img)
        # Files under 'OrIg_files' are archived originals; warn specially.
        if 'OrIg_files' in os.path.split(fname)[0]:
            archive_dir = True
        try:
            # Probe writability by opening for append.
            with open(fname, mode='a'):
                pass
        except IOError as e:
            # Only a permission error marks the file as bad.
            if e.errno == errno.EACCES:
                badfiles.append(img)

    if badfiles:
        banner = '#' * 40
        if archive_dir:
            print('\n')
            print(banner)
            print('    Working in "OrIg_files" (archive) directory. ')
            print('    This directory has been created to serve as an archive')
            print('    for the original input images. ')
            print('\n    These files should be copied into another directory')
            print('     for processing. ')
            print(banner)

        print('\n')
        print(banner)
        print('Found %d files which can not be updated!' % len(badfiles))
        for img in badfiles:
            print('    %s' % img)
        print('\nPlease reset permissions for these files and restart...')
        print(banner)
        print('\n')
        filelist = None

    return filelist
コード例 #35
0
    def getflat(self, chip):
        """
        Method for retrieving a detector's flat field.

        Returns
        -------
        flat: array
            This method will return an array the same shape as the image in
            **units of electrons**.

        """
        sci_chip = self._image[self.scienceExt, chip]
        exten = '%s,%d' % (self.scienceExt, chip)
        # The keyword for ACS flat fields in the primary header of the flt
        # file is pfltfile.  This flat file is already in the required
        # units of electrons.

        # fileutil.osfn interprets any environment variable, such as jref$,
        # used in the specification of the reference filename.
        filename = fileutil.osfn(self._image["PRIMARY"].header[self.flatkey])

        handle = None
        try:
            handle = fileutil.openImage(filename, mode='readonly',
                                        memmap=False)
            hdu = fileutil.getExtn(handle, extn=exten)
            # BUG FIX: np.round returns a float, which cannot be used as an
            # array index; the resulting error was hidden by the bare
            # 'except' below, silently producing a unit flat. Cast to int.
            if hdu.data.shape[0] != sci_chip.image_shape[0]:
                _ltv2 = int(np.round(sci_chip.ltv2))
            else:
                _ltv2 = 0
            _size2 = sci_chip.image_shape[0] + _ltv2
            if hdu.data.shape[1] != sci_chip.image_shape[1]:
                _ltv1 = int(np.round(sci_chip.ltv1))
            else:
                _ltv1 = 0
            _size1 = sci_chip.image_shape[1] + _ltv1

            data = hdu.data[_ltv2:_size2, _ltv1:_size1]
        except Exception:
            # Narrowed from a bare 'except'; keep the best-effort fallback.
            data = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
            log.warning("Cannot find file %s.\n    Treating flatfield "
                        "constant value of '1'." % filename)
        finally:
            # BUG FIX: previously the handle leaked whenever getExtn or the
            # slicing raised; close it on every path.
            if handle is not None:
                handle.close()
        flat = data
        return flat
コード例 #36
0
ファイル: util.py プロジェクト: gbrammer/drizzlepac
def verifyFilePermissions(filelist, chmod=True):
    """ Verify that images specified in 'filelist' can be updated.

    A message will be printed reporting the names of any images which
    do not have write-permission, then quit.

    Parameters
    ----------
    filelist : list of str
        File names, possibly containing IRAF-style environment prefixes.
    chmod : bool
        Currently unused; retained for interface compatibility.

    Returns
    -------
    list of str or None
        The input list when all files are writable, otherwise None.
    """
    badfiles = []
    archive_dir = False
    for img in filelist:
        fname = fileutil.osfn(img)
        # Files under 'OrIg_files' are archived originals; warn specially.
        if 'OrIg_files' in os.path.split(fname)[0]:
            archive_dir = True
        try:
            # Probe writability by opening for append (NOTE(review): this
            # creates the file if it does not already exist — confirm
            # that is acceptable for these inputs).
            fp = open(fname, mode='a')
            fp.close()
        except IOError as e:
            # Only a permission error marks the file as bad.
            if e.errno == errno.EACCES:
                badfiles.append(img)
            # Not a permission error.
            pass

    num_bad = len(badfiles)
    if num_bad > 0:
        if archive_dir:
            print('\n')
            print('#' * 40)
            print('    Working in "OrIg_files" (archive) directory. ')
            print('    This directory has been created to serve as an archive')
            print('    for the original input images. ')
            print('\n    These files should be copied into another directory')
            print('     for processing. ')
            print('#' * 40)

        print('\n')
        print('#' * 40)
        print('Found %d files which can not be updated!' % (num_bad))
        for img in badfiles:
            print('    %s' % (img))
        print('\nPlease reset permissions for these files and restart...')
        print('#' * 40)
        print('\n', flush=True)
        # Signal failure to the caller by returning None.
        filelist = None

    return filelist
コード例 #37
0
ファイル: drutil.py プロジェクト: spacetelescope/pydrizzle
def __buildIDCTAB(header, directory, kw='cubic'):
    """
    Build an IDCTAB file name from scratch for the given header.

    Parameters
    ----------
    header : header object
        Header providing INSTRUME and DETECTOR (or CAMERA for NICMOS).
    directory : str or None
        Directory with the default drizzle coefficients tables; falls back
        to DEFAULT_IDCDIR when not given.
    kw : str
        Distortion-model keyword used in the file name (default 'cubic').

    Returns
    -------
    (idcfile, idctype) : tuple
        Built file name and its type, or (None, None) when no table can be
        built for this instrument/detector.
    """
    instrument = header['INSTRUME']
    if instrument != 'NICMOS':
        detector = header['DETECTOR']
    else:
        detector = str(header['CAMERA'])

    keyword = kw

    if not directory:
        default_dir = DEFAULT_IDCDIR
    else:
        default_dir = directory

    if instrument == 'WFPC2':
        if detector == 1:
            detname = 'pc'
        else:
            detname = 'wf'
        idcfile = default_dir + detname + str(detector) + '-' + keyword.lower()

    elif instrument == 'STIS':
        idcfile = default_dir + 'stis-' + detector.lower()

    elif instrument == 'NICMOS':
        if detector is not None:
            idcfile = default_dir + 'nic-' + detector
        else:
            idcfile = None
    else:
        idcfile = None

    # BUG FIX: do not pass None through fileutil.osfn/getIDCFileType —
    # previously an unbuildable name crashed instead of returning None.
    if idcfile is None:
        idctype = None
    else:
        idctype = getIDCFileType(fileutil.osfn(idcfile))

    return idcfile, idctype
コード例 #38
0
ファイル: npol.py プロジェクト: brechmos-stsci/stwcs
    def applyNPOLCorr(cls, fobj):
        """
        For each science extension in a fits file object:
            - create a WCSDVARR extension
            - update science header
            - add/update NPOLEXT keyword
        """
        # Expand any IRAF environment prefix in the NPOLFILE name.
        nplfile = fileutil.osfn(fobj[0].header['NPOLFILE'])
        # Map WCSDVARR EXTVER numbers to extension numbers
        wcsdvarr_ind = cls.getWCSIndex(fobj)
        for ext in fobj:
            try:
                extname = ext.header['EXTNAME'].lower()
            except KeyError:
                # Extensions without an EXTNAME are skipped.
                continue
            if extname == 'sci':
                extversion = ext.header['EXTVER']
                ccdchip = cls.get_ccdchip(fobj, extname='SCI', extver=extversion)
                header = ext.header
                # get the data arrays from the reference file and transform
                # them for use with SIP
                dx,dy = cls.getData(nplfile, ccdchip)
                idccoeffs = cls.getIDCCoeffs(header)

                if idccoeffs is not None:
                    dx, dy = cls.transformData(dx,dy, idccoeffs)

                # Determine EXTVER for the WCSDVARR extension from the
                # NPL file (EXTNAME, EXTVER) kw.
                # This is used to populate DPj.EXTVER kw
                # Convention: odd EXTVER for DX, even for DY.
                wcsdvarr_x_version = 2 * extversion -1
                wcsdvarr_y_version = 2 * extversion
                # Each 'ename' is (extension name, WCSDVARR EXTVER, data).
                for ename in zip(['DX', 'DY'], [wcsdvarr_x_version,wcsdvarr_y_version],[dx, dy]):
                    error_val = ename[2].max()
                    cls.addSciExtKw(header, wdvarr_ver=ename[1], npol_extname=ename[0], error_val=error_val)
                    hdu = cls.createNpolHDU(header, npolfile=nplfile, \
                        wdvarr_ver=ename[1], npl_extname=ename[0], data=ename[2],ccdchip=ccdchip)
                    # Replace an existing WCSDVARR extension, else append.
                    if wcsdvarr_ind:
                        fobj[wcsdvarr_ind[ename[1]]] = hdu
                    else:
                        fobj.append(hdu)
コード例 #39
0
def applyTDDCorr(fname, utddcorr):
    """
    The default value of tddcorr for all ACS images is True.
    This correction will be performed if all conditions below are True:
    - the user did not turn it off on the command line
    - the detector is WFC
    - the idc table specified in the primary header is available.

    Parameters
    ----------
    fname : `~astropy.io.fits.HDUList`
        Input FITS file object.

    Returns
    -------
    bool
        Whether the time-dependent distortion correction should be applied.
    """
    phdr = fname[0].header
    instrument = phdr['INSTRUME']
    try:
        detector = phdr['DETECTOR']
    except KeyError:
        detector = None
    try:
        tddswitch = phdr['TDDCORR']
    except KeyError:
        # Absent switch defaults to performing the correction.
        tddswitch = 'PERFORM'

    if not (instrument == 'ACS' and detector == 'WFC' and utddcorr
            and tddswitch == 'PERFORM'):
        return False

    try:
        idctab = phdr['IDCTAB']
    except KeyError:
        # BUG FIX: the original fell through after this handler and
        # referenced the undefined name 'idctab', raising NameError.
        # A missing IDCTAB simply means no correction.
        return False
    # Apply only when the named reference file exists on disk.
    return os.path.exists(fileutil.osfn(idctab))
コード例 #40
0
def applyTDDCorr(fname, utddcorr):
    """
    Determine whether the time-dependent distortion (TDD) correction
    should be applied.

    The default value of tddcorr for all ACS images is True.
    This correction will be performed if all conditions below are True:
    - the user did not turn it off on the command line
    - the detector is WFC
    - the idc table specified in the primary header is available.

    Parameters
    ----------
    fname : `~astropy.io.fits.HDUList`
        Input FITS file object.
    utddcorr : bool
        User-requested TDD switch (e.g. from the command line).

    Returns
    -------
    bool
        True if the TDD correction should be applied.
    """
    phdr = fname[0].header
    instrument = phdr['INSTRUME']
    # DETECTOR and TDDCORR may be absent for non-ACS data; fall back to
    # the same defaults the original try/except logic produced.
    detector = phdr.get('DETECTOR')
    tddswitch = phdr.get('TDDCORR', 'PERFORM')

    # The correction only applies to ACS/WFC data with the switch enabled.
    if not (instrument == 'ACS' and detector == 'WFC' and utddcorr
            and tddswitch == 'PERFORM'):
        return False

    try:
        idctab = phdr['IDCTAB']
    except KeyError:
        # BUG FIX: previously `idctab` was still referenced after this
        # handler, raising NameError whenever the keyword was missing.
        return False
    # Apply the correction only if the IDC table actually exists on disk.
    return os.path.exists(fileutil.osfn(idctab))
コード例 #41
0
ファイル: det2im.py プロジェクト: jhunkeler/stwcs
 def applyDet2ImCorr(cls, fobj):
     """
     For each science extension in a fits file object:
         - create a WCSDVARR extension
         - update science header
         - add/update D2IMEXT keyword

     Parameters
     ----------
     fobj : `astropy.io.fits.HDUList`
         Science file, modified in place.  Its primary header must
         contain a 'D2IMFILE' keyword naming the detector-to-image
         correction reference file.
     """
     # osfn expands IRAF/environment syntax (e.g. 'jref$') in the filename.
     d2imfile = fileutil.osfn(fobj[0].header['D2IMFILE'])
     # Map D2IMARR EXTVER numbers to FITS extension numbers
     wcsdvarr_ind = cls.getWCSIndex(fobj)
     # Running EXTVER counter for the D2IMARR extensions created below.
     d2im_num_ext = 1
     # NOTE(review): new HDUs may be appended to `fobj` while it is being
     # iterated; this appears to be relied upon here -- confirm HDUList
     # iteration tolerates in-loop appends.
     for ext in fobj:
         try:
             extname = ext.header['EXTNAME'].lower()
         except KeyError:
             # Extensions without EXTNAME (e.g. the primary HDU) are skipped.
             continue
         if extname == 'sci':
             extversion = ext.header['EXTVER']
             ccdchip = cls.get_ccdchip(fobj, extname='SCI', extver=extversion)
             header = ext.header
             # get the data arrays from the reference file
             dx, dy = cls.getData(d2imfile, ccdchip)
             # Determine EXTVER for the D2IMARR extension from the D2I file (EXTNAME, EXTVER) kw.
             # This is used to populate DPj.EXTVER kw
             for ename in zip(['DX', 'DY'], [dx, dy]):
                 # ename is (axis label, data array); a missing axis is skipped.
                 if ename[1] is not None:
                     error_val = ename[1].max()
                     cls.addSciExtKw(header, wdvarr_ver=d2im_num_ext, d2im_extname=ename[0], error_val=error_val)
                     hdu = cls.createD2ImHDU(header, d2imfile=d2imfile,
                                             wdvarr_ver=d2im_num_ext,
                                             d2im_extname=ename[0],
                                             data=ename[1], ccdchip=ccdchip)
                     # Replace an existing D2IMARR extension in place,
                     # otherwise append a new one.
                     if wcsdvarr_ind and d2im_num_ext in wcsdvarr_ind:
                         fobj[wcsdvarr_ind[d2im_num_ext]] = hdu
                     else:
                         fobj.append(hdu)
                     d2im_num_ext = d2im_num_ext + 1
コード例 #42
0
ファイル: imageObject.py プロジェクト: mdlpstsci/drizzlepac
    def getflat(self, chip, flat_file=None, flat_ext=None):
        """
        Method for retrieving a detector's flat field.

        Parameters
        ----------
        chip : int
            Chip number. Same as FITS ``EXTVER``.

        flat_file : str, None
            Flat field file name. If not specified, it will be determined
            automatically from image header.

        flat_ext : str, None
            Flat field extension name (same as FITS ``EXTNAME``). Specifies
            extension name containing flat field data.

        Returns
        -------
        flat: array
            This method will return an array the same shape as the image in
            **units of electrons**.

        """
        if flat_ext is None:
            flat_ext = self.scienceExt
        sci_chip = self._image[self.scienceExt, chip]

        # For ACS the flat is named by the 'pfltfile' keyword in the primary
        # header of the flt file and is already in units of electrons.
        # fileutil.osfn expands any environment variable (e.g. 'jref$')
        # embedded in the reference filename.
        if flat_file is None:
            flat_file = fileutil.osfn(self._image["PRIMARY"].header[self.flatkey])

        handle = None
        try:
            handle = fileutil.openImage(flat_file, mode='readonly', memmap=False)
            raw = handle[(flat_ext, chip)].data

            ny = sci_chip.image_shape[0]
            nx = sci_chip.image_shape[1]

            # When the reference array is larger than the science chip
            # (e.g. a subarray observation), offset into it by the LTV
            # values; otherwise take the array from its origin.
            off2 = 0 if raw.shape[0] == ny else int(np.round(sci_chip.ltv2))
            off1 = 0 if raw.shape[1] == nx else int(np.round(sci_chip.ltv1))

            flat = raw[off2:ny + off2, off1:nx + off1]

        except FileNotFoundError:
            # Missing reference file: fall back to a unit flat so processing
            # can continue, and warn the user.
            flat = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
            log.warning("Cannot find flat field file '{}'".format(flat_file))
            log.warning("Treating flatfield as a constant value of '1'.")

        finally:
            if handle is not None:
                handle.close()

        return flat
コード例 #43
0
    def writeTrace(self, fname, sciline, refline, interp_trace, trace1024,
                   tr_ind, a2disp_ind):
        """
        The 'writeTrace' method performs the following steps:

          - Adds sciline-refline to all traces with the relevant OPT_ELEM,
            CENWAVE and SPORDER.
          - Writes the new trace table to the current directory.
          - Updates the SPTRCTAB keyword in the header to point to the new table.
          - Writes out fits files with the

            - science trace - '_sci'
            - the fit to the science trace - '_scifit'
            - the interpolated trace - '_interp'
            - the linear fit to the interpolated trace - '_interpfit'

        Parameters
        ----------
        fname : str
            Name of the science file; output names are derived from it.
        sciline, refline, interp_trace, trace1024 : array
            Trace arrays written to the diagnostic FITS files above.
        tr_ind : object
            Not used by this method; retained for interface compatibility.
        a2disp_ind : array
            Selection mask over the trace-table rows to be refined.
        """
        fpath = fu.osfn(self.sptrctabname)
        infile = fname.split('.')
        newname = infile[0] + '_1dt.' + infile[1]

        def _write_image(data, suffix):
            # Write `data` as a simple FITS primary HDU to
            # '<root><suffix><ext>', overwriting any existing file.
            outname = infile[0] + suffix + infile[1]
            if os.path.exists(outname):
                os.remove(outname)
            fits.PrimaryHDU(data).writeto(outname)

        # refine all traces for this CENWAVE, OPT_ELEM
        fu.copyFile(fpath, newname)
        hdulist = fits.open(newname, mode='update')
        tab = hdulist[1].data
        ind = np.nonzero(a2disp_ind)[0]
        # NOTE(review): the loop spans ind[0]..ind[-1] inclusive, which
        # assumes the selected rows are contiguous -- confirm.
        for i in np.arange(ind[0], ind[-1] + 1):
            tab[i].setfield('A2DISPL',
                            tab[i].field('A2DISPL') + (sciline - refline))
        if 'DEGPERYR' in tab.names:
            # Zero out any time-dependence term in the refined rows.
            for i in np.arange(ind[0], ind[-1] + 1):
                tab[i].setfield('DEGPERYR', 0.0)

        hdulist.flush()
        hdulist.close()

        # update SPTRCTAB keyword in the science file primary header
        hdulist = fits.open(fname, mode='update')
        hdulist[0].header['SPTRCTAB'] = newname
        hdulist.close()

        # Diagnostic outputs.  The duplicated remove-then-write blocks of the
        # original were consolidated into _write_image (which consistently
        # uses os.remove; the original mixed os.remove and os.unlink).
        # NOTE(review): the '_interpfit' file is written from `refline`,
        # matching the original code -- confirm that is intentional.
        _write_image(refline, '_1dt_interpfit.')
        _write_image(interp_trace, '_1dt_interp.')
        _write_image(sciline, '_1dt_scifit.')
        _write_image(trace1024, '_1dt_sci.')
コード例 #44
0
ファイル: saaclean.py プロジェクト: jhunkeler/nictools
def osfn(filename):
    """Return *filename* with IRAF syntax and OS environment names
    substituted out.  ``None`` is passed through unchanged."""
    return None if filename is None else fileutil.osfn(filename)
コード例 #45
0
ファイル: drutil.py プロジェクト: spacetelescope/pydrizzle
import os

from stsci.tools import fileutil
from stsci.tools.fileutil import buildRotMatrix

# Convenience definitions
DEGTORAD = fileutil.DEGTORAD

no = False
yes = True

# Constants identifying the source of the distortion model
IDCTAB  = 1
DRIZZLE = 2
TRAUGER = 3

# Default location of the drizzle distortion coefficient files.  The
# 'stsdas$' IRAF-style path only resolves when STSDAS is configured;
# fall back to the current working directory otherwise.
try:
    DEFAULT_IDCDIR = fileutil.osfn('stsdas$pkg/analysis/dither/drizzle/coeffs/')
except Exception:
    # BUG FIX: was a bare `except:`, which would also swallow
    # SystemExit/KeyboardInterrupt.
    DEFAULT_IDCDIR = os.getcwd()


"""
def factorial(n):
    #Compute a factorial for integer n.
    m = 1
    for i in range(int(n)):
        m = m * (i+1)
    return m

def combin(j,n):
    #Return the combinatorial factor for j in n.
    return (factorial(j) / (factorial(n) * factorial( (j-n) ) ) )
コード例 #46
0
ファイル: astrometry_utils.py プロジェクト: stsci-hack/stwcs
    def updateObs(self, obsname, all_wcs=False, remove_duplicates=True):
        """Update observation with any available solutions.

        Parameters
        ==========
        obsname : str or `astropy.io.fits.HDUList`
           Filename (or already-opened HDUList) for observation to be updated

        all_wcs : bool
            If True, all solutions from the Astrometry database
            are appended to the input file as separate FITS
            extensions.  If False, only those solutions based on the
            same IDCTAB will be appended.

        remove_duplicates : bool
            If True, any headerlet extensions with the same
            HDRNAME are found, the copies will
            be deleted until only the first version added remains.
        """
        if not self.perform_step:
            return

        obs_open = False
        # User provided only an input filename, so open in 'update' mode
        if isinstance(obsname, str):
            obsfile = obsname
            obsname = fits.open(obsfile, mode='update')
            obs_open = True
        elif isinstance(obsname, fits.HDUList):
            obsfile = obsname.filename()
            # User provided an HDUList - make sure it is opened in 'update' mode
            if obsname.fileinfo(0)['filemode'] != 'update':
                # Not opened in 'update' mode, so close and re-open
                obsname.close()
                logger.info(
                    "Opening {} in 'update' mode to append new WCSs".format(
                        obsfile))
                obsname = fits.open(obsfile, mode='update')
        else:
            # We do not know what kind of input this is, so raise an Exception with an explanation.
            error_msg = "Input not valid!  Please provide either a filename or fits.HDUList object"
            logger.error(error_msg)
            raise ValueError(error_msg)

        # NOTE(review): a missing 'rootname' keyword would make obsroot None
        # and raise AttributeError on the next line -- pre-existing behavior.
        obsroot = obsname[0].header.get('rootname', None)
        observationID = obsroot.split('_')[0]
        logger.info("Updating astrometry for {}".format(observationID))

        # take inventory of what hdrlets are already appended to this file
        wcsnames = headerlet.get_headerlet_kw_names(obsname, 'wcsname')

        # Get all the WCS solutions available from the astrometry database
        # for this observation, along with what was flagged as the 'best'
        # solution.  The 'best' solution should be the one that aligns the
        # observation closest to the GAIA frame.
        headerlets, best_solution_id = self.getObservation(observationID)
        if headerlets is None:
            logger.warning("Problems getting solutions from database")
            logger.warning(
                " NO Updates performed for {}".format(observationID))
            if self.raise_errors:
                raise ValueError("No new solution found in AstrometryDB.")
            else:
                return

        # Get IDCTAB filename from file header
        idctab = obsname[0].header.get('IDCTAB', None)
        idcroot = os.path.basename(fileutil.osfn(idctab)).split('_')[0]

        # Determine what WCSs to append to this observation
        # If headerlet found in database, update file with all new WCS solutions
        # according to the 'all_wcs' parameter
        apriori_added = False
        if not self.new_observation:
            # Attach new unique hdrlets to file...
            logger.info("Updating {} with:".format(observationID))
            for h in headerlets:
                newname = headerlets[h][0].header['wcsname']
                # Only append the WCS from the database if `all_wcs` was turned on,
                # or the WCS was based on the same IDCTAB as in the image header.
                append_wcs = ((idcroot in newname) or all_wcs
                              or newname == 'OPUS')
                if append_wcs and (idcroot in newname):
                    apriori_added = True

                # Check to see whether this WCS has already been appended or
                # if it was never intended to be appended.  If so, skip it.
                if newname in wcsnames:
                    continue  # do not add duplicate hdrlets
                # Add solution as an alternate WCS
                if append_wcs:
                    try:
                        logger.info(
                            "\tHeaderlet with WCSNAME={}".format(newname))
                        headerlets[h].attach_to_file(obsname)
                    except ValueError:
                        pass

        if remove_duplicates:
            hdr_kw = headerlet.get_headerlet_kw_names(obsname, kw='HDRNAME')
            for hname in [kwd for kwd in set(hdr_kw) if hdr_kw.count(kwd) > 1]:
                headerlet.delete_headerlet([obsname],
                                           hdrname=hname,
                                           keep_first=True)
                # logger.warn is deprecated; use logger.warning
                logger.warning(
                    f"Duplicate headerlet with 'HDRNAME'='{hname}' found.")
                logger.warning("Duplicate headerlets have been removed.")

        # Obtain the current primary WCS name
        current_wcsname = obsname[('sci', 1)].header['wcsname']

        # At this point, we have appended all applicable headerlets from the database
        # However, if no database-provided headerlet was applicable, we need to
        # compute a new a priori WCS based on the IDCTAB from the observation header.
        # This will also re-define the 'best_solution_id'.
        if not apriori_added:
            # No headerlets were appended from the database, so we need to define
            # a new a priori solution and apply it as the new 'best_solution_id'
            self.apply_new_apriori(obsname)

        else:
            # Once all the new headerlet solutions have been added as new extensions
            # Apply the best solution, if one was specified, as primary WCS
            # This needs to be separate logic in order to work with images which have already
            # been updated with solutions from the database, and we are simply resetting.
            if best_solution_id and best_solution_id != current_wcsname:
                # get full list of all headerlet extensions now in the file
                hdrlet_extns = headerlet.get_extname_extver_list(
                    obsname, 'hdrlet')

                for h in hdrlet_extns:
                    hdrlet = obsname[h].headerlet
                    wcsname = hdrlet[0].header['wcsname']
                    if wcsname == best_solution_id:
                        # replace primary WCS with this solution
                        hdrlet.init_attrs()
                        hdrlet.apply_as_primary(obsname,
                                                attach=False,
                                                force=True)
                        logger.info('Replacing primary WCS with')
                        # BUG FIX: previously logged `newname`, which is only
                        # assigned in the database-append loop above and could
                        # be stale or undefined in this branch.
                        logger.info(
                            '\tHeaderlet with WCSNAME={}'.format(wcsname))
                        break

        # Insure changes are written to the file and that the file is closed.
        if obs_open:
            obsname.close()
コード例 #47
0
ファイル: astrometry_utils.py プロジェクト: stsci-hack/stwcs
    def apply_new_apriori(self, obsname):
        """ Compute and apply a new a priori WCS based on offsets from astrometry database.

        The pipeline-default WCS is first archived as a headerlet; a new
        a priori WCS is then computed from the guide-star offsets returned
        by ``find_gsc_offset``, applied as the primary WCS, archived as an
        alternate WCS and as a headerlet, and written to a headerlet file.

        Parameters
        -----------
        obsname : str
            Full filename or `astropy.io.fits.HDUList` object \
            for the observation to be corrected

        Returns
        -------
        wcsname : str
            Value of WCSNAME keyword for this new WCS

        """
        filename = os.path.basename(obsname.filename())

        # Start by archiving and writing out pipeline-default based on new IDCTAB
        # Save this new WCS as a headerlet extension and separate headerlet file
        wname = obsname[('sci', 1)].header['wcsname']
        hlet_extns = headerlet.get_headerlet_kw_names(obsname, kw='EXTVER')
        # newly processed data will not have any hlet_extns, so we need to account for that
        newhlt = max(hlet_extns) + 1 if len(hlet_extns) > 0 else 1
        hlet_names = [
            obsname[('hdrlet', e)].header['wcsname'] for e in hlet_extns
        ]

        if wname not in hlet_names:
            # Short SHA-1 hash of the WCSNAME keeps HDRNAME unique but compact.
            wname_hash = hashlib.sha1(wname.encode()).hexdigest()[:6]
            hdrname = "{}_{}".format(filename.replace('.fits', ''), wname_hash)
            # Create full filename for headerlet:
            hfilename = "{}_hlet.fits".format(hdrname)
            logger.info("Archiving pipeline-default WCS {} to {}".format(
                wname, filename))
            descrip = "Pipeline-default WCS"
            numext = len(obsname)
            headerlet.archive_as_headerlet(obsname,
                                           hfilename,
                                           sciext='SCI',
                                           wcskey="PRIMARY",
                                           author="stwcs.updatewcs",
                                           descrip=descrip)
            # The archived headerlet was appended as the last extension
            # (index numext); tag it with the next headerlet EXTVER.
            obsname[numext].header['EXTVER'] = newhlt

            # Now, write out pipeline-default WCS to a unique headerlet file
            logger.info(
                "Writing out pipeline-default WCS {} to headerlet file: {}".
                format(wname, hfilename))
            headerlet.extract_headerlet(obsname,
                                        hfilename,
                                        extnum=numext,
                                        clobber=True)

        # We need to create new apriori WCS based on new IDCTAB
        # Get guide star offsets from DB
        # Getting observationID (rootname) from header to avoid
        # potential issues with actual filename being changed
        pix_offsets = find_gsc_offset(obsname)

        # Determine rootname for IDCTAB
        idctab = obsname[0].header['IDCTAB']
        idcroot = os.path.basename(fileutil.osfn(idctab)).split('_')[0]
        # Create WCSNAME for this new a priori WCS
        if pix_offsets['catalog']:
            wname = 'IDC_{}-{}'.format(idcroot, pix_offsets['catalog'])
        else:
            wname = 'IDC_{}'.format(idcroot)
        # Compute and add new solution if it is not already an alternate WCS
        # Save this new WCS as a headerlet extension and separate headerlet file
        wname_hash = hashlib.sha1(wname.encode()).hexdigest()[:6]
        hdrname = "{}_{}".format(filename.replace('.fits', ''), wname_hash)
        # Create full filename for headerlet:
        hfilename = "{}_hlet.fits".format(hdrname)

        # apply offsets to image using the same tangent plane
        # which was used to compute the offsets
        updatehdr.updatewcs_with_shift(obsname,
                                       pix_offsets['expwcs'],
                                       hdrname=hfilename,
                                       wcsname=wname,
                                       reusename=True,
                                       fitgeom='rscale',
                                       rot=0.0,
                                       scale=1.0,
                                       xsh=pix_offsets['delta_x'],
                                       ysh=pix_offsets['delta_y'],
                                       verbose=False,
                                       force=True)

        sci_extns = updatehdr.get_ext_list(obsname, extname='SCI')

        # Update list of alternate WCSs
        alt_wnames = _get_alt_wcsnames(obsname)
        # Remove any alternate WCS solutions which are not based on the current IDCTAB
        # (the primary ' ' and OPUS 'O' keys are always kept)
        for alt_key, alt_name in alt_wnames.items():
            if idcroot not in alt_name and alt_key not in [' ', 'O']:
                for sci_extn in sci_extns:
                    altwcs.deleteWCS(obsname, sci_extn, wcskey=alt_key)

        if wname not in alt_wnames.values():
            for sci_ext in sci_extns:
                # Create alternate WCS for this new WCS
                _, wname = altwcs.archive_wcs(
                    obsname,
                    sci_ext,
                    wcsname=wname,
                    mode=altwcs.ArchiveMode.QUIET_ABORT)
                logger.info('Archived {} in {}'.format(wname, sci_ext))

        # Get updated list of headerlet names
        hlet_extns = headerlet.get_headerlet_kw_names(obsname, kw='EXTVER')
        hlet_names = [
            obsname[('hdrlet', e)].header['wcsname'] for e in hlet_extns
        ]
        if wname not in hlet_names:
            newhlt += 1
            descrip = "A Priori WCS based on ICRS guide star positions"
            logger.info("Appending a priori WCS {} to {}".format(
                wname, filename))
            headerlet.archive_as_headerlet(obsname,
                                           hfilename,
                                           sciext='SCI',
                                           wcskey="PRIMARY",
                                           author="stwcs.updatewcs",
                                           descrip=descrip)

            hlet_extns = headerlet.find_headerlet_HDUs(obsname, strict=False)
            newext = max(hlet_extns)

            # NOTE(review): EXTVER is set to the extension *index* here,
            # whereas the archival branch above uses the running headerlet
            # counter (newhlt) -- confirm this asymmetry is intentional.
            obsname[newext].header['EXTVER'] = newext
            # Update a priori headerlet with offsets used to compute new WCS
            apriori_hdr = obsname[newext].headerlet[0].header
            apriori_hdr['D_RA'] = pix_offsets['delta_ra']
            apriori_hdr['D_DEC'] = pix_offsets['delta_dec']
            apriori_hdr['D_ROLL'] = pix_offsets['roll']
            apriori_hdr['D_SCALE'] = pix_offsets['scale']
            apriori_hdr['NMATCH'] = 2
            apriori_hdr['CATALOG'] = pix_offsets['catalog']

        if not os.path.exists(hfilename):
            # Now, write out new a priori WCS to a unique headerlet file
            logger.info(
                "Writing out a priori WCS {} to headerlet file: {}".format(
                    wname, hfilename))
            # The headerlet may have been recorded under either the file-based
            # or hash-based HDRNAME; try both before giving up.
            try:
                newext = headerlet.find_headerlet_HDUs(obsname,
                                                       hdrname=hfilename)[0]
            except ValueError:
                newext = headerlet.find_headerlet_HDUs(obsname,
                                                       hdrname=hdrname)[0]
            headerlet.extract_headerlet(obsname, hfilename, extnum=newext)

        return wname
コード例 #48
0
ファイル: test_small_dgeo.py プロジェクト: jhunkeler/reftools
def compare_sub_to_full_sci(subarray,full_sci,output=False,update=True):
    """
    Compare the D2IM+DGEO distortion corrections computed for a subarray
    image against those computed for the matching region of the
    full-frame science image.

    For each chip, correction grids are evaluated over both images with
    ``transform_d2im_dgeo``; the subarray grid is differenced against the
    corresponding slice of the full-frame grid, displayed interactively
    (when matplotlib is available) and optionally written to FITS files.

    Parameters
    ----------
    subarray : str
        Filename of the subarray science image.
    full_sci : str
        Filename of the full-frame science image covering the subarray.
    output : bool
        If True, write the DX/DY correction arrays for both images to
        FITS files named from each input's rootname.
    update : bool
        If True, first run ``updatewcs`` on both inputs so they are
        consistent with the reference files listed in their headers.
    """
    from stsci.tools import fileutil
    from stwcs import updatewcs

    if update:
        # update input SCI file to be consistent with reference files in header
        print('Updating input file ',subarray,' to be consistent with reference files listed in header...')
        updatewcs.updatewcs(subarray)
        print('Updating input file ',full_sci,' to be consistent with reference files listed in header...')
        updatewcs.updatewcs(full_sci)

    # ODGEOFIL presumably names the original full-size DGEOFILE -- TODO confirm
    fulldgeofile = fileutil.osfn(pyfits.getval(subarray,'ODGEOFIL'))
    # parse out rootname from input file if user wants results written to file
    if output:
        soutroot = fileutil.buildNewRootname(subarray)
        foutroot = fileutil.buildNewRootname(full_sci)
        hdulist = pyfits.open(fulldgeofile)

    detector = pyfits.getval(fulldgeofile,'DETECTOR')
    filter_names = fileutil.getFilterNames(pyfits.getheader(subarray))

    # count the number of chips in subarray image
    xyfile = pyfits.open(subarray)
    numchips = 0
    ccdchip = []
    extname = xyfile[1].header['EXTNAME']
    for extn in xyfile:
        if 'extname' in extn.header and extn.header['extname'] == extname:
            numchips += 1
            if 'ccdchip' in extn.header:
                ccdchip.append([extn.header['ccdchip'],extn.header['extver']])
            else:
                # No CCDCHIP keyword: assume a single chip/extver.
                ccdchip.append([1,1])

    snx = xyfile['sci',1].header['NAXIS1']
    sny = xyfile['sci',1].header['NAXIS2']
    ltv1 = xyfile['sci',1].header['ltv1']
    ltv2 = xyfile['sci',1].header['ltv2']
    xyfile.close()

    # build grid of points for full-size image for
    #    chips corresponding to subarray
    xyfile = pyfits.open(full_sci)
    fullchip = []
    for extn in xyfile:
        if ('extname' in extn.header and extn.header['extname'] == extname) and \
        extn.header['ccdchip'] == ccdchip[0][0]:
            fullchip.append([extn.header['ccdchip'],extn.header['extver']])
    xyfile.close()

    sxarr,syarr = build_grid_arrays(snx,sny,1)
    # Region of the full-frame grid that overlays the subarray, derived
    # from the (negative) LTV offsets in the subarray header.
    full_range = [slice(-ltv2,-ltv2+sny),slice(-ltv1,-ltv1+snx)]


    fnx = pyfits.getval(full_sci,'NAXIS1','sci',1)
    fny = pyfits.getval(full_sci,'NAXIS2','sci',1)
    fxarr,fyarr = build_grid_arrays(fnx,fny,1)

    # initialize plot here
    if has_matplotlib:
        pl.clf()
        pl.gray()

    for chip,det,fext in zip(list(range(1,numchips+1)),ccdchip,fullchip):
        # Compute the correction imposed by the D2IM+DGEO corrections
        #   on the subarray
        sxout,syout = transform_d2im_dgeo(subarray,det[1],sxarr,syarr)
        sdx= (sxout-sxarr).reshape(sny,snx)
        sdy= (syout-syarr).reshape(sny,snx)
        # Compute the correction imposed by the D2IM+DGEO corrections
        #    on the full sized SCI image
        fxout,fyout = transform_d2im_dgeo(full_sci,fext[1],fxarr,fyarr)
        fdx= (fxout-fxarr).reshape(fny,fnx)
        fdy= (fyout-fyarr).reshape(fny,fnx)

        # determine the difference
        diffx = (sdx - fdx[full_range[0],full_range[1]]).astype(np.float32)
        if has_matplotlib:
            pl.imshow(diffx)
            pl.title('sub_dx-full_x: %s %s[%d:%d,%d:%d] with %g +/- %g' %
                     (filter_names, detector, full_range[0].start,
                      full_range[0].stop, full_range[1].start,
                      full_range[1].stop, diffx.mean(),diffx.std()))
            pl.colorbar()

            # raw_input only exists on Python 2; input is the Python 3 form.
            if sys.version_info[0] < 3:
                raw_input("Press 'ENTER' to close figure and plot DY...")
            else:
                input("Press 'ENTER' to close figure and plot DY...")

            pl.close()

        # determine the difference
        diffy = (sdy - fdy[full_range[0],full_range[1]]).astype(np.float32)
        if has_matplotlib:
            pl.imshow(diffy)
            pl.title('sub_dy-full_y: %s %s[%d:%d,%d:%d] with %g +/- %g' %
                     (filter_names, detector, full_range[0].start,
                      full_range[0].stop, full_range[1].start,
                      full_range[1].stop, diffy.mean(), diffy.std()))
            pl.colorbar()

            if sys.version_info[0] < 3:
                raw_input("Press 'ENTER' to close figure and exit...")
            else:
                input("Press 'ENTER' to close figure and exit...")

            pl.close()

        if output:
            # Reuse the reference-file HDU structure as a container for the
            # computed DX/DY arrays, one output file per chip.
            outname = foutroot+'_sci'+str(chip)+'_newfull_dxy.fits'
            if os.path.exists(outname): os.remove(outname)
            hdulist['dx',chip].data = fdx
            hdulist['dy',chip].data = fdy
            hdulist.writeto(outname)
            outname = soutroot+'_sci'+str(chip)+'_newsub_dxy.fits'
            if os.path.exists(outname): os.remove(outname)
            hdulist['dx',chip].data = sdx
            hdulist['dy',chip].data = sdy
            hdulist.writeto(outname)

            """
            outname = outroot+'_sci'+str(chip)+'_diff_dxy.fits'
            if os.path.exists(outname): os.remove(outname)
            hdulist['dx',chip].data = diffx
            hdulist['dy',chip].data = diffy
            hdulist.writeto(outname)
            """
            print('Created output file with differences named: ',outname)
    if output:
        hdulist.close()
コード例 #49
0
ファイル: test_small_dgeo.py プロジェクト: jhunkeler/reftools
def run(scifile,dgeofile=None,output=False,match_sci=False,update=True,vmin=None,vmax=None,plot_offset=0,plot_samp=32):
    """
        This routine compares how well the sub-sampled DGEOFILE (generated
        using the 'makesmall' module) corrects the input science image as
        opposed to the full-size DGEOFILE.

        SYNTAX:
            import test_small_dgeo
            test_small_dgeo.run(scifile,dgeofile=None,output=False)

        where:
            scifile   - name of science image
            dgeofile  - name of full-sized DGEOFILE if not in DGEOFILE keyword
            output    - if True, write out differences to FITS file(s)

        The user can either specify the full-size DGEOFILE reference filename
        as the 'dgeofile' parameter or the code will look for the 'DGEOFILE'
        keyword in the primary header for the name of the full-sized reference
        file.

        The primary output will be a series of plots showing the difference images
        with the mean and stddev of the differences in the label of the image display.

        If the 'output' parameter is set to True, these differences
        will then be written out to FITS files based on the input science image
        rootname. Both the DX and DY differences for a single chip will be written
        out to the same file, with a separate file for each chip.

    """
    from stsci.tools import fileutil
    from stwcs import updatewcs

    if update:
        # update input SCI file to be consistent with reference files in header
        print('Updating input file ',scifile,' to be consistent with reference files listed in header...')
        updatewcs.updatewcs(scifile)
    # Now, get the original NPOLFILE and overwrite the data in the scifile
    # WCSDVARR extensions to remove the scaling by the linear terms imposed by
    # the SIP convention
    npolfile = fileutil.osfn(pyfits.getval(scifile,'NPOLFILE'))
    npolroot = os.path.split(npolfile)[1]
    # Collect the (EXTNAME, EXTVER) pairs of all DX/DY extensions present
    # in the NPOLFILE so they can be copied into the science file below.
    dxextns = []
    for extn in pyfits.open(npolfile):
        if 'extname' in extn.header and extn.header['extname'] in ['DX','DY']:
            dxextns.append([extn.header['extname'],extn.header['extver']])
    ndxextns = len(dxextns)
    # Update input file with NPOLFILE arrays now
    print('Updating input file ',scifile,' with original ')
    print('    NPOLFILE arrays from ',npolfile)
    fsci = pyfits.open(scifile,mode='update')
    try:
        # Probe for the presence of a WCSDVARR extension; index_of() raises
        # KeyError when it is missing.  The return value is intentionally
        # discarded (the original code bound it to a local named 'next',
        # shadowing the builtin of the same name).
        fsci.index_of(('wcsdvarr',1))
    except KeyError:
        fsci.close()
        print('=====')
        print('ERROR: No WCSDVARR extensions found!')
        print('       Please make sure NPOLFILE is specified and run this task with "update=True".')
        print('=====')
        return
    # Replace WCSDVARR arrays here...
    for dxe,wextn in zip(dxextns,list(range(1,ndxextns+1))):
        fsci['wcsdvarr',wextn].data = pyfits.getdata(npolfile,dxe[0],dxe[1])
    # Now replace the NPOLEXT keyword value with a new one so that it will automatically
    # update with the correct file next time updatewcs is run.
    fsci['sci',1].header['npolext'] = npolroot
    print('Updated NPOLEXT with ',npolroot)
    fsci.close()
    print('\n=====')
    print('WARNING: Updated file ',scifile,' NO LONGER conforms to SIP convention!')
    print('         This file will need to be updated with updatewcs before using with MultiDrizzle.')
    print('=====\n')

    # Get info on full-size DGEOFILE
    if dgeofile is None:
        # read in full dgeofile from header
        fulldgeofile = pyfits.getval(scifile,'DGEOFILE')
    else:
        fulldgeofile = dgeofile

    print('Opening full-size DGEOFILE ',fulldgeofile,' for comparison.')
    fulldgeofile = fileutil.osfn(fulldgeofile)
    full_shape = [pyfits.getval(fulldgeofile,'NAXIS2','DX',1),pyfits.getval(fulldgeofile,'NAXIS1','DX',1)]

    filter_names = fileutil.getFilterNames(pyfits.getheader(scifile))

    detector = pyfits.getval(fulldgeofile,'DETECTOR')
    # count the number of chips in DGEOFILE
    xyfile = pyfits.open(scifile)
    numchips = 0
    ccdchip = []
    extname = xyfile[1].header['EXTNAME']
    for extn in xyfile:
        if 'extname' in extn.header and extn.header['extname'] == extname:
            numchips += 1
            if 'ccdchip' in extn.header:
                ccdchip.append(extn.header['ccdchip'])
            else:
                ccdchip.append(1)
    if not match_sci:
        # Compare over the full reference-file frame
        ltv1 = 0
        ltv2 = 0
        nx = full_shape[1]
        ny = full_shape[0]
    else:
        # Restrict the comparison to the science image's subarray, using
        # LTV1/LTV2 to locate it within the full frame.
        nx = xyfile['sci',1].header['NAXIS1']
        ny = xyfile['sci',1].header['NAXIS2']
        ltv1 = xyfile['sci',1].header['ltv1']
        ltv2 = xyfile['sci',1].header['ltv2']

    grid = [nx,ny,1]
    print('grid of : ',nx,ny)
    xyfile.close()

    xarr,yarr = build_grid_arrays(nx,ny,1)
    xgarr = xarr.reshape(grid[1],grid[0])
    ygarr = yarr.reshape(grid[1],grid[0])

    # initialize plot here
    if has_matplotlib:
        pl.clf()
        pl.gray()

    # 'hdulist' is only created inside the loop when output=True; keep a
    # sentinel so the final close below cannot raise NameError if the loop
    # body never runs (e.g. no matching chips).
    hdulist = None
    for chip,det in zip(list(range(1,numchips+1)),ccdchip):

        xout,yout = transform_d2im_dgeo(scifile,chip,xarr,yarr)

        # Find the DGEOFILE extension version whose CCDCHIP matches this chip
        dgeochip = 1
        dgeo = pyfits.open(fulldgeofile)
        for e in dgeo:
            if 'ccdchip' not in e.header:
                continue
            else:
                if e.header['ccdchip'] == det:
                    dgeochip = e.header['extver']
                    break
        dgeo.close()

        print('Matching sci,', chip, ' with DX,', dgeochip)
        dx= (xout-xarr).reshape(grid[1],grid[0])
        fulldatax = pyfits.getdata(fulldgeofile,'DX',dgeochip)
        diffx=(dx-fulldatax[-ltv2:-ltv2+ny,-ltv1:-ltv1+nx]).astype(np.float32)

        if has_matplotlib:
            pl.imshow(diffx, vmin=vmin, vmax=vmax)
            pl.title('dx-full_x: %s %s(DX,%d) with %g +/- %g' %
                     (filter_names, detector, dgeochip, diffx.mean(),
                       diffx.std()))
            pl.colorbar()

            if sys.version_info[0] < 3:
                raw_input("Press 'ENTER' to close figure and plot DY...")
            else:
                input("Press 'ENTER' to close figure and plot DY...")

            pl.close()

        dy= (yout-yarr).reshape(grid[1],grid[0])
        fulldatay = pyfits.getdata(fulldgeofile,'DY',dgeochip)
        diffy=(dy-fulldatay[-ltv2:-ltv2+ny,-ltv1:-ltv1+nx]).astype(np.float32)

        if has_matplotlib:
            pl.imshow(diffy,vmin=vmin,vmax=vmax)
            pl.title('dy-full_y: %s %s(DY,%d) with %g +/- %g ' %
                     (filter_names, detector, dgeochip, diffy.mean(),
                      diffy.std()))
            pl.colorbar()

            if sys.version_info[0] < 3:
                raw_input("Press 'ENTER' to close figure and show next chip...")
            else:
                input("Press 'ENTER' to close figure and show next chip...")

            pl.close()

        if output:
            # parse out rootname from input file if user wants results written to file
            outroot = fileutil.buildNewRootname(scifile)
            #
            # setup DGEOFILE ref file as template for each chip's output results
            # we only need dx,1 and dy,1 since each chip will be written out
            # to a separate file and since we will use this template for
            # writing out 2 different results files
            #
            fhdulist = pyfits.open(fulldgeofile)
            hdulist = pyfits.HDUList()
            hdulist.append(fhdulist[0])
            hdulist.append(fhdulist['dx',1])
            hdulist.append(fhdulist['dy',1])
            fhdulist.close()

            outname = outroot+'_sci'+str(chip)+'_dgeo_diffxy.match'
            if os.path.exists(outname): os.remove(outname)
            dxgarr = xgarr+diffx
            dygarr = ygarr+diffy
            # Write a sub-sampled (plot_samp) grid of positions and corrected
            # positions for external inspection/matching.
            wtraxyutils.write_xy_file(outname,[xgarr[plot_offset::plot_samp,plot_offset::plot_samp].flatten(),
                                                ygarr[plot_offset::plot_samp,plot_offset::plot_samp].flatten(),
                                                dxgarr[plot_offset::plot_samp,plot_offset::plot_samp].flatten(),
                                                dygarr[plot_offset::plot_samp,plot_offset::plot_samp].flatten()],format="%20.8f",append=True)

            outname = outroot+'_sci'+str(chip)+'_newfull_dxy.fits'
            if os.path.exists(outname): os.remove(outname)

            hdulist['dx',1].data = dx
            hdulist['dy',1].data = dy
            hdulist.writeto(outname)

            outname = outroot+'_sci'+str(chip)+'_diff_dxy.fits'
            if os.path.exists(outname): os.remove(outname)
            hdulist['dx',1].data = diffx
            hdulist['dy',1].data = diffy
            hdulist.writeto(outname)
            print('Created output file with differences named: ',outname)

        # free the per-chip arrays before processing the next chip
        del dx,dy,diffx,diffy

    if output and hdulist is not None:
        hdulist.close()
コード例 #50
0
ファイル: util.py プロジェクト: bsugerman/drizzlepac
def getDefaultConfigObj(taskname,configObj,input_dict=None,loadOnly=True):
    """ Return default configObj instance for task updated
        with user-specified values from input_dict.

        Parameters
        ----------
        taskname : string
            Name of task to load into TEAL

        configObj : string
            The valid values for 'configObj' would be::

                None                      - loads last saved user .cfg file
                'defaults'                - loads task default .cfg file
                name of .cfg file (string)- loads user-specified .cfg file

        input_dict : dict, optional
            Set of parameters and values specified by user to be different from
            what gets loaded in from the .cfg file for the task.
            Defaults to None (no user overrides); the old signature used a
            mutable ``{}`` default, which is unsafe in Python.

        loadOnly : bool
            Setting 'loadOnly' to False causes the TEAL GUI to start allowing the
            user to edit the values further and then run the task if desired.

        Returns
        -------
        configObj
            The loaded (and possibly user-merged / GUI-edited) configObj.

    """
    if configObj is None:
        # Start by grabbing the default values without using the GUI
        # This insures that all subsequent use of the configObj includes
        # all parameters and their last saved values
        configObj = teal.load(taskname)
    elif isinstance(configObj,str):
        if configObj.lower().strip() == 'defaults':
            # Load task default .cfg file with all default values
            configObj = teal.load(taskname,defaults=True)
            # define default filename for configObj
            configObj.filename = taskname.lower()+'.cfg'
        else:
            # Load user-specified .cfg file with its special default values
            # we need to call 'fileutil.osfn()' to insure all environment
            # variables specified by the user in the configObj filename are
            # expanded to the full path
            configObj = teal.load(fileutil.osfn(configObj))

    # merge in the user values for this run
    # this, though, does not save the results for use later
    if input_dict not in [None,{}]:# and configObj not in [None, {}]:
        # check to see whether any input parameters are unexpected.
        # Any unexpected parameters provided on input should be reported and
        # the code should stop
        validateUserPars(configObj,input_dict)

        # If everything looks good, merge user inputs with configObj and continue
        cfgpars.mergeConfigObj(configObj, input_dict)
        # Update the input .cfg file with the updated parameter values
        #configObj.filename = os.path.join(cfgpars.getAppDir(),os.path.basename(configObj.filename))
        #configObj.write()

    if not loadOnly:
    # We want to run the GUI AFTER merging in any parameters
    # specified by the user on the command-line and provided in
    # input_dict
        configObj = teal.teal(configObj,loadOnly=False)

    return configObj
コード例 #51
0
def run(configObj, wcsmap=None):
    """ Interface for running `wdrizzle` from TEAL or Python command-line.

    This code performs all file ``I/O`` to set up the use of the drizzle code for
    a single exposure to replicate the functionality of the original `wdrizzle`.

    Parameters
    ----------
    configObj : dict-like
        Task configuration with input/output filenames plus the
        'User WCS Parameters' and 'Data Scaling Parameters' sections.
    wcsmap : object, optional
        NOTE(review): this parameter is currently not forwarded to
        ``do_driz`` (a literal ``None`` is passed below) -- confirm whether
        that is intentional before relying on custom WCS mappings.
    """

    # Insure all output filenames specified have .fits extensions
    if configObj['outdata'][-5:] != '.fits': configObj['outdata'] += '.fits'
    if not util.is_blank(
            configObj['outweight']) and configObj['outweight'][-5:] != '.fits':
        configObj['outweight'] += '.fits'
    if not util.is_blank(configObj['outcontext']
                         ) and configObj['outcontext'][-5:] != '.fits':
        configObj['outcontext'] += '.fits'

    # Keep track of any files we need to open
    in_sci_handle = None
    in_wht_handle = None
    out_sci_handle = None
    out_wht_handle = None
    out_con_handle = None

    _wcskey = configObj['wcskey']
    if util.is_blank(_wcskey):
        _wcskey = ' '

    scale_pars = configObj['Data Scaling Parameters']
    user_wcs_pars = configObj['User WCS Parameters']

    # Open the SCI (and WHT?) image
    # read file to get science array
    insci = get_data(configObj['input'])
    expin = fileutil.getKeyword(configObj['input'], scale_pars['expkey'])
    in_sci_phdr = fits.getheader(fileutil.parseFilename(configObj['input'])[0],
                                 memmap=False)

    # we need to read in the input WCS
    input_wcs = stwcs.wcsutil.HSTWCS(configObj['input'], wcskey=_wcskey)

    if not util.is_blank(configObj['inweight']):
        inwht = get_data(configObj['inweight']).astype(np.float32)
    else:
        # Generate a default weight map of all good pixels
        inwht = np.ones(insci.shape, dtype=insci.dtype)

    output_exists = False
    outname = fileutil.osfn(fileutil.parseFilename(configObj['outdata'])[0])
    if os.path.exists(outname):
        output_exists = True
    # Output was specified as a filename, so open it in 'update' mode
    outsci = get_data(configObj['outdata'])

    if output_exists:
        # we also need to read in the output WCS from pre-existing output
        output_wcs = stwcs.wcsutil.HSTWCS(configObj['outdata'])

        out_sci_hdr = fits.getheader(outname, memmap=False)
        outexptime = out_sci_hdr['DRIZEXPT']
        if 'ndrizim' in out_sci_hdr:
            uniqid = out_sci_hdr['ndrizim'] + 1
        else:
            uniqid = 1

    else:  # otherwise, define the output WCS either from user pars or refimage
        if util.is_blank(configObj['User WCS Parameters']['refimage']):
            # Define a WCS based on user provided WCS values
            # NOTE:
            #   All parameters must be specified, not just one or a few
            if not util.is_blank(user_wcs_pars['outscale']):
                output_wcs = wcs_functions.build_hstwcs(
                    user_wcs_pars['raref'], user_wcs_pars['decref'],
                    user_wcs_pars['xrefpix'], user_wcs_pars['yrefpix'],
                    int(user_wcs_pars['outnx']), int(user_wcs_pars['outny']),
                    user_wcs_pars['outscale'], user_wcs_pars['orient'])
            else:
                # Define default WCS based on input image
                applydist = True
                if input_wcs.sip is None or input_wcs.instrument == 'DEFAULT':
                    applydist = False
                output_wcs = stwcs.distortion.utils.output_wcs(
                    [input_wcs], undistort=applydist)
        else:
            refimage = configObj['User WCS Parameters']['refimage']
            refroot, extroot = fileutil.parseFilename(refimage)
            if extroot is None:
                fimg = fits.open(refroot, memmap=False)
                for i, extn in enumerate(fimg):
                    if 'CRVAL1' in extn.header:  # Key on CRVAL1 for valid WCS
                        refwcs = stwcs.wcsutil.HSTWCS('{}[{}]'.format(
                            refroot, i))
                        if refwcs.wcs.has_cd():
                            extroot = i
                            break
                fimg.close()
                # try to find extension with valid WCS
                refimage = "{}[{}]".format(refroot, extroot)
            # Define the output WCS based on a user specified reference image WCS
            output_wcs = stwcs.wcsutil.HSTWCS(refimage)
        # Initialize values used for combining results
        outexptime = 0.0
        uniqid = 1

    # Set up the output data array and insure that the units for that array is 'cps'
    if outsci is None:
        # Define a default blank array based on definition of output_wcs
        outsci = np.empty(output_wcs.array_shape, dtype=np.float32)
        outsci.fill(np.nan)
    else:
        # Convert array to units of 'cps', if needed
        if outexptime != 0.0:
            np.divide(outsci, outexptime, outsci)
        outsci = outsci.astype(np.float32)

    # Now update output exposure time for additional input file
    outexptime += expin

    outwht = None
    if not util.is_blank(configObj['outweight']):
        outwht = get_data(configObj['outweight'])

    if outwht is None:
        outwht = np.zeros(output_wcs.array_shape, dtype=np.float32)
    else:
        outwht = outwht.astype(np.float32)

    outcon = None
    keep_con = False

    if not util.is_blank(configObj['outcontext']):
        outcon = get_data(configObj['outcontext'])
        keep_con = True
        if outcon is None:
            outcon = np.zeros((1, ) + output_wcs.array_shape, dtype=np.int32)
        else:
            outcon = outcon.astype(np.int32)
            planeid = int((uniqid - 1) / 32)

            # Add a new plane to the context image if planeid overflows.
            # np.append with axis=0 requires matching dimensionality, so the
            # new plane must be 3-D (the original 2-D zeros_like(outcon[0])
            # would raise ValueError here).
            while outcon.shape[0] <= planeid:
                plane = np.zeros((1, ) + outcon.shape[1:], dtype=outcon.dtype)
                outcon = np.append(outcon, plane, axis=0)

    # Interpret wt_scl parameter
    if configObj['wt_scl'] == 'exptime':
        wt_scl = expin
    elif configObj['wt_scl'] == 'expsq':
        wt_scl = expin * expin
    else:
        wt_scl = float(configObj['wt_scl'])

    # Interpret coeffs parameter to determine whether to apply coeffs or not
    undistort = True
    if not configObj[
            'coeffs'] or input_wcs.sip is None or input_wcs.instrument == 'DEFAULT':
        undistort = False
    # turn off use of coefficients if undistort is False (coeffs == False)
    if not undistort:
        input_wcs.sip = None
        input_wcs.cpdis1 = None
        input_wcs.cpdis2 = None
        input_wcs.det2im = None

    wcslin = distortion.utils.output_wcs([input_wcs], undistort=undistort)

    # Perform actual drizzling now...
    _vers = do_driz(insci,
                    input_wcs,
                    inwht,
                    output_wcs,
                    outsci,
                    outwht,
                    outcon,
                    expin,
                    scale_pars['in_units'],
                    wt_scl,
                    wcslin_pscale=wcslin.pscale,
                    uniqid=uniqid,
                    pixfrac=configObj['pixfrac'],
                    kernel=configObj['kernel'],
                    fillval=scale_pars['fillval'],
                    stepsize=configObj['stepsize'],
                    wcsmap=None)

    out_sci_handle, outextn = create_output(configObj['outdata'])
    if not output_exists:
        # Also, define default header based on input image Primary header
        out_sci_handle[outextn].header = in_sci_phdr.copy()

    # Update header of output image with exptime used to scale the output data
    # if out_units is not counts, this will simply be a value of 1.0
    # the keyword 'exptime' will always contain the total exposure time
    # of all input image regardless of the output units
    out_sci_handle[outextn].header['EXPTIME'] = outexptime

    # create CTYPE strings with any '-SIP' suffix stripped.  The original
    # test 'if ctype1.find("-SIP"):' was truthy even when the substring was
    # absent (find() returns -1) and falsy at index 0, so use 'in' instead.
    ctype1 = input_wcs.wcs.ctype[0]
    ctype2 = input_wcs.wcs.ctype[1]
    if '-SIP' in ctype1: ctype1 = ctype1.replace('-SIP', '')
    if '-SIP' in ctype2: ctype2 = ctype2.replace('-SIP', '')

    # Update header with WCS keywords
    out_sci_handle[outextn].header['ORIENTAT'] = output_wcs.orientat
    out_sci_handle[outextn].header['CD1_1'] = output_wcs.wcs.cd[0][0]
    out_sci_handle[outextn].header['CD1_2'] = output_wcs.wcs.cd[0][1]
    out_sci_handle[outextn].header['CD2_1'] = output_wcs.wcs.cd[1][0]
    out_sci_handle[outextn].header['CD2_2'] = output_wcs.wcs.cd[1][1]
    out_sci_handle[outextn].header['CRVAL1'] = output_wcs.wcs.crval[0]
    out_sci_handle[outextn].header['CRVAL2'] = output_wcs.wcs.crval[1]
    out_sci_handle[outextn].header['CRPIX1'] = output_wcs.wcs.crpix[0]
    out_sci_handle[outextn].header['CRPIX2'] = output_wcs.wcs.crpix[1]
    out_sci_handle[outextn].header['CTYPE1'] = ctype1
    out_sci_handle[outextn].header['CTYPE2'] = ctype2
    out_sci_handle[outextn].header['VAFACTOR'] = 1.0

    if scale_pars['out_units'] == 'counts':
        np.multiply(outsci, outexptime, outsci)
        out_sci_handle[outextn].header['DRIZEXPT'] = outexptime

    else:
        out_sci_handle[outextn].header['DRIZEXPT'] = 1.0

    # Update header keyword NDRIZIM to keep track of how many images have
    # been combined in this product so far
    out_sci_handle[outextn].header['NDRIZIM'] = uniqid

    # define keywords to be written out to product header
    drizdict = outputimage.DRIZ_KEYWORDS.copy()

    # Update drizdict with current values
    drizdict['VER']['value'] = _vers[:44]
    drizdict['DATA']['value'] = configObj['input'][:64]
    drizdict['DEXP']['value'] = expin
    drizdict['OUDA']['value'] = configObj['outdata'][:64]
    drizdict['OUWE']['value'] = configObj['outweight'][:64]
    drizdict['OUCO']['value'] = configObj['outcontext'][:64]
    drizdict['MASK']['value'] = configObj['inweight'][:64]
    drizdict['WTSC']['value'] = wt_scl
    drizdict['KERN']['value'] = configObj['kernel']
    drizdict['PIXF']['value'] = configObj['pixfrac']
    drizdict['OUUN']['value'] = scale_pars['out_units']
    drizdict['FVAL']['value'] = scale_pars['fillval']
    drizdict['WKEY']['value'] = configObj['wcskey']
    outputimage.writeDrizKeywords(out_sci_handle[outextn].header, uniqid,
                                  drizdict)

    # add output array to output file
    out_sci_handle[outextn].data = outsci
    # Copy the finished science header BEFORE closing the handle: the
    # original code read out_sci_handle[outextn].header after close(),
    # which is unsafe once the HDUList (and any memmap) is closed.
    out_sci_hdr_copy = out_sci_handle[outextn].header.copy()
    out_sci_handle.close()

    if not util.is_blank(configObj['outweight']):
        out_wht_handle, outwhtext = create_output(configObj['outweight'])
        out_wht_handle[outwhtext].header = out_sci_hdr_copy
        out_wht_handle[outwhtext].data = outwht
        out_wht_handle.close()

    if keep_con:
        out_con_handle, outconext = create_output(configObj['outcontext'])
        out_con_handle[outconext].data = outcon
        out_con_handle.close()
コード例 #52
0
ファイル: exposure.py プロジェクト: spacetelescope/pydrizzle
    def __init__(self,expname, handle=None, dqname=None, idckey=None,
                    new=no,wcs=None,mask=None,pa_key=None, parity=None,
                    idcdir=None, rot=None, extver=1, exptime=None,
                    ref_pscale=1.0, binned=1, mt_wcs=None, group_indx = None):
        """Initialize a single exposure/chip from a FITS image.

        Reads the header of the requested extension, extracts photometric
        and distortion-related keywords, and builds the ObsGeometry (WCS +
        distortion model) for this chip.

        Parameters (as used by the visible code):
            expname   - filename, optionally with '[extn]' suffix
            handle    - already-open image handle; opened here if None and not 'new'
            dqname    - name of the associated DQ file
            idckey    - header keyword naming the IDC file ('wcs' means skip lookup)
            new       - when true, skip all header reads and use defaults
                        (NOTE(review): 'no' is presumably the pyraf/iraf boolean
                        False defined elsewhere in this module -- confirm)
            wcs       - pre-built WCS to install instead of deriving one
            mask      - 3-tuple of (maskname, singlemaskname, masklist) filenames
            extver    - FITS extension version for this chip
            exptime   - exposure time override; read from EXPTIME if None
        """

        # This name should be formatted for use in image I/O
        self.name = fileutil.osfn(expname)

        # osfn() will expand '.' unnecessarily, potentially
        # creating a string-length problem for 'drizzle', which
        # is limited to strings of 80 chars.
        _path,_name = os.path.split(self.name)
        # if path for this filename is the same as the current dir,
        # then there is no need to pass along the path.
        if _path == os.getcwd(): self.name = _name


        # Keep track of any associated mask file created for
        # this exposure from its DQ file, or other mask file.
        _fname,_extn = fileutil.parseFilename(expname)
        _open = False

        # Make sure we have an open file handle to use for getting the
        # header and data arrays.
        if not handle and not new:
            handle = fileutil.openImage(expname)
            _open = True  # remember to close the handle we opened ourselves

        # If no extension was specified, try to interrogate the file
        # to find whether the SCI array is in the Primary
        # (as in Simple FITS) or first extension (as in MEF).
        if handle and _extn == None:
            if handle[0].data == None:
                # Primary extension specified and no data present.
                # Try looking for data in next extension.
                if len(handle) > 1 and handle[1].data != None:
                    _extn = 1
                    expname += '[1]'
                else:
                    raise IOError("No valid image data in %s.\n"%expname)
            else:
                _extn = 0

        # Distortion-correction image names (DGEOFILE and its DX/DY images)
        self.dgeoname = None
        self.xgeoim = ""
        self.ygeoim = ""
        self.exptime = exptime
        self.group_indx = group_indx
        if not new:
            # Read in a copy of the header for this exposure/group/extension
            _header = fileutil.getHeader(expname,handle=handle)
            _chip = drutil.getChipId(_header)
            self.chip = str(_chip)
            # Keep track of any distortion correction images provided
            # for this chip
            self.dgeoname = fileutil.getKeyword(expname,'DGEOFILE',handle=handle)
            self.xgeoim,self.ygeoim = self.getDGEOExtn()
            if self.exptime == None:
                self.exptime = float(_header['EXPTIME'])
                # guard against zero exposure time breaking later scaling
                if self.exptime == 0.: self.exptime = 1.0
            #
            # Extract photometric transformation keywords
            #    If they do not exist, use default values of 0 and 1
            #
            # PHOTPLAM is converted from Angstroms to nm here (divide by 10)
            self.plam = float(fileutil.getKeyword(expname,'PHOTPLAM',handle=handle)) / 10.
            # NOTE(review): the float() call above would already have raised
            # if the keyword were missing, so this None check can never
            # trigger -- same applies to the PHOTZPT/PHOTFLAM checks below.
            if self.plam == None:
                # Setup a default value in case this keyword does not exist
                self.plam = 555.
            self.photzpt = float(fileutil.getKeyword(expname,'PHOTZPT',handle=handle))
            if self.photzpt == None: self.photzpt = 0.0
            self.photflam = float(fileutil.getKeyword(expname,'PHOTFLAM',handle=handle))
            if self.photflam == None: self.photflam = 1.0

            # Read in date-obs from primary header, accepting either
            # keyword spelling used by different instruments
            if _header:
                if 'date-obs' in _header:
                    self.dateobs = _header['date-obs']
                elif 'date_obs' in _header:
                    self.dateobs = _header['date_obs']
                else:
                    self.dateobs = None
            else:
                self.dateobs = None

            # Initialize the value of BUNIT based on the header information, if
            # the header has the keyword
            if 'BUNIT' in _header and _header['BUNIT'].find('ergs') < 0:
                self.bunit = _header['BUNIT']
            else:
                self.bunit = 'ELECTRONS'

        else:
            # 'new' image: no header to read, install defaults
            _chip = 1
            _header = None
            self.chip = str(_chip)
            # Set a default value for pivot wavelength
            self.plam = 555.
            self.photzpt = 0.0
            self.photflam = 1.0
            self.dateobs = None
            if self.exptime == None:
                self.exptime = 1.

        self.parity = parity
        self.header = _header
        self.extver = extver

        # Create a pointer to the mask file's data array
        # and the name of the original input DQ file
        self.maskname = None
        self.singlemaskname = None
        self.masklist = None
        if mask != None:
            # Specifies filenames to be used if created.
            self.maskname = mask[0]
            self.singlemaskname = mask[1]
            self.masklist = mask[2]

        self.dqname = dqname

        # Remember the name of the coeffs file generated for this chip
        self.coeffs = self.buildCoeffsName()

        # Read the name of idcfile from image header if not explicitly
        # provided by user.
        if idckey != None and idckey.lower() != 'wcs':
            # build a '[0]' (primary header) filename for the IDC lookup
            _indx = expname.find('[')
            if  _indx > -1:
                _idc_fname = expname[:_indx]+'[0]'
            else: _idc_fname = expname+'[0]'

            idcfile, idctype = drutil.getIDCFile(self.header,keyword=idckey,
                                        directory=idcdir)
        else:
            idcfile = None
            idctype = None

        # When the keyword value came from the header, record the actual
        # IDC type found there instead of the literal 'header' request
        if (idckey != None)  and (idckey.lower() == 'header'):
            idckey = idctype

        # Get distortion model and WCS info.
        self.geometry = ObsGeometry(expname, idcfile, idckey=idckey,
                chip=_chip, new=new, header=self.header,
                pa_key=pa_key, rot=rot, date=self.dateobs,
                ref_pscale=ref_pscale, binned=binned, mt_wcs=mt_wcs)

        # Remember the name and type of the IDC file used...
        self.idcfile = idcfile
        self.idctype = idctype

        # Remember the names of the filters used for the exposure
        self.filters = self.geometry.filter1+','+self.geometry.filter2

        # Define shape here...
        # nx,ny,pixel scale
        #
        if wcs != None:
            # We have been passed a WCS to use
            self.geometry.wcs = wcs
            self.geometry.model.pscale = wcs.pscale
            if expname != None:
                self.geometry.wcs.rootname = expname

        self.naxis1 = self.geometry.wcs.naxis1
        self.naxis2 = self.geometry.wcs.naxis2
        self.pscale = self.geometry.wcs.pscale
        self.shape = (self.naxis1,self.naxis2,self.pscale)

        # Keep track of the positions of the corners of the exposure
        # both for the RAW image and the
        # distortion-corrected, unscaled, unrotated image
        self.corners = {'raw':np.zeros((4,2),dtype=np.float64),'corrected':np.zeros((4,2),dtype=np.float64)}
        self.setCorners()

        # Generate BLOT output name specific to this Exposure
        _blot_extn = '_sci'+repr(extver)+'_blt.fits'
        self.outblot = fileutil.buildNewRootname(self.name,extn=_blot_extn)

        # Keep track of undistorted frame's position relative to metachip
        # Zero-point offset for chip relative to meta-chip product
        # These values get computed using 'setSingleOffsets' from 'writeCoeffs'
        # to insure that the final XDELTA/YDELTA values have been computed.
        self.product_wcs = self.geometry.wcslin
        self.xzero = 0.
        self.yzero = 0.
        self.chip_shape = (0.,0.)
        self.xsh2 = 0.
        self.ysh2 = 0.

        if _open:
            # close the handle we opened ourselves (caller-supplied handles
            # are left open for the caller to manage)
            handle.close()
            del handle
コード例 #53
0
ファイル: makesmall.py プロジェクト: jhunkeler/reftools
def dxy(dgeofile, filter=None, colcorr=None, corrext='DX', minsize=32,
        debug=False):
    """Build subsampled CDBS _dxy files from full-frame (Jay's)
    DXM and DYM images.

    If there is a column/row correction provided, it will be removed
    before resampling the full DXM,DYM images. In that case, the
    'corrext' parameter specifies the extension from the full-sized
    image that needs to have this column/row correction removed.

    Parameters
    ----------
    dgeofile : str
        Name of the full-size input DGEOFILE ('*_dxy.fits'); may include
        IRAF/environment variables in the path.
    filter : str or None
        Filter name for the output header; when None it is read from the
        FILTER1/FILTER2 keywords of the DGEOFILE primary header.
    colcorr : str or None
        Name of a column/row correction image to subtract before resampling.
    corrext : str
        EXTNAME ('DX' or 'DY') of the extensions the correction applies to.
    minsize : int
        Minimum number of samples along each axis of the resampled arrays.
    debug : bool
        When True, write out full-size corrected arrays for inspection.

    Raises
    ------
    InputError
        If the input DGEOFILE or the column/row correction image is not
        found, or no filter name is available.

    """
    from stsci.tools import fileutil

    # ACS filter wheel membership, used at the end to reconstruct the
    # FILTER1/FILTER2 keyword pair when only one filter name is known.
    wheel1 = ['F475W', 'F555W', 'F606W', 'F850LP', 'F625W',
              'F658N', 'F775W', 'F502N', 'F550M']
    wheel2 = ['F435W', 'F814W', 'F660N']

    odgeofile = dgeofile
    # Interpret input filename, expanding any variables used for path to file
    dgeofile = fileutil.osfn(dgeofile)
    dgeoroot = os.path.split(dgeofile)[1]
    if not os.path.exists(dgeofile):
        print(' Input: ',dgeofile)
        raise InputError("No valid input found! ")

    # If column/row correction image is specified, open it for removal from
    # DGEOFILE...
    if colcorr is not None:
        if not os.path.exists(colcorr):
            raise InputError('No valid column/row correction image found!')

        # User specified a column/row correction image to be subtracted from
        # full DGEOFILE before resampling
        corrimg = pyfits.open(colcorr,'readonly')
    else:
        print("======")
        print('No column/row correction removed from DGEOFILE before resampling.')
        print("======")

    # Open file for processing
    #dxyfile = pyfits.open('qbu16424j_dxy.fits','readonly')         # Model _DXY file
    dxyfile = pyfits.open(dgeofile,'readonly')         # Model _DXY file
    detector = dxyfile[0].header['detector']
    print('DETECTOR', detector)
    print('')

    # Read in filter name from DGEOFILE directly, if not specified by user
    filter1 = None
    filter2 = None
    if filter is None:
        if 'FILTER1' in dxyfile[0].header:
            filter1 = dxyfile[0].header['FILTER1']
            filter2 = dxyfile[0].header['FILTER2']
            if filter2 == 'N/A': filter2 = 'CLEAR2S'
            if filter2.find('CLEAR') > -1: filter = filter1
            else: filter = filter2
        else:
            # Error case, filter name not provided in file or by user
            dxyfile.close()
            print('ERROR: Filter name not found in DGEOFILE! Please specify...')
            raise InputError

    filter = filter.upper()     # On input, case can be upper or lower
    print('Filter', filter)

    # Get the shape of each chip from the first DGEOFILE array
    dxy_shape = dxyfile[1].data.shape

    # compute step size needed to create DXY file no smaller than 32x32
    # NOTE: integer (floor) division is required here; a float stepsize
    # would make the range() calls below raise TypeError on Python 3.
    stepsize = min(dxy_shape[1] // minsize, dxy_shape[0] // minsize)
    # create the grid in such a way that it contains the indices of the
    # first and last element of the full size array (0-4096) inclusive
    grid=[dxy_shape[1],dxy_shape[0],stepsize,stepsize]
    xpts = np.array(list(range(0,grid[0]+1,grid[2])),np.float32)
    ypts = np.array(list(range(0,grid[1]+1,grid[3])),np.float32)
    xygrid = np.meshgrid(xpts,ypts)
    # this padding is necessary so that the end points in the small npol file
    # match the end point of the large dgeo file.
    if xygrid[0][:,-1][-1] >= dxy_shape[1]: xygrid[0][:,-1] = [dxy_shape[1]-1]*len(xygrid[0][:,-1])
    if xygrid[1][-1][-1] >= dxy_shape[0]: xygrid[1][-1] = [dxy_shape[0]-1]*len(xygrid[1][-1])

    # count the number of chips in DGEOFILE
    numchips = 0
    extname = dxyfile[1].header['EXTNAME']

    for extn in dxyfile:
        if 'extname' in extn.header and extn.header['extname'] == extname:
            numchips += 1

    # process each chip
    for chip in range(1,numchips+1):
        # Process DX and DY for each chip
        for xy in ['DX','DY']:
            print('Processing chip from extension: ',xy,',',chip)
            onaxis1 = dxyfile[xy,chip].header['NAXIS1']
            onaxis2 = dxyfile[xy,chip].header['NAXIS2']
            if 'CCDCHIP' not in dxyfile[xy,chip].header:
                ccdchip = 1
            else:
                ccdchip = dxyfile[xy,chip].header['CCDCHIP']
            cdelt1 = grid[2]
            cdelt2 = grid[3]

            dxy = dxyfile[xy,chip].data.copy()
            if colcorr is not None and xy == corrext:
                # Remove column/row correction from this extension
                xycorr = corrimg[0].data
            else:
                xycorr = None

            # define default chipname for debug results; None indicates no debugging
            cname = None
            if debug:
                cname = 'full_68corr_'+xy+str(chip)+'_dxy.fits'

            # CDELT1/2 are the stepsize used to create the npl files.
            # It's best to write cdelt in the headers of the npl files instead of try
            # to calculate cdelt from naxis and onaxis. Keep onaxis for reference.
            dxyfile[xy,chip].data = resample_chip(dxy,xygrid,corrxy=xycorr,chipname=cname)
            dxyfile[xy,chip].header.update('ONAXIS1', onaxis1, "NAXIS1 of full size dgeo file")
            dxyfile[xy,chip].header.update('ONAXIS2', onaxis2, "NAXIS2 of full size dgeo file")
            dxyfile[xy,chip].header.update('CCDCHIP', ccdchip, "CCDCHIP from full size dgeo file")
            dxyfile[xy,chip].header.update('CDELT1',  cdelt1,  "Coordinate increment along axis")
            dxyfile[xy,chip].header.update('CDELT2',  cdelt2,  "Coordinate increment along axis")
    # Get filter info ready for use in updating output image header

    if filter1 is None:
        if filter in wheel1:
            filter1 = filter
            filter2 = 'CLEAR2L'

        if filter in wheel2:
            filter2 = filter
            filter1 = 'CLEAR1L'

    print(filter1, filter2)

    # Update keywords
    #newname = detector.lower()+'_'+str(stepsize)+'_' + filter.lower() + '_npl.fits'
    newname = dgeoroot[:dgeoroot.find('_dxy.fits')] + '_npl.fits'
    if os.path.exists(newname): os.remove(newname)
    dxyfile[0].header['filename'] = newname
    dxyfile[0].header['filter1'] = filter1
    dxyfile[0].header['filter2'] = filter2
    dxyfile[0].header.update('pedigree','INFLIGHT 01/03/2002 01/10/2005')
    dxyfile[0].header.update('date',fileutil.getDate())
    dxyfile[0].header.add_history('File generated from DGEOFILE: %s'%odgeofile,after='pedigree')
    dxyfile.writeto(newname)

    # close open file handles
    if colcorr is not None:
        corrimg.close()
    dxyfile.close()

    # finish by cleaning up output image header by removing unnecessary keywords
    print('Cleaning up header')
    fixheader(filter,newname, odgeofile)
    print('Finished creating new file: ',newname)
コード例 #54
0
def apply_d2im_correction(fname, d2imcorr):
    """
    Logic to decide whether to apply the D2IM correction.

    Parameters
    ----------
    fname : `~astropy.io.fits.HDUList` or str
        Input FITS science file object.
    d2imcorr : bool
        Flag indicating if D2IM is should be enabled if allowed.

    Return
    ------
    applyD2IMCorr : bool
        Flag whether to apply the correction.

    The D2IM correction is applied to a science file if it is in the
    allowed corrections for the instrument. The name of the file
    with the correction is saved in the ``D2IMFILE`` keyword in the
    primary header. When the correction is applied the name of the
    file is saved in the ``D2IMEXT`` keyword in the 1st extension header.

    """
    fname, toclose = _toclose(fname)
    # try/finally guarantees the file opened by _toclose() is closed on
    # every exit path; the early returns previously leaked the handle.
    try:
        if not d2imcorr:
            logger.info("D2IM correction not requested - not applying it.")
            return False
        # get D2IMFILE kw from primary header
        try:
            fd2im0 = fname[0].header['D2IMFILE']
        except KeyError:
            logger.info(
                "D2IMFILE keyword is missing - D2IM correction will not be applied."
            )
            return False
        if fd2im0 == 'N/A':
            utils.remove_distortion(fname, "D2IMFILE")
            return False
        fd2im0 = fileutil.osfn(fd2im0)
        if not fileutil.findFile(fd2im0):
            message = "D2IMFILE {0} not found.".format(fd2im0)
            logger.critical(message)
            raise IOError(message)
        try:
            # get D2IMEXT kw from first extension header
            fd2imext = fname[1].header['D2IMEXT']
        except KeyError:
            # the case of D2IMFILE kw present in primary header but D2IMEXT missing
            # in first extension header
            return True
        fd2imext = fileutil.osfn(fd2imext)
        if fd2imext and fileutil.findFile(fd2imext):
            # apply the correction only when the reference file has changed
            return fd2im0 != fd2imext
        # D2IM file defined in first extension may not be found
        # but if a valid kw exists in the primary header,
        # detector to image correction should be applied.
        return True
    finally:
        if toclose:
            fname.close()
コード例 #55
0
ファイル: util.py プロジェクト: gbrammer/drizzlepac
def getDefaultConfigObj(taskname, configObj, input_dict=None, loadOnly=True):
    """ Return default configObj instance for task updated
        with user-specified values from input_dict.

        Parameters
        ----------
        taskname : string
            Name of task to load into TEAL

        configObj : string
            The valid values for 'configObj' would be::

                None                      - loads last saved user .cfg file
                'defaults'                - loads task default .cfg file
                name of .cfg file (string)- loads user-specified .cfg file

        input_dict : dict, optional
            Set of parameters and values specified by user to be different from
            what gets loaded in from the .cfg file for the task.  ``None``
            (the default) means no overrides; a ``None`` sentinel is used
            instead of a mutable ``{}`` default so the dict is never shared
            across calls.

        loadOnly : bool
            Setting 'loadOnly' to False causes the TEAL GUI to start allowing the
            user to edit the values further and then run the task if desired.

    """
    if configObj is None:
        # Start by grabbing the default values without using the GUI
        # This insures that all subsequent use of the configObj includes
        # all parameters and their last saved values
        configObj = teal.load(taskname)
    elif isinstance(configObj, str):
        if configObj.lower().strip() == 'defaults':
            # Load task default .cfg file with all default values
            configObj = teal.load(taskname, defaults=True)
            # define default filename for configObj
            configObj.filename = taskname.lower() + '.cfg'
        else:
            # Load user-specified .cfg file with its special default values
            # we need to call 'fileutil.osfn()' to insure all environment
            # variables specified by the user in the configObj filename are
            # expanded to the full path
            configObj = teal.load(fileutil.osfn(configObj))

    # merge in the user values for this run
    # this, though, does not save the results for use later
    # (an empty dict and None are both treated as "no overrides")
    if input_dict:  # and configObj not in [None, {}]:
        # check to see whether any input parameters are unexpected.
        # Any unexpected parameters provided on input should be reported and
        # the code should stop
        validateUserPars(configObj, input_dict)

        # If everything looks good, merge user inputs with configObj and continue
        cfgpars.mergeConfigObj(configObj, input_dict)
        # Update the input .cfg file with the updated parameter values
        #configObj.filename = os.path.join(cfgpars.getAppDir(),os.path.basename(configObj.filename))
        #configObj.write()

    if not loadOnly:
        # We want to run the GUI AFTER merging in any parameters
        # specified by the user on the command-line and provided in
        # input_dict
        configObj = teal.teal(configObj, loadOnly=False)

    return configObj
コード例 #56
0
def apply_d2im_correction(fname, d2imcorr):
    """
    Logic to decide whether to apply the D2IM correction.

    Parameters
    ----------
    fname : `~astropy.io.fits.HDUList` or str
        Input FITS science file object.
    d2imcorr : bool
        Flag indicating if D2IM is should be enabled if allowed.

    Return
    ------
    applyD2IMCorr : bool
        Flag whether to apply the correction.

    The D2IM correction is applied to a science file if it is in the
    allowed corrections for the instrument. The name of the file
    with the correction is saved in the ``D2IMFILE`` keyword in the
    primary header. When the correction is applied the name of the
    file is saved in the ``D2IMEXT`` keyword in the 1st extension header.

    """
    fname, toclose = _toclose(fname)
    # try/finally guarantees the file opened by _toclose() is closed on
    # every exit path; the early returns previously leaked the handle.
    try:
        if not d2imcorr:
            logger.info("D2IM correction not requested - not applying it.")
            return False
        # get D2IMFILE kw from primary header
        try:
            fd2im0 = fname[0].header['D2IMFILE']
        except KeyError:
            logger.info("D2IMFILE keyword is missing - D2IM correction will not be applied.")
            return False
        if fd2im0 == 'N/A':
            utils.remove_distortion(fname, "D2IMFILE")
            return False
        fd2im0 = fileutil.osfn(fd2im0)
        if not fileutil.findFile(fd2im0):
            message = "D2IMFILE {0} not found.".format(fd2im0)
            logger.critical(message)
            raise IOError(message)
        try:
            # get D2IMEXT kw from first extension header
            fd2imext = fname[1].header['D2IMEXT']
        except KeyError:
            # the case of D2IMFILE kw present in primary header but D2IMEXT missing
            # in first extension header
            return True
        fd2imext = fileutil.osfn(fd2imext)
        if fd2imext and fileutil.findFile(fd2imext):
            # apply the correction only when the reference file has changed
            return fd2im0 != fd2imext
        # D2IM file defined in first extension may not be found
        # but if a valid kw exists in the primary header,
        # detector to image correction should be applied.
        return True
    finally:
        if toclose:
            fname.close()
コード例 #57
0
def update(input,refdir="jref$",local=None,interactive=False,wcsupdate=True):
    """
    Updates headers of files given as input to point to the new reference files
    NPOLFILE and D2IMFILE required with the new C version of MultiDrizzle.

    Parameters
    -----------
    input : string or list
                Name of input file or files acceptable forms:
                  - single filename with or without directory
                  - @-file
                  - association table
                  - python list of filenames
                  - wildcard specification of filenames

    refdir : string
                Path to directory containing new reference files, either
                environment variable or full path.

    local : boolean
                Specifies whether or not to copy new reference files to local
                directory for use with the input files.

    interactive : boolean
                Specifies whether or not to interactively ask the user for the
                exact names of the new reference files instead of automatically
                searching a directory for them.

    wcsupdate : boolean
                Specifies whether or not to update the WCS information in this
                file to use the new reference files.

    Examples
    --------
    1. A set of associated images specified by an ASN file can be updated to use
       the NPOLFILEs and D2IMFILE found in the local directory defined using
       the `myjref$` environment variable under PyRAF using::

            >>>import updatenpol
            >>>updatenpol.update('j8bt06010_asn.fits', 'myref$')

    2. Another use under Python would be to feed it a specific list of files
       to be updated using::

          >>> updatenpol.update(['file1_flt.fits','file2_flt.fits'],'myjref$')

    3. Files in another directory can also be processed using::

          >>> updatenpol.update('data$*flt.fits','../new/ref/')

    Notes
    -----
    .. warning::
        This program requires access to the `jref$` directory in order
        to evaluate the DGEOFILE specified in the input image header.
        This evaluation allows the program to get the information it
        needs to identify the correct NPOLFILE.

    The use of this program now requires that a directory be set up with
    all the new NPOLFILE and D2IMFILE reference files for ACS (a single
    directory for all files for all ACS detectors will be fine, much like
    jref).  Currently, all the files generated by the ACS team has initially
    been made available at::

        /grp/hst/acs/lucas/new-npl/

    The one known limitation to how this program works comes from
    confusion if more than 1 file could possibly be used as the new
    reference file. This would only happen when NPOLFILE reference files
    have been checked into CDBS multiple times, and there are several
    versions that apply to the same detector/filter combination.  However,
    that can be sorted out later if we get into that situation at all.

    """
    print('UPDATENPOL Version',__version__+'('+__vdate__+')')
    # expand (as needed) the list of input files
    files,fcol = parseinput.parseinput(input)

    # expand reference directory name (if necessary) to
    # interpret IRAF or environment variable names.
    # This is computed unconditionally because the error messages below
    # reference 'rdir' even when running interactively.
    rdir = fu.osfn(refdir)
    if not interactive:
        ngeofiles,ngcol = parseinput.parseinput(os.path.join(rdir,'*npl.fits'))
        # Find D2IMFILE in refdir for updating input file header as well
        d2ifiles,d2col = parseinput.parseinput(os.path.join(rdir,"*d2i.fits"))

    # Now, build a matched list of input files and DGEOFILE reference files
    # to use for selecting the appropriate new reference file from the
    # refdir directory.
    for f in files:
        print('Updating: ',f)
        fdir = os.path.split(f)[0]
        # Open each file...
        fimg = fits.open(f, mode='update')
        phdr = fimg['PRIMARY'].header
        fdet = phdr['detector']
        # get header of DGEOFILE
        dfile = phdr.get('DGEOFILE','')
        if dfile in ['N/A','',' ',None]:
            npolname = ''
        else:
            # read the DGEOFILE header; this also verifies the file exists
            dhdr = fits.getheader(fu.osfn(dfile))
            if not interactive:
                # search all new NPOLFILEs for one that matches current DGEOFILE config
                npol = find_npolfile(ngeofiles,fdet,[phdr['filter1'],phdr['filter2']])
            else:
                if sys.version_info[0] >= 3:
                    npol = input("Enter name of NPOLFILE for %s:"%f)
                else:
                    npol = raw_input("Enter name of NPOLFILE for %s:"%f)
                if npol == "": npol = None

            if npol is None:
                errstr =  "No valid NPOLFILE found in "+rdir+" for detector="+fdet+"\n"
                errstr += " filters = "+phdr['filter1']+","+phdr['filter2']
                raise ValueError(errstr)

            npolname = os.path.split(npol)[1]
            if local:
                npolname = os.path.join(fdir,npolname)
                # clobber any previous copies of this reference file
                if os.path.exists(npolname): os.remove(npolname)
                shutil.copy(npol,npolname)
            else:
                if '$' in refdir:
                    npolname = refdir+npolname
                else:
                    npolname = os.path.join(refdir,npolname)
        phdr.set('NPOLFILE', value=npolname,
                 comment="Non-polynomial corrections in Paper IV LUT",
                 after='DGEOFILE')

        # Now find correct D2IFILE
        if not interactive:
            d2i = find_d2ifile(d2ifiles,fdet)
        else:
            if sys.version_info[0] >= 3:
                d2i = input("Enter name of D2IMFILE for %s:"%f)
            else:
                d2i = raw_input("Enter name of D2IMFILE for %s:"%f)
            if d2i == "": d2i = None

        if d2i is None:
            print('=============\nWARNING:')
            print("    No valid D2IMFILE found in "+rdir+" for detector ="+fdet)
            print("    D2IMFILE correction will not be applied.")
            print('=============\n')
            d2iname = ""
        else:
            d2iname = os.path.split(d2i)[1]
            if local:
                # Copy D2IMFILE to local data directory alongside input file as well
                d2iname = os.path.join(fdir,d2iname)
                # clobber any previous copies of this reference file
                if os.path.exists(d2iname): os.remove(d2iname)
                shutil.copy(d2i,d2iname)
            else:
                if '$' in refdir:
                    d2iname = refdir+d2iname
                else:
                    d2iname = os.path.join(refdir,d2iname)

        phdr.set('D2IMFILE', value=d2iname,
                 comment="Column correction table",
                 after='DGEOFILE')

        # Close this input file header and go on to the next
        fimg.close()

        if wcsupdate:
            updatewcs.updatewcs(f)
コード例 #58
0
ファイル: adrizzle.py プロジェクト: brechmos-stsci/drizzlepac
def run(configObj, wcsmap=None):
    """ Interface for running `wdrizzle` from TEAL or Python command-line.

    This code performs all file ``I/O`` to set up the use of the drizzle code for
    a single exposure to replicate the functionality of the original `wdrizzle`.

    Parameters
    ----------
    configObj : dict-like
        TEAL configObj with all task parameters, including the
        'Data Scaling Parameters' and 'User WCS Parameters' sections.
    wcsmap : object, optional
        Custom WCS mapping object.  NOTE(review): this argument is accepted
        but is not forwarded to ``do_driz()`` below (``wcsmap=None`` is
        passed) -- confirm whether that is intentional.
    """

    # Insure all output filenames specified have .fits extensions
    if configObj['outdata'][-5:] != '.fits': configObj['outdata'] += '.fits'
    if not util.is_blank(configObj['outweight']) and configObj['outweight'][-5:] != '.fits': configObj['outweight'] += '.fits'
    if not util.is_blank(configObj['outcontext']) and configObj['outcontext'][-5:] != '.fits': configObj['outcontext'] += '.fits'

    # Keep track of any files we need to open
    in_sci_handle = None
    in_wht_handle = None
    out_sci_handle = None
    out_wht_handle = None
    out_con_handle = None

    _wcskey = configObj['wcskey']
    if util.is_blank(_wcskey):
        _wcskey = ' '

    scale_pars = configObj['Data Scaling Parameters']
    user_wcs_pars = configObj['User WCS Parameters']

    # Open the SCI (and WHT?) image
    # read file to get science array
    insci = get_data(configObj['input'])
    expin = fileutil.getKeyword(configObj['input'],scale_pars['expkey'])
    in_sci_phdr = fits.getheader(fileutil.parseFilename(configObj['input'])[0])

    # we need to read in the input WCS
    input_wcs = stwcs.wcsutil.HSTWCS(configObj['input'],wcskey=_wcskey)

    if not util.is_blank(configObj['inweight']):
        inwht = get_data(configObj['inweight']).astype(np.float32)
    else:
        # Generate a default weight map of all good pixels
        inwht = np.ones(insci.shape,dtype=insci.dtype)

    output_exists = False
    outname = fileutil.osfn(fileutil.parseFilename(configObj['outdata'])[0])
    if os.path.exists(outname):
        output_exists = True
    # Output was specified as a filename, so open it in 'update' mode
    outsci = get_data(configObj['outdata'])

    if output_exists:
        # we also need to read in the output WCS from pre-existing output
        output_wcs = stwcs.wcsutil.HSTWCS(configObj['outdata'])

        out_sci_hdr = fits.getheader(outname)
        outexptime = out_sci_hdr['DRIZEXPT']
        if 'ndrizim' in out_sci_hdr:
            uniqid = out_sci_hdr['ndrizim']+1
        else:
            uniqid = 1

    else:  # otherwise, define the output WCS either from user pars or refimage
        if util.is_blank(configObj['User WCS Parameters']['refimage']):
            # Define a WCS based on user provided WCS values
            # NOTE:
            #   All parameters must be specified, not just one or a few
            if not util.is_blank(user_wcs_pars['outscale']):
                output_wcs = wcs_functions.build_hstwcs(
                    user_wcs_pars['raref'], user_wcs_pars['decref'],
                    user_wcs_pars['xrefpix'], user_wcs_pars['yrefpix'],
                    user_wcs_pars['outnx'], user_wcs_pars['outny'],
                    user_wcs_pars['outscale'], user_wcs_pars['orient'] )
            else:
                # Define default WCS based on input image
                applydist = True
                if input_wcs.sip is None or input_wcs.instrument=='DEFAULT':
                    applydist = False
                output_wcs = stwcs.distortion.utils.output_wcs([input_wcs],undistort=applydist)
        else:
            refimage = configObj['User WCS Parameters']['refimage']
            refroot,extroot = fileutil.parseFilename(refimage)
            if extroot is None:
                fimg = fits.open(refroot)
                for i,extn in enumerate(fimg):
                    if 'CRVAL1' in extn.header: # Key on CRVAL1 for valid WCS
                        refwcs = wcsutil.HSTWCS('{}[{}]'.format(refroot,i))
                        if refwcs.wcs.has_cd():
                            extroot = i
                            break
                fimg.close()
                # try to find extension with valid WCS
                refimage = "{}[{}]".format(refroot,extroot)
            # Define the output WCS based on a user specified reference image WCS
            output_wcs = stwcs.wcsutil.HSTWCS(refimage)
        # Initialize values used for combining results
        outexptime = 0.0
        uniqid = 1

    # Set up the output data array and insure that the units for that array is 'cps'
    if outsci is None:
        # Define a default blank array based on definition of output_wcs
        #outsci = np.zeros((output_wcs._naxis2,output_wcs._naxis1),dtype=np.float32)
        outsci = np.empty((output_wcs._naxis2,output_wcs._naxis1),dtype=np.float32)
        outsci.fill(np.nan)
    else:
        # Convert array to units of 'cps', if needed
        if outexptime != 0.0:
            np.divide(outsci, outexptime, outsci)
        outsci = outsci.astype(np.float32)

    # Now update output exposure time for additional input file
    outexptime += expin

    outwht = None
    if not util.is_blank(configObj['outweight']):
        outwht = get_data(configObj['outweight'])

    if outwht is None:
        outwht = np.zeros((output_wcs._naxis2,output_wcs._naxis1),dtype=np.float32)
    else:
        outwht = outwht.astype(np.float32)

    outcon = None
    keep_con = False

    if not util.is_blank(configObj['outcontext']):
        outcon = get_data(configObj['outcontext'])
        keep_con = True
        if outcon is None:
            outcon = np.zeros((1,output_wcs._naxis2,output_wcs._naxis1),dtype=np.int32)
        else:
            outcon = outcon.astype(np.int32)
            planeid = int((uniqid - 1)/ 32)

            # Add a new plane to the context image if planeid overflows
            while outcon.shape[0] <= planeid:
                plane = np.zeros_like(outcon[0])
                # give the 2-D plane a leading axis so it can be stacked
                # onto the 3-D context array (np.append with axis= requires
                # matching number of dimensions)
                outcon = np.append(outcon, plane[np.newaxis, :, :], axis=0)

    # Interpret wt_scl parameter
    if configObj['wt_scl'] == 'exptime':
        wt_scl = expin
    elif configObj['wt_scl'] == 'expsq':
        wt_scl = expin*expin
    else:
        wt_scl = float(configObj['wt_scl'])

    # Interpret coeffs parameter to determine whether to apply coeffs or not
    undistort = True
    if not configObj['coeffs'] or input_wcs.sip is None or input_wcs.instrument == 'DEFAULT':
        undistort = False
    # turn off use of coefficients if undistort is False (coeffs == False)
    if not undistort:
        input_wcs.sip = None
        input_wcs.cpdis1 = None
        input_wcs.cpdis2 = None
        input_wcs.det2im = None

    wcslin = distortion.utils.output_wcs([input_wcs],undistort=undistort)

    # Perform actual drizzling now...
    _vers = do_driz(insci, input_wcs, inwht,
            output_wcs, outsci, outwht, outcon,
            expin, scale_pars['in_units'],
            wt_scl, wcslin_pscale=wcslin.pscale ,uniqid=uniqid,
            pixfrac=configObj['pixfrac'], kernel=configObj['kernel'],
            fillval=scale_pars['fillval'], stepsize=configObj['stepsize'],
            wcsmap=None)

    out_sci_handle,outextn = create_output(configObj['outdata'])
    if not output_exists:
        # Also, define default header based on input image Primary header
        out_sci_handle[outextn].header = in_sci_phdr.copy()

    # Update header of output image with exptime used to scale the output data
    # if out_units is not counts, this will simply be a value of 1.0
    # the keyword 'exptime' will always contain the total exposure time
    # of all input image regardless of the output units
    out_sci_handle[outextn].header['EXPTIME'] = outexptime

    # create CTYPE strings with any '-SIP' suffix removed.
    # (str.find() returns -1 when the substring is absent, which is truthy,
    #  so the original truthiness test was incorrect; use 'in' instead.)
    ctype1 = input_wcs.wcs.ctype[0]
    ctype2 = input_wcs.wcs.ctype[1]
    if '-SIP' in ctype1: ctype1 = ctype1.replace('-SIP','')
    if '-SIP' in ctype2: ctype2 = ctype2.replace('-SIP','')

    # Update header with WCS keywords
    out_sci_handle[outextn].header['ORIENTAT'] = output_wcs.orientat
    out_sci_handle[outextn].header['CD1_1'] = output_wcs.wcs.cd[0][0]
    out_sci_handle[outextn].header['CD1_2'] = output_wcs.wcs.cd[0][1]
    out_sci_handle[outextn].header['CD2_1'] = output_wcs.wcs.cd[1][0]
    out_sci_handle[outextn].header['CD2_2'] = output_wcs.wcs.cd[1][1]
    out_sci_handle[outextn].header['CRVAL1'] = output_wcs.wcs.crval[0]
    out_sci_handle[outextn].header['CRVAL2'] = output_wcs.wcs.crval[1]
    out_sci_handle[outextn].header['CRPIX1'] = output_wcs.wcs.crpix[0]
    out_sci_handle[outextn].header['CRPIX2'] = output_wcs.wcs.crpix[1]
    out_sci_handle[outextn].header['CTYPE1'] = ctype1
    out_sci_handle[outextn].header['CTYPE2'] = ctype2
    out_sci_handle[outextn].header['VAFACTOR'] = 1.0


    if scale_pars['out_units'] == 'counts':
        np.multiply(outsci, outexptime, outsci)
        out_sci_handle[outextn].header['DRIZEXPT'] = outexptime

    else:
        out_sci_handle[outextn].header['DRIZEXPT'] = 1.0

    # Update header keyword NDRIZIM to keep track of how many images have
    # been combined in this product so far
    out_sci_handle[outextn].header['NDRIZIM'] = uniqid

    #define keywords to be written out to product header
    drizdict = outputimage.DRIZ_KEYWORDS.copy()

    # Update drizdict with current values
    drizdict['VER']['value'] = _vers[:44]
    drizdict['DATA']['value'] = configObj['input'][:64]
    drizdict['DEXP']['value'] = expin
    drizdict['OUDA']['value'] = configObj['outdata'][:64]
    drizdict['OUWE']['value'] = configObj['outweight'][:64]
    drizdict['OUCO']['value'] = configObj['outcontext'][:64]
    drizdict['MASK']['value'] = configObj['inweight'][:64]
    drizdict['WTSC']['value'] = wt_scl
    drizdict['KERN']['value'] = configObj['kernel']
    drizdict['PIXF']['value'] = configObj['pixfrac']
    drizdict['OUUN']['value'] = scale_pars['out_units']
    drizdict['FVAL']['value'] = scale_pars['fillval']
    drizdict['WKEY']['value'] = configObj['wcskey']
    outputimage.writeDrizKeywords(out_sci_handle[outextn].header,uniqid,drizdict)

    # add output array to output file
    out_sci_handle[outextn].data = outsci
    # Keep a copy of the science header BEFORE closing the handle, since the
    # HDUList contents may no longer be accessible after close().
    final_sci_hdr = out_sci_handle[outextn].header.copy()
    out_sci_handle.close()

    if not util.is_blank(configObj['outweight']):
        out_wht_handle,outwhtext = create_output(configObj['outweight'])
        out_wht_handle[outwhtext].header = final_sci_hdr.copy()
        out_wht_handle[outwhtext].data = outwht
        out_wht_handle.close()

    if keep_con:
        out_con_handle,outconext = create_output(configObj['outcontext'])
        out_con_handle[outconext].data = outcon
        out_con_handle.close()