Code example #1
File: catalogs.py  Project: r3fang/subpixal
    def set_image(self, image):
        """
        Set image to be used for source finding.

        Parameters
        ----------
        image: numpy.ndarray, str
            When setting an image either a `numpy.ndarray` of image data or
            a string file name is acceptable. Image file name may be followed
            by an extension specification such as ``'file1.fits[1]'`` or
            ``'file1.fits[(sci,1)]'`` (by default, the first image-like
            extension will be used).

        """
        self._image = image
        self._image_ext = None

        if isinstance(image, str):
            files = parseat.parse_cs_line(image,
                                          default_ext='*',
                                          clobber=False,
                                          fnamesOnly=False,
                                          doNotOpenDQ=True,
                                          im_fmode='readonly',
                                          dq_fmode='readonly',
                                          msk_fmode='readonly',
                                          logfile=None,
                                          verbose=False)

            if len(files) > 1:
                for f in files:
                    f.release_all_images()
                raise ValueError("Only a single file can be specified as "
                                 "an image.")

            # get extension number
            self._image_ext = files[0].image.hdu.index_of(files[0].fext[0])
            files[0].release_all_images()
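
A minimal usage sketch for the ``set_image`` method above. The enclosing catalog class is not part of this snippet, so ``_DemoCatalog`` below is a hypothetical stand-in that only mirrors the two accepted input forms described in the docstring (a ``numpy.ndarray`` or a file name with an optional extension specification); it is not the subpixal implementation. In the real class, the string form triggers the ``parseat.parse_cs_line`` call shown above to resolve the extension number.

import numpy as np

class _DemoCatalog:
    """Hypothetical stand-in for the catalog class that defines set_image()."""
    def set_image(self, image):
        # Keep a reference to the image; the real method additionally resolves
        # the FITS extension (via parseat.parse_cs_line) when a file name is given.
        self._image = image
        self._image_ext = None
        if isinstance(image, str):
            print("Would open '{}' and resolve its image extension".format(image))

cat = _DemoCatalog()
cat.set_image(np.zeros((64, 64), dtype=np.float32))   # ndarray form
cat.set_image('file1.fits[(sci,1)]')                  # file name + extension spec
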
Code example #2
def photeq(files='*_flt.fits',
           sciext='SCI',
           errext='ERR',
           ref_phot=None,
           ref_phot_ext=None,
           phot_kwd='PHOTFLAM',
           aux_phot_kwd='PHOTFNU',
           search_primary=True,
           readonly=True,
           clobber=False,
           logfile='photeq.log'):
    """
    Adjust data values of images by equalizing each chip's PHOTFLAM value
    to a single common value so that all chips can be treated equally
    by ``AstroDrizzle``.

    Parameters
    ----------

    files : str (Default = ``'*_flt.fits'``)

        A string containing one of the following:

            * a comma-separated list of valid science image file names,
              e.g.: ``'j1234567q_flt.fits, j1234568q_flt.fits'``;

            * an @-file name, e.g., ``'@files_to_match.txt'``. See notes
              section for details on the format of the @-files.

        .. note::

            **Valid science image file names** are:

            * file names of existing FITS, GEIS, or WAIVER FITS files;

            * partial file names containing wildcard characters, e.g.,
              ``'*_flt.fits'``;

            * Association (ASN) tables (must have ``_asn``, or ``_asc``
              suffix), e.g., ``'j12345670_asn.fits'``.

    sciext : str (Default = 'SCI')
        Extension *name* of extensions whose data and/or headers should
        be corrected.

    errext : str (Default = 'ERR')
        Extension *name* of the extensions containing corresponding error
        arrays. Error arrays are corrected in the same way as science data.

    ref_phot : float, None (Default = None)
        A number indicating the new value of PHOTFLAM or PHOTFNU
        (set by 'phot_kwd') to which the data should be adjusted.

    ref_phot_ext : int, str, tuple, None (Default = None)
        Extension from which `photeq` should get the reference photometric
        value specified by the `phot_kwd` parameter. This parameter is ignored
        if `ref_phot` **is not** `None`. When `ref_phot_ext` is `None`, then
        the reference inverse sensitivity value will be picked from the
        first `sciext` of the first input image containing `phot_kwd`.

    phot_kwd : str (Default = 'PHOTFLAM')
        Specifies the primary keyword which contains inverse sensitivity
        (e.g., PHOTFLAM). It is used to compute conversion factors by
        which data should be rescaled.

    aux_phot_kwd : str, None, list of str (Default = 'PHOTFNU')
        Same as `phot_kwd` but describes *other* photometric keyword(s)
        that should be corrected by inverse of the scale factor used to correct
        data. These keywords are *not* used to compute conversion factors.
        Multiple keywords can be specified as a Python list of strings:
        ``['PHOTFNU', 'PHOTOHMY']``.

        .. note::

            If specifying multiple secondary photometric keywords in the TEAL
            interface, use a comma-separated list of keywords.

    search_primary : bool (Default = True)
        Specifies whether to first search the primary header for the
        presence of `phot_kwd` keyword and compute conversion factor based on
        that value. This is (partially) ignored when `ref_phot` is not `None`,
        in the sense that the value specified by `ref_phot` will be used as the
        reference, *but* the primary header of every image will still be
        searched for `phot_kwd` and `aux_phot_kwd` and those values will be
        corrected (if ``search_primary=True``).

    readonly : bool (Default = True)
        If `True`, `photeq` will not modify input files (nevertheless, it will
        convert input GEIS or WAVERED FITS files to MEF and could overwrite
        existing MEF files if `clobber` is set to `True`).
        The (console or log file) output, however, will be identical to the case
        when ``readonly=False``, and it can be examined before applying these
        changes to input files.

    clobber : bool (Default = False)
        Overwrite existing MEF files when converting input WAVERED FITS or GEIS
        to MEF.

    logfile : str, None (Default = 'photeq.log')
        File name of the log file.

    Notes
    -----

    By default, `photeq` will search for the first inverse sensitivity
    value (given by the header keyword specified by the `phot_kwd` parameter,
    e.g., PHOTFLAM or PHOTFNU) found in the input images and it will equalize
    all other images to this reference value.

    It is possible to tell `photeq` to look for the reference inverse
    sensitivity value only in a specific extension of input images, e.g.: 3,
    ('sci',3), etc. This can be done by setting `ref_phot_ext` to a specific
    extension. This may be useful, for example, for WFPC2 images: WF3 chip was
    one of the better calibrated chips, and so, if one prefers to have
    inverse sensitivities equalized to the inverse sensitivity of the WF3 chip,
    one can set ``ref_phot_ext=3``.

    Alternatively, one can provide their own reference inverse sensitivity
    value to which all other images should be "equalized" through the
    parameter `ref_phot`.

    .. note::

       Default parameter values (except for `files`, `readonly`, and `clobber`)
       should be acceptable for most HST images.

    .. warning::

       If images are intended to be used with ``AstroDrizzle``, it is
       recommended that sky background measurement be performed on the
       "equalized" images, as `photeq` is not aware of the sky user keyword in
       the image headers and thus cannot correct sky values already recorded
       in the headers.

    Examples
    --------

    #. In most cases the default parameters should suffice:

           >>> from drizzlepac import photeq
           >>> photeq.photeq(files='*_flt.fits', readonly=False)

    #. If the re-calibration needs to be done on PHOTFNU rather than
       PHOTFLAM, then:

           >>> photeq.photeq(files='*_flt.fits', phot_kwd='PHOTFNU',
           ...               aux_phot_kwd='PHOTFLAM')

    #. If for WFPC2 data one desires that PHOTFLAM from WF3 be used as the
       reference in WFPC2 images, then:

           >>> photeq.photeq(files='*_flt.fits', ref_phot_ext=3) # or ('sci',3)

    """

    # Time it
    runtime_begin = datetime.now()

    # check that input file name is a string:
    if not isinstance(files, str):
        raise TypeError("Argument 'files' must be a comma-separated list of "
                        " file names")

    # Set-up log files:
    if isinstance(logfile, str):
        # first, in case there are any "leftover" file handlers,
        # close and remove them:
        for h in _log.handlers:
            if h is not _sh_log and isinstance(h, logging.FileHandler):
                h.close()
                _log.removeHandler(h)
        # create file handler:
        log_formatter = logging.Formatter('[%(levelname)s:] %(message)s')
        log_file_handler = logging.FileHandler(logfile)
        log_file_handler.setFormatter(log_formatter)
        # add log_file_handler to logger
        _log.addHandler(log_file_handler)

    elif logfile is not None:
        raise TypeError("Unsupported 'logfile' type")

    #  BEGIN:
    _mlinfo("***** {0} started on {1}".format(__taskname__, runtime_begin))
    _mlinfo("      Version {0} ({1})".format(__version__, __version_date__))

    # check that extension names are strings (or None for error ext):
    if sciext is None:
        sci_ext4parse = '*'
        ext2get = None
    else:
        if not isinstance(sciext, str):
            raise TypeError("Argument 'sciext' must be a string or None")
        sciext = sciext.strip()
        if sciext.upper() == 'PRIMARY':
            sciext = sciext.upper()
            ext2get = (sciext, 1)
        else:
            ext2get = (sciext, '*')

        sci_ext4parse = ext2get

    if errext is not None and not isinstance(errext, str):
        raise TypeError("Argument 'errext' must be a string or None")

    # check that phot_kwd is supported:
    if not isinstance(phot_kwd, str):
        raise TypeError("Argument 'phot_kwd' must be a string")
    phot_kwd = phot_kwd.strip().upper()

    # check that ref_phot_ext has correct type:
    if ref_phot_ext is not None and not \
       (isinstance(ref_phot_ext, int) or isinstance(ref_phot_ext, str) \
        or (isinstance(ref_phot_ext, tuple) and len(ref_phot_ext) == 2 \
            and isinstance(ref_phot_ext[0], str) and \
            isinstance(ref_phot_ext[1], int))):
        raise TypeError("Unsupported 'ref_phot_ext' type")
    if isinstance(ref_phot_ext, str):
        ref_phot_ext = (ref_phot_ext, 1)

    if aux_phot_kwd is None:
        aux_phot_kwd = []

    elif isinstance(aux_phot_kwd, str):
        aux_phot_kwd = [aux_phot_kwd.strip().upper()]
        if phot_kwd == aux_phot_kwd[0]:
            raise ValueError("Auxiliary photometric keyword must be different "
                             "from the main photometric keyword 'phot_kwd'.")

    elif hasattr(aux_phot_kwd, '__iter__'):
        if not all([isinstance(phot, str) for phot in aux_phot_kwd]):
            raise TypeError(
                "Argument 'aux_phot_kwd' must be a string, list of "
                "strings, or None")
        aux_phot_kwd = [phot.strip().upper() for phot in aux_phot_kwd]
        if phot_kwd in aux_phot_kwd:
            raise ValueError("Auxiliary photometric keyword(s) must be "
                             "different from the main photometric keyword "
                             "'phot_kwd'.")

    else:
        raise TypeError("Argument 'aux_phot_kwd' must be a string, list of "
                        "strings, or None")

    # read input file list:
    fl = parseat.parse_cs_line(csline=files,
                               default_ext=sci_ext4parse,
                               im_fmode='readonly' if readonly else 'update',
                               clobber=clobber,
                               fnamesOnly=True,
                               doNotOpenDQ=True)

    # check if user supplied file extensions, set them to the sciext,
    # and warn that they will be ignored:
    for f in fl:
        if f.count > 1 or f.fext[0] != sci_ext4parse:
            _mlwarn("WARNING: Extension specifications for file {:s} "
                    "will be ignored. Using all {:s} extensions instead."
                    .format(f.image,  'image-like' if sciext is None else \
                            "{:s}".format(utils.ext2str(sciext,
                                                        default_extver=None))))

    # find the reference PHOTFLAM/PHOTFNU:
    flc = fl[:]
    ref_hdu = None
    ref_ext = None
    ref_user = True

    if ref_phot is None:
        ref_user = False
        for f in flc:
            f.convert2ImageRef()

            # get primary hdu:
            pri_hdu = f.image.hdu[0]

            # find all valid extensions:
            if ref_phot_ext is None:
                if sciext == 'PRIMARY':
                    extnum = [0]
                else:
                    extnum = utils.get_ext_list(f.image, sciext)

                is_pri_hdu = [f.image.hdu[ext] is pri_hdu for ext in extnum]

                # if necessary, add primary header to the hdu list:
                if search_primary:
                    try:
                        pri_index = is_pri_hdu.index(True)
                        extnum.insert(0, extnum.pop(pri_index))
                    except ValueError:
                        extnum.insert(0, 0)

            else:
                extnum = [ref_phot_ext]

            for ext in extnum:
                hdu = f.image.hdu[ext]
                if phot_kwd in hdu.header:
                    ref_phot = hdu.header[phot_kwd]
                    ref_ext = ext
                    ref_hdu = hdu
                    break

            if ref_phot is None:
                _mlwarn("WARNING: Could not find specified inverse "
                        "         sensitivity keyword '{:s}'\n"
                        "         in any of the {} extensions of file '{}'.\n"
                        "         This input file will be ignored."
                        .format(phot_kwd, 'image-like' if sciext is None else \
                                "{:s}".format(utils.ext2str(sciext,
                                                            default_extver=None)),
                                os.path.basename(f.image.original_fname)))
                f.release_all_images()
                fl.remove(f)

            else:
                break

    if ref_phot is None:
        raise RuntimeError(
            "Could not find the inverse sensitivity keyword "
            "'{:s}' in the specified headers of "
            "the input image(s).\nCannot continue.".format(phot_kwd))

    aux_phot_kwd_list = ','.join(aux_phot_kwd)

    _mlinfo("\nPRIMARY PHOTOMETRIC KEYWORD: {:s}".format(phot_kwd))
    _mlinfo("SECONDARY PHOTOMETRIC KEYWORD(S): {:s}".format(
        aux_phot_kwd_list if aux_phot_kwd_list else 'None'))
    if ref_user:
        _mlinfo("REFERENCE VALUE PROVIDED BY USER: '******'={}\n".format(
            phot_kwd, ref_phot))
    else:
        _mlinfo("REFERENCE VALUE FROM FILE: '{:s}[{:s}]'\n".format(
            os.path.basename(f.image.original_fname), utils.ext2str(ref_ext)))
        _mlinfo("REFERENCE '{:s}' VALUE IS: {}".format(phot_kwd, ref_phot))

    # equalize PHOTFLAM/PHOTFNU
    for f in fl:
        # open the file if necessary:
        if f.fnamesOnly:
            _mlinfo("\nProcessing file '{:s}'".format(f.image))
            f.convert2ImageRef()
        else:
            _mlinfo("\nProcessing file '{:s}'".format(f.image.original_fname))

        # first, see if photflam is in the primary header and save this value:
        pri_conv = None
        if search_primary:
            whdu = f.image.hdu[0]
            if phot_kwd in whdu.header:
                _mlinfo("   * Primary header:")
                if whdu is ref_hdu:
                    pri_conv = 1.0
                    _mlinfo(
                        "     - '{}' = {} found in the primary header.".format(
                            phot_kwd, whdu.header[phot_kwd]))
                    _mlinfo("     - Data conversion factor based on primary "
                            "header: {}".format(pri_conv))
                else:
                    _mlinfo("     - '{}' found in the primary header.".format(
                        phot_kwd))
                    pri_conv = whdu.header[phot_kwd] / ref_phot
                    _mlinfo("     - Setting {:s} in the primary header to {} "
                            "(old value was {})".format(
                                phot_kwd, ref_phot, whdu.header[phot_kwd]))
                    _mlinfo("     - Data conversion factor based on primary "
                            "header: {}".format(pri_conv))
                    whdu.header[phot_kwd] = ref_phot

            # correct the "other" photometric keyword, if present:
            if pri_conv is not None and whdu is not ref_hdu:
                for aux_kwd in aux_phot_kwd:
                    if aux_kwd in whdu.header:
                        old_aux_phot = whdu.header[aux_kwd]
                        new_aux_phot = old_aux_phot / pri_conv
                        whdu.header[aux_kwd] = new_aux_phot
                        _mlinfo("     - Setting {:s} in the primary header "
                                "to {} (old value was {})".format(
                                    aux_kwd, new_aux_phot, old_aux_phot))

            # process data and error arrays when 'sciext' was specifically set to
            # 'PRIMARY':
            if sciext == 'PRIMARY' and pri_conv is not None:
                has_data = (hasattr(whdu, 'data') and whdu.data is not None)

                # correct data:
                if has_data:
                    if np.issubdtype(whdu.data.dtype, np.floating):
                        whdu.data *= pri_conv
                        _mlinfo(
                            "     - Data have been multiplied by {}".format(
                                pri_conv))
                    else:
                        _mlwarn("WARNING: Data not converted because it is of "
                                "non-floating point type.")

                # correct error array:
                if errext is not None:
                    eext = (errext, 1)
                    try:
                        whdu = f.image.hdu[eext]
                    except KeyError:
                        _mlwarn(
                            "     - WARNING: Error extension {:s} not found.".
                            format(utils.ext2str(eext)))

                        f.release_all_images()
                        continue

                    if hasattr(whdu, 'data') and whdu.data is not None:
                        if np.issubdtype(whdu.data.dtype, np.floating):
                            whdu.data *= pri_conv
                            _mlinfo("     - Error array (ext={}) has been "
                                    "multiplied by {}".format(eext, pri_conv))
                        else:
                            _mlinfo("     - Error array in extension {:s} "
                                    "contains non-floating point data.\n"
                                    "       Skipping this extension".format(
                                        utils.ext2str(eext)))

                f.release_all_images()
                continue

        # find all valid extensions:
        extnum = utils.get_ext_list(f.image, sciext)

        for ext in extnum:
            whdu = f.image.hdu[ext]
            conv = None

            if whdu is ref_hdu:
                _mlinfo("   * EXT: {} - This is the \"reference\" extension.\n"
                        "          Nothing to do. Skipping this extension...".
                        format(ext))
                continue

            has_data = (hasattr(whdu, 'data') and whdu.data is not None)

            if has_data and not np.issubdtype(whdu.data.dtype, np.floating):
                _mlinfo("   * EXT: {} contains non-floating point data. "
                        "Skipping this extension".format(ext))

            # find all auxiliary photometric keywords present in the header:
            paux = [aux_kwd for aux_kwd in aux_phot_kwd if aux_kwd \
                    in whdu.header]

            if phot_kwd in whdu.header:
                _mlinfo("   * EXT: {}".format(ext))
                old_phot = whdu.header[phot_kwd]
                conv = old_phot / ref_phot
                _mlinfo("     - Setting {:s} to {} (old value was {})".format(
                    phot_kwd, ref_phot, old_phot))
                whdu.header[phot_kwd] = ref_phot
                _mlinfo(
                    "     - Computed conversion factor for data: {}".format(
                        conv))

            elif pri_conv is None:
                _mlinfo("   * EXT: {}".format(ext))
                _mlinfo("     - '{:s} not found. Skipping this extension...".
                        format(phot_kwd))
                continue

            else:
                _mlinfo("   * EXT: {}".format(ext))

                # if paux:
                # print("ERROR: Primary photometric keyword ('{:s}') is "
                # "missing but\n       the secondary keywords ('{:s}') "
                # "are present. This extension cannot be processed."
                # .format(phot_kwd, ','.join(paux)))
                # continue

                _mlinfo("     - '{:s} not found. Using conversion factor "
                        "based\n       on the primary header: {}".format(
                            phot_kwd, pri_conv))
                conv = pri_conv

            # correct the "other" photometric keyword, if present:
            if conv is not None:
                for aux_kwd in paux:
                    old_aux_phot = whdu.header[aux_kwd]
                    new_aux_phot = old_aux_phot / conv
                    whdu.header[aux_kwd] = new_aux_phot
                    _mlinfo(
                        "     - Setting {:s} to {} (old value was {})".format(
                            aux_kwd, new_aux_phot, old_aux_phot))

            # correct data:
            if has_data:
                if conv is None:
                    _mlinfo("   * EXT: {}".format(ext))

                if np.issubdtype(whdu.data.dtype, np.floating):
                    whdu.data *= conv
                    _mlinfo(
                        "     - Data have been multiplied by {}".format(conv))
                else:
                    _mlinfo("WARNING: Non-floating point data. Data cannot "
                            "be re-scaled.")

            # correct error array:
            if errext is not None and isinstance(ext, tuple) and len(ext) == 2:
                eext = (errext, ext[1])
                try:
                    whdu = f.image.hdu[eext]
                except KeyError:
                    continue

                if hasattr(whdu, 'data') and whdu.data is not None:
                    if np.issubdtype(whdu.data.dtype, np.floating):
                        whdu.data *= conv
                        _mlinfo("     - Error array (ext={}) has been "
                                "multiplied by {}".format(eext, conv))
                    else:
                        _mlinfo("     - Error array in extension {:s} "
                                "contains non-floating point data.\n"
                                "       Skipping this extension".format(
                                    utils.ext2str(eext)))

        f.release_all_images()

    _mlinfo("\nDone.")

    if readonly:
        _mlinfo("\nNOTE: '{:s}' was run in READONLY mode\n"
                "       and input image(s)' content WAS NOT MODIFIED.".format(
                    __taskname__))

    # close all log file handlers:
    for h in _log.handlers:
        if h is not _sh_log and isinstance(h, logging.FileHandler):
            h.close()
            _log.removeHandler(h)
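
A minimal sketch of the per-extension rescaling arithmetic applied in the loop above. All numbers are made-up placeholders; the real task reads PHOTFLAM/PHOTFNU from the FITS headers and writes the corrected values back.

import numpy as np

ref_photflam = 7.5e-20            # reference inverse sensitivity (PHOTFLAM)
chip_photflam = 7.8e-20           # this chip's PHOTFLAM keyword value
chip_photfnu = 1.5e-7             # this chip's auxiliary keyword (PHOTFNU)
sci = np.ones((2048, 4096), dtype=np.float32)   # SCI data array

conv = chip_photflam / ref_photflam   # conversion factor computed from the headers
sci *= conv                           # SCI (and ERR) arrays are multiplied by conv
chip_photflam = ref_photflam          # primary keyword set to the common value
chip_photfnu /= conv                  # auxiliary keyword corrected by the inverse
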
Code example #3
File: photeq.py  Project: brechmos-stsci/drizzlepac
def photeq(files='*_flt.fits', sciext='SCI', errext='ERR',
           ref_phot=None, ref_phot_ext=None,
           phot_kwd='PHOTFLAM', aux_phot_kwd='PHOTFNU',
           search_primary=True,
           readonly=True, clobber=False, logfile='photeq.log'):
    """
    Adjust data values of images by equalizing each chip's PHOTFLAM value
    to a single common value so that all chips can be treated equally
    by `AstroDrizzle`.

    Parameters
    ----------

    files : str (Default = ``'*_flt.fits'``)

        A string containing one of the following:

            * a comma-separated list of valid science image file names,
              e.g.: ``'j1234567q_flt.fits, j1234568q_flt.fits'``;

            * an @-file name, e.g., ``'@files_to_match.txt'``. See notes
              section for details on the format of the @-files.

        .. note::

            **Valid science image file names** are:

            * file names of existing FITS, GEIS, or WAIVER FITS files;

            * partial file names containing wildcard characters, e.g.,
              ``'*_flt.fits'``;

            * Association (ASN) tables (must have ``_asn``, or ``_asc``
              suffix), e.g., ``'j12345670_asn.fits'``.

    sciext : str (Default = 'SCI')
        Extension *name* of extensions whose data and/or headers should
        be corrected.

    errext : str (Default = 'ERR')
        Extension *name* of the extensions containing corresponding error
        arrays. Error arrays are corrected in the same way as science data.

    ref_phot : float, None (Default = None)
        A number indicating the new value of PHOTFLAM or PHOTFNU
        (set by 'phot_kwd') to which the data should be adjusted.

    ref_phot_ext : int, str, tuple, None (Default = None)
        Extension from which `photeq` should get the reference photometric
        value specified by the `phot_kwd` parameter. This parameter is ignored
        if `ref_phot` **is not** `None`. When `ref_phot_ext` is `None`, then
        the reference inverse sensitivity value will be picked from the
        first `sciext` of the first input image containing `phot_kwd`.

    phot_kwd : str (Default = 'PHOTFLAM')
        Specifies the primary keyword which contains inverse sensitivity
        (e.g., PHOTFLAM). It is used to compute conversion factors by
        which data should be rescaled.

    aux_phot_kwd : str, None, list of str (Default = 'PHOTFNU')
        Same as `phot_kwd` but describes *other* photometric keyword(s)
        that should be corrected by inverse of the scale factor used to correct
        data. These keywords are *not* used to compute conversion factors.
        Multiple keywords can be specified as a Python list of strings:
        ``['PHOTFNU', 'PHOTOHMY']``.

        .. note::

            If specifying multiple secondary photometric keywords in the TEAL
            interface, use a comma-separated list of keywords.

    search_primary : bool (Default = True)
        Specifies whether to first search the primary header for the
        presence of `phot_kwd` keyword and compute conversion factor based on
        that value. This is (partially) ignored when `ref_phot` is not `None`,
        in the sense that the value specified by `ref_phot` will be used as the
        reference, *but* the primary header of every image will still be
        searched for `phot_kwd` and `aux_phot_kwd` and those values will be
        corrected (if ``search_primary=True``).

    readonly : bool (Default = True)
        If `True`, `photeq` will not modify input files (nevertheless, it will
        convert input GEIS or WAVERED FITS files to MEF and could overwrite
        existing MEF files if `clobber` is set to `True`).
        The (console or log file) output, however, will be identical to the case
        when ``readonly=False``, and it can be examined before applying these
        changes to input files.

    clobber : bool (Default = False)
        Overwrite existing MEF files when converting input WAVERED FITS or GEIS
        to MEF.

    logfile : str, None (Default = 'photeq.log')
        File name of the log file.

    Notes
    -----

    By default, `photeq` will search for the first inverse sensitivity
    value (given by the header keyword specified by the `phot_kwd` parameter,
    e.g., PHOTFLAM or PHOTFNU) found in the input images and it will equalize
    all other images to this reference value.

    It is possible to tell `photeq` to look for the reference inverse
    sensitivity value only in a specific extension of input images, e.g.: 3,
    ('sci',3), etc. This can be done by setting `ref_phot_ext` to a specific
    extension. This may be useful, for example, for WFPC2 images: WF3 chip was
    one of the better calibrated chips, and so, if one prefers to have
    inverse sensitivities equalized to the inverse sensitivity of the WF3 chip,
    one can set ``ref_phot_ext=3``.

    Alternatively, one can provide their own reference inverse sensitivity
    value to which all other images should be "equalized" through the
    parameter `ref_phot`.

    .. note::

       Default parameter values (except for `files`, `readonly`, and `clobber`)
       should be acceptable for most HST images.

    .. warning::

       If images are intended to be used with `AstroDrizzle`, it is recommended
       that sky background measurement be performed on the "equalized" images,
       as `photeq` is not aware of the sky user keyword in the image headers
       and thus cannot correct sky values already recorded in the headers.

    Examples
    --------

    #. In most cases the default parameters should suffice:

           >>> from drizzlepac import photeq
           >>> photeq.photeq(files='*_flt.fits', readonly=False)

    #. If the re-calibration needs to be done on PHOTFNU rather than
       PHOTFLAM, then:

           >>> photeq.photeq(files='*_flt.fits', phot_kwd='PHOTFNU',
           ...               aux_phot_kwd='PHOTFLAM')

    #. If for WFPC2 data one desires that PHOTFLAM from WF3 be used as the
       reference in WFPC2 images, then:

           >>> photeq.photeq(files='*_flt.fits', ref_phot_ext=3) # or ('sci',3)

    """

    # Time it
    runtime_begin = datetime.now()

    # check that input file name is a string:
    if not isinstance(files, str):
        raise TypeError("Argument 'files' must be a comma-separated list of "
                        " file names")

    # Set-up log files:
    if isinstance(logfile, str):
        # first, in case there are any "leftover" file handlers,
        # close and remove them:
        for h in _log.handlers:
            if h is not _sh_log and isinstance(h, logging.FileHandler):
                h.close()
                _log.removeHandler(h)
        # create file handler:
        log_formatter = logging.Formatter('[%(levelname)s:] %(message)s')
        log_file_handler = logging.FileHandler(logfile)
        log_file_handler.setFormatter(log_formatter)
        # add log_file_handler to logger
        _log.addHandler(log_file_handler)

    elif logfile is not None:
        raise TypeError("Unsupported 'logfile' type")

    #  BEGIN:
    _mlinfo("***** {0} started on {1}".format(__taskname__, runtime_begin))
    _mlinfo("      Version {0} ({1})".format(__version__, __vdate__))

    # check that extension names are strings (or None for error ext):
    if sciext is None:
        sci_ext4parse = '*'
        ext2get = None
    else:
        if not isinstance(sciext, str):
            raise TypeError("Argument 'sciext' must be a string or None")
        sciext = sciext.strip()
        if sciext.upper() == 'PRIMARY':
            sciext = sciext.upper()
            ext2get = (sciext, 1)
        else:
            ext2get = (sciext, '*')

        sci_ext4parse = ext2get

    if errext is not None and not isinstance(errext, str):
        raise TypeError("Argument 'errext' must be a string or None")

    # check that phot_kwd is supported:
    if not isinstance(phot_kwd, str):
        raise TypeError("Argument 'phot_kwd' must be a string")
    phot_kwd = phot_kwd.strip().upper()

    # check that ref_phot_ext has correct type:
    if ref_phot_ext is not None and not \
       (isinstance(ref_phot_ext, int) or isinstance(ref_phot_ext, str) \
        or (isinstance(ref_phot_ext, tuple) and len(ref_phot_ext) == 2 \
            and isinstance(ref_phot_ext[0], str) and \
            isinstance(ref_phot_ext[1], int))):
        raise TypeError("Unsupported 'ref_phot_ext' type")
    if isinstance(ref_phot_ext, str):
        ref_phot_ext = (ref_phot_ext, 1)

    if aux_phot_kwd is None:
        aux_phot_kwd = []

    elif isinstance(aux_phot_kwd, str):
        aux_phot_kwd = [aux_phot_kwd.strip().upper()]
        if phot_kwd == aux_phot_kwd[0]:
            raise ValueError("Auxiliary photometric keyword must be different "
                             "from the main photometric keyword 'phot_kwd'.")

    elif hasattr(aux_phot_kwd, '__iter__'):
        if not all([isinstance(phot, str) for phot in aux_phot_kwd]):
            raise TypeError("Argument 'aux_phot_kwd' must be a string, list of "
                        "strings, or None")
        aux_phot_kwd = [phot.strip().upper() for phot in aux_phot_kwd]
        if phot_kwd in aux_phot_kwd:
            raise ValueError("Auxiliary photometric keyword(s) must be "
                             "different from the main photometric keyword "
                             "'phot_kwd'.")

    else:
        raise TypeError("Argument 'aux_phot_kwd' must be a string, list of "
                        "strings, or None")

    # read input file list:
    fl = parseat.parse_cs_line(csline=files, default_ext=sci_ext4parse,
                               im_fmode='readonly' if readonly else 'update',
                               clobber=clobber, fnamesOnly=True,
                               doNotOpenDQ=True)

    # check if user supplied file extensions, set them to the sciext,
    # and warn that they will be ignored:
    for f in fl:
        if f.count > 1 or f.fext[0] != sci_ext4parse:
            _mlwarn("WARNING: Extension specifications for file {:s} "
                    "will be ignored. Using all {:s} extensions instead."
                    .format(f.image,  'image-like' if sciext is None else \
                            "{:s}".format(utils.ext2str(sciext,
                                                        default_extver=None))))

    # find the reference PHOTFLAM/PHOTFNU:
    flc = fl[:]
    ref_hdu = None
    ref_ext = None
    ref_user = True

    if ref_phot is None:
        ref_user = False
        for f in flc:
            f.convert2ImageRef()

            # get primary hdu:
            pri_hdu = f.image.hdu[0]

            # find all valid extensions:
            if ref_phot_ext is None:
                if sciext == 'PRIMARY':
                    extnum = [0]
                else:
                    extnum = utils.get_ext_list(f.image, sciext)

                is_pri_hdu = [f.image.hdu[ext] is pri_hdu for ext in extnum]

                # if necessary, add primary header to the hdu list:
                if search_primary:
                    try:
                        pri_index = is_pri_hdu.index(True)
                        extnum.insert(0, extnum.pop(pri_index))
                    except ValueError:
                        extnum.insert(0, 0)

            else:
                extnum = [ref_phot_ext]

            for ext in extnum:
                hdu = f.image.hdu[ext]
                if phot_kwd in hdu.header:
                    ref_phot = hdu.header[phot_kwd]
                    ref_ext = ext
                    ref_hdu = hdu
                    break

            if ref_phot is None:
                _mlwarn("WARNING: Could not find specified inverse "
                        "         sensitivity keyword '{:s}'\n"
                        "         in any of the {} extensions of file '{}'.\n"
                        "         This input file will be ignored."
                        .format(phot_kwd, 'image-like' if sciext is None else \
                                "{:s}".format(utils.ext2str(sciext,
                                                            default_extver=None)),
                                os.path.basename(f.image.original_fname)))
                f.release_all_images()
                fl.remove(f)

            else:
                break

    if ref_phot is None:
        raise RuntimeError("Could not find the inverse sensitivity keyword "
                           "'{:s}' in the specified headers of "
                           "the input image(s).\nCannot continue."
                           .format(phot_kwd))

    aux_phot_kwd_list = ','.join(aux_phot_kwd)

    _mlinfo("\nPRIMARY PHOTOMETRIC KEYWORD: {:s}".format(phot_kwd))
    _mlinfo("SECONDARY PHOTOMETRIC KEYWORD(S): {:s}"
              .format(aux_phot_kwd_list if aux_phot_kwd_list else 'None'))
    if ref_user:
        _mlinfo("REFERENCE VALUE PROVIDED BY USER: '******'={}\n"
                .format(phot_kwd, ref_phot))
    else:
        _mlinfo("REFERENCE VALUE FROM FILE: '{:s}[{:s}]'\n"
                .format(os.path.basename(f.image.original_fname),
                          utils.ext2str(ref_ext)))
        _mlinfo("REFERENCE '{:s}' VALUE IS: {}".format(phot_kwd, ref_phot))

    # equalize PHOTFLAM/PHOTFNU
    for f in fl:
        # open the file if necessary:
        if f.fnamesOnly:
            _mlinfo("\nProcessing file '{:s}'".format(f.image))
            f.convert2ImageRef()
        else:
            _mlinfo("\nProcessing file '{:s}'".format(f.image.original_fname))

        # first, see if photflam is in the primary header and save this value:
        pri_conv = None
        if search_primary:
            whdu = f.image.hdu[0]
            if phot_kwd in whdu.header:
                _mlinfo("   * Primary header:")
                if whdu is ref_hdu:
                    pri_conv = 1.0
                    _mlinfo("     - '{}' = {} found in the primary header."
                            .format(phot_kwd, whdu.header[phot_kwd]))
                    _mlinfo("     - Data conversion factor based on primary "
                              "header: {}".format(pri_conv))
                else:
                    _mlinfo("     - '{}' found in the primary header."
                            .format(phot_kwd))
                    pri_conv = whdu.header[phot_kwd] / ref_phot
                    _mlinfo("     - Setting {:s} in the primary header to {} "
                              "(old value was {})"
                            .format(phot_kwd, ref_phot, whdu.header[phot_kwd]))
                    _mlinfo("     - Data conversion factor based on primary "
                            "header: {}".format(pri_conv))
                    whdu.header[phot_kwd] = ref_phot

            # correct the "other" photometric keyword, if present:
            if pri_conv is not None and whdu is not ref_hdu:
                for aux_kwd in aux_phot_kwd:
                    if aux_kwd in whdu.header:
                        old_aux_phot = whdu.header[aux_kwd]
                        new_aux_phot = old_aux_phot / pri_conv
                        whdu.header[aux_kwd] = new_aux_phot
                        _mlinfo("     - Setting {:s} in the primary header "
                                "to {} (old value was {})"
                                .format(aux_kwd, new_aux_phot, old_aux_phot))

            # process data and error arrays when 'sciext' was specifically set to
            # 'PRIMARY':
            if sciext == 'PRIMARY' and pri_conv is not None:
                has_data = (hasattr(whdu, 'data') and
                            whdu.data is not None)

                # correct data:
                if has_data:
                    if np.issubdtype(whdu.data.dtype, np.floating):
                        whdu.data *= pri_conv
                        _mlinfo("     - Data have been multiplied by {}"
                                .format(pri_conv))
                    else:
                        _mlwarn("WARNING: Data not converted because it is of "
                                "non-floating point type.")

                # correct error array:
                if errext is not None:
                    eext = (errext, 1)
                    try:
                        whdu = f.image.hdu[eext]
                    except KeyError:
                        _mlwarn("     - WARNING: Error extension {:s} not found."
                                .format(utils.ext2str(eext)))

                        f.release_all_images()
                        continue

                    if hasattr(whdu, 'data') and whdu.data is not None:
                        if np.issubdtype(whdu.data.dtype, np.floating):
                            whdu.data *= pri_conv
                            _mlinfo("     - Error array (ext={}) has been "
                                    "multiplied by {}".format(eext, pri_conv))
                        else:
                            _mlinfo("     - Error array in extension {:s} "
                                    "contains non-floating point data.\n"
                                    "       Skipping this extension"
                                    .format(utils.ext2str(eext)))

                f.release_all_images()
                continue

        # find all valid extensions:
        extnum = utils.get_ext_list(f.image, sciext)

        for ext in extnum:
            whdu = f.image.hdu[ext]
            conv = None

            if whdu is ref_hdu:
                _mlinfo("   * EXT: {} - This is the \"reference\" extension.\n"
                        "          Nothing to do. Skipping this extension..."
                        .format(ext))
                continue

            has_data = (hasattr(whdu, 'data') and
                        whdu.data is not None)

            if has_data and not np.issubdtype(whdu.data.dtype, np.floating):
                _mlinfo("   * EXT: {} contains non-floating point data. "
                        "Skipping this extension".format(ext))

            # find all auxiliary photometric keywords present in the header:
            paux = [aux_kwd for aux_kwd in aux_phot_kwd if aux_kwd \
                    in whdu.header]

            if phot_kwd in whdu.header:
                _mlinfo("   * EXT: {}".format(ext))
                old_phot = whdu.header[phot_kwd]
                conv = old_phot / ref_phot
                _mlinfo("     - Setting {:s} to {} (old value was {})"
                        .format(phot_kwd, ref_phot, old_phot))
                whdu.header[phot_kwd] = ref_phot
                _mlinfo("     - Computed conversion factor for data: {}"
                        .format(conv))

            elif pri_conv is None:
                _mlinfo("   * EXT: {}".format(ext))
                _mlinfo("     - '{:s} not found. Skipping this extension..."
                        .format(phot_kwd))
                continue

            else:
                _mlinfo("   * EXT: {}".format(ext))

                # if paux:
                    # print("ERROR: Primary photometric keyword ('{:s}') is "
                          # "missing but\n       the secondary keywords ('{:s}') "
                          # "are present. This extension cannot be processed."
                          # .format(phot_kwd, ','.join(paux)))
                    # continue

                _mlinfo("     - '{:s} not found. Using conversion factor "
                        "based\n       on the primary header: {}"
                        .format(phot_kwd, pri_conv))
                conv = pri_conv

            # correct the "other" photometric keyword, if present:
            if conv is not None:
                for aux_kwd in paux:
                    old_aux_phot = whdu.header[aux_kwd]
                    new_aux_phot = old_aux_phot / conv
                    whdu.header[aux_kwd] = new_aux_phot
                    _mlinfo("     - Setting {:s} to {} (old value was {})"
                            .format(aux_kwd, new_aux_phot, old_aux_phot))

            # correct data:
            if has_data:
                if conv is None:
                    _mlinfo("   * EXT: {}".format(ext))

                if np.issubdtype(whdu.data.dtype, np.floating):
                    whdu.data *= conv
                    _mlinfo("     - Data have been multiplied by {}"
                            .format(conv))
                else:
                    _mlinfo("WARNING: Non-floating point data. Data cannot "
                            "be re-scaled.")

            # correct error array:
            if errext is not None and isinstance(ext, tuple) and len(ext) == 2:
                eext = (errext, ext[1])
                try:
                    whdu = f.image.hdu[eext]
                except KeyError:
                    continue

                if hasattr(whdu, 'data') and whdu.data is not None:
                    if np.issubdtype(whdu.data.dtype, np.floating):
                        whdu.data *= conv
                        _mlinfo("     - Error array (ext={}) has been "
                                "multiplied by {}".format(eext, conv))
                    else:
                        _mlinfo("     - Error array in extension {:s} "
                                "contains non-floating point data.\n"
                                "       Skipping this extension"
                                .format(utils.ext2str(eext)))

        f.release_all_images()

    _mlinfo("\nDone.")

    if readonly:
        _mlinfo("\nNOTE: '{:s}' was run in READONLY mode\n"
                "       and input image(s)' content WAS NOT MODIFIED."
                .format(__taskname__))

    # close all log file handlers:
    for h in _log.handlers:
        if h is not _sh_log and isinstance(h, logging.FileHandler):
            h.close()
            _log.removeHandler(h)
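
A short usage sketch of the preview-then-apply workflow suggested by the ``readonly`` parameter documented above. The file pattern and log file names are illustrative.

from drizzlepac import photeq

# Dry run: input files are not modified (see the 'readonly' note above), but the
# log records exactly which PHOTFLAM/PHOTFNU values and data would be rescaled.
photeq.photeq(files='*_flt.fits', readonly=True, logfile='photeq_preview.log')

# Once the preview looks right, apply the same changes in place.
photeq.photeq(files='*_flt.fits', readonly=False, logfile='photeq.log')
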
Code example #4
File: tweakback.py  Project: mdlpstsci/drizzlepac
def apply_tweak(drz_file,
                orig_wcs_name,
                output_wcs_name=None,
                input_files=None,
                default_extname='SCI',
                **kwargs):
    """
    Apply WCS solution recorded in drizzled file to distorted input images
    (``_flt.fits`` files) used to create the drizzled file.

    It is assumed that if the input images given by ``input_files`` were
    drizzled together, they would produce the drizzled image given by
    ``drz_file`` with the "original" primary WCS. It is also assumed that the
    drizzled image
    was aligned using ``tweakreg`` either to another image or to an external
    reference catalog. We will refer to the primary WCS in the drizzled image
    _before_ ``tweakreg`` was run as the "original" WCS and the WCS _after_
    ``tweakreg`` was run as "tweaked" WCS.

    By comparing both "original" and "tweaked" WCS, ``apply_wcs`` computes
    the correction that was applied by ``tweakreg`` to the "original" WCS
    and converts this correction in the drizzled image frame into a correction
    in the input image's (``input_files``) frame that will be applied to the
    primary WCS of input images. If updated input images are now resampled
    again, they would produce an image very close to ``drz_file`` but with
    a primary WCS very similar to the "tweaked" WCS instead of the "original"
    WCS.

    Parameters
    ----------
    drz_file : str
        File name of the drizzled image that contains both the "original" and
        "tweaked" WCS. Even though wildcards are allowed in the file name,
        their expansion must resolve to a single image file. By default,
        ``apply_tweak`` looks for the first image-like HDU in the drizzled
        image. To specify a particular extension from which to load WCS,
        append extension specification after the file name, for example:
            - ``'image_drz.fits[sci,1]'`` for first "sci" extension
            - ``'image_drz.fits[1]'`` for the first extension
            - ``'image_drz.fits[0]'`` for the primary HDU

    orig_wcs_name : str
        Name (provided by the ``WCSNAME?`` header keyword where ``?``
        represents a letter A-Z) of the "original" WCS. This is the WCS of
        the resampled image (obtained by drizzling all input images) _before_
        this resampled image was aligned ("tweaked") to another image/catalog.

        If ``orig_wcs_name`` is `None`, then the original WCS _must be
        specified_ using ``orig_wcs_key``. When ``orig_wcs_key`` is provided,
        ``orig_wcs_name`` is ignored altogether.

    output_wcs_name : str, None
        Value of ``WCSNAME`` to be used to label the updated solution in the
        input (e.g., ``_flt.fits``) files.  If left blank or ``None``, it will
        default to using either the current (primary) ``WCSNAME`` value from
        the ``drz_file`` or from the alternate WCS given by the
        ``tweaked_wcs_name`` or ``tweaked_wcs_key`` parameters.

    input_files : str, None
        Filenames of distorted images whose primary WCS is to be updated
        with the same transformation as used in the "tweaked" drizzled image.
        Default value of `None` indicates that input image filenames will be
        derived from the ``D*DATA`` keywords written out by ``AstroDrizzle``.
        If they cannot be found, the task will quit.

        ``input_files`` string can contain one of the following:

            * a comma-separated list of valid science image file names
              (see note below) and (optionally) extension specifications,
              e.g.: ``'j1234567q_flt.fits[1], j1234568q_flt.fits[sci,2]'``;

            * an @-file name, e.g., ``'@files_to_match.txt'``.

        .. note::
            **Valid science image file names** are:

            * file names of existing FITS, GEIS, or WAIVER FITS files;

            * partial file names containing wildcard characters, e.g.,
              ``'*_flt.fits'``;

            * Association (ASN) tables (must have ``_asn``, or ``_asc``
              suffix), e.g., ``'j12345670_asn.fits'``.

        .. warning::
            @-file names **MAY NOT** be followed by an extension
            specification.

        .. warning::
            If an association table or a partial file name with wildcard
            characters is followed by an extension specification, it will be
            considered that this extension specification applies to **each**
            file name in the association table or **each** file name
            obtained after wildcard expansion of the partial file name.

    default_extname : str
        Extension name of extensions in input images whose primary WCS
        should be updated. This value is used only when file names provided in
        ``input_files`` do not contain extension specifications.

    Other Parameters
    ----------------
    tweaked_wcs_name : str
        Name of the "tweaked" WCS. This is the WCS of
        the resampled image (obtained by drizzling all input images)  _after_
        this resampled image was aligned ("tweaked") to another image/catalog.

        When neither ``tweaked_wcs_name`` nor ``tweaked_wcs_key`` is
        provided, ``apply_tweak`` will take the current primary WCS in the
        drizzled image as the "tweaked" WCS. ``tweaked_wcs_name`` is ignored
        when ``tweaked_wcs_key`` is provided.

    tweaked_wcs_key : {' ', 'A'-'Z'}
        Same as ``tweaked_wcs_name`` except it specifies a WCS by key instead
        of name. When provided, ``tweaked_wcs_name`` is ignored.

    orig_wcs_key : {' ', 'A'-'Z'}
        Same as ``orig_wcs_name`` except it specifies a WCS by key instead
        of name. When provided, ``orig_wcs_name`` is ignored.

    Notes
    -----
    The algorithm used by this function is based on linearization of
    the exact compound operator that converts input image coordinates
    to the coordinates (in the input image) that would result in
    alignment with the new drizzled image WCS.

    .. warning::
        Parameters ``orig_wcs_name`` and ``tweaked_wcs_name`` (or their "key"
        equivalents) allow computation of transformation between *any two
        WCS* in the drizzled image and application of this transformation to the
        primary WCS of the input images. This will produce an
        expected result **only if** the WCS pointed to by ``orig_wcs_name`` was
        obtained by drizzling input images with their current primary WCS.


    Examples
    --------
    A drizzled image named ``acswfc_mos2_drz.fits`` was created from 4 images
    using ``AstroDrizzle``. The primary WCS of this drizzled image was named
    ``'INITIAL_GUESS'``. This drizzled image was then aligned to some other
    image using ``TweakReg`` and the updated ("tweaked") primary WCS was named
    ``'BEST_WCS'`` while the previous primary WCS - the WCS named
    ``'INITIAL_GUESS'`` - was archived by ``TweakReg`` under WCS key ``'C'``.
    We will refer to this archived WCS as the "original" WCS.
    ``apply_tweak`` can now be used to compute the
    transformation between the original and the tweaked WCS and apply this
    transformation to the WCS of each of the input images that were
    drizzle-combined to produce the resampled image ``acswfc_mos2_drz.fits``.

    The simplest way to accomplish this would be to run ``apply_tweak()`` using
    default parameters:

    >>> from drizzlepac import tweakback
    >>> tweakback.apply_tweak('acswfc_mos2_drz.fits', orig_wcs_name='INITIAL_GUESS')

    or

    >>> tweakback.apply_tweak('acswfc_mos2_drz.fits', orig_wcs_key='C')

    If the same WCS should be applied to a specific set of images or
    extensions in those images, then we can explicitly specify input files:

    >>> tweakback.apply_tweak(
    ...     'acswfc_mos2_drz.fits',
    ...     input_files='img_mos2a_flt.fits,img_mos2c_flt.fits[1],img_mos2d_flt.fits[sci,1]'
    ... )

    In the examples above, current primary WCS of the input
    ``'img_mos2?_flt.fits'`` files will be archived and the primary WCS will
    be replaced with a "tweaked" WCS obtained by applying relevant
    transformations to the current primary WCS. Because we did not specify
    ``output_wcs_name``, the name of this tweaked primary WCS in the
    input images will be set to ``'BEST_WCS'``.

    See Also
    --------
    stwcs.wcsutil.altwcs: Alternate WCS implementation

    """
    print(f"\n*** 'apply_tweak' version {__version__:s} started "
          f"at {util._ptime()[0]:s}: ***\n")

    tweaked_wcs_name = kwargs.get('tweaked_wcs_name', None)
    tweaked_wcs_key = kwargs.get('tweaked_wcs_key', None)
    orig_wcs_key = kwargs.get('orig_wcs_key', None)

    tweaked_wcs_key = _process_wcs_key_par('tweaked_wcs_key', kwargs)
    orig_wcs_key = _process_wcs_key_par('orig_wcs_key', kwargs)

    # load drizzled image and extract input file names (if needed) and
    # load specified WCS:

    fis = parse_cs_line(drz_file,
                        default_ext='*',
                        fnamesOnly=False,
                        doNotOpenDQ=True,
                        im_fmode="readonly")
    if len(fis) == 0:
        raise FileNotFoundError(f"Drizzled file '{drz_file}' not found.")
    elif len(fis) > 1:
        for f in fis:
            f.release_all_images()
        raise ValueError("When expanded, 'drz_file' should correspond to a "
                         "single file.")

    fi = fis[0]
    hdul = fi.image.hdu
    if len(fi.fext) == 1:
        drz_sciext = fi.fext[0]

    elif fi.fext:
        fi.release_all_images()
        raise ValueError(
            "Input drizzled image contains multiple image-like extensions. "
            "Please explicitly specify a single extension containing desired "
            "WCS.")

    else:
        fi.release_all_images()
        raise ValueError(
            "Specified extension was not found in the input drizzled image.")

    # check that there are at least two WCS in the drizzled image header:
    wkeys = altwcs.wcskeys(hdul, ext=drz_sciext)
    if len(wkeys) < 2:
        fi.release_all_images()
        raise ValueError(f"'{fi.image}[{ext2str(drz_sciext)}]' must "
                         "contain at least two valid WCS: original and "
                         "updated by tweakreg.")

    # load "tweaked" WCS
    tweaked_wcs_key, tweaked_wcs_name = _wcs_key_name(
        tweaked_wcs_key,
        tweaked_wcs_name,
        fi=fi,
        ext=drz_sciext,
        default_key=0,
        param_name='tweaked_wcs_name')
    tweaked_wcs = wcsutil.HSTWCS(hdul, ext=drz_sciext, wcskey=tweaked_wcs_key)

    # load "original" WCS
    if orig_wcs_key is None and orig_wcs_name is None:
        fi.release_all_images()
        raise ValueError(
            "Either 'orig_wcs_name' or 'orig_wcs_key' must be specified.")

    # default_key=-1 below is currently unused since we require that either
    # orig_wcs_key or orig_wcs_name be specified. However, if in the future
    # both are allowed to be None, the last WCS key in the list could serve
    # as the default for the "original" WCS.
    orig_wcs_key, orig_wcs_name = _wcs_key_name(orig_wcs_key,
                                                orig_wcs_name,
                                                fi=fi,
                                                ext=drz_sciext,
                                                default_key=-1,
                                                param_name='orig_wcs_name')
    orig_wcs = wcsutil.HSTWCS(hdul, ext=drz_sciext, wcskey=orig_wcs_key)

    # get RMS values reported for new solution
    crderr1 = fi.image.hdu[drz_sciext].header.get('CRDER1' + orig_wcs_key, 0.0)
    crderr2 = fi.image.hdu[drz_sciext].header.get('CRDER2' + orig_wcs_key, 0.0)

    # If input files were not explicitly specified, get the input (FLT) file
    # names from the drizzled image *before* releasing it. This information
    # is recorded in the 'D<nnn>DATA' keywords of the primary header of the
    # drizzled image ("D???DATA" below is an astropy header wildcard pattern).
    if input_files is None:
        input_files = ",".join(hdul[0].header["D???DATA"].values())

    fi.release_all_images()  # done with the resampled image

    # Process the list of input files:
    if not isinstance(default_extname, str):
        raise TypeError("Argument 'default_extname' must be a string")
    default_extname = default_extname.strip()
    if default_extname.upper() == 'PRIMARY':
        ext2get = ('PRIMARY', 1)
    else:
        ext2get = (default_extname, '*')

    # Build a list of input files and extensions
    fnames_ext = {}
    fis = parse_cs_line(input_files,
                        default_ext=ext2get,
                        fnamesOnly=False,
                        doNotOpenDQ=True,
                        im_fmode="readonly")
    for f in fis:
        f.release_all_images()
        if f.image in fnames_ext:
            fnames_ext[f.image] |= set(f.fext)
        else:
            fnames_ext[f.image] = set(f.fext)

    if output_wcs_name is None:
        output_wcs_name = tweaked_wcs_name
        print(f"\n* Setting 'output_wcs_name' to '{output_wcs_name}'")
        auto_output_name = True
    else:
        auto_output_name = False

    output_wcs_name_u = output_wcs_name.strip().upper()

    # Compute tweakback transformation to each extension of each input file.
    # This is the main part of this function.
    # Before applying new WCS solution, make sure we can use the same
    # output WCS name for the updated WCS in all input images.
    # Also, this gives us opportunity to remove duplicate extensions, if any.

    final_wcs_info = []

    for fname, extlist in fnames_ext.items():
        print(f"\n* Working on input image {fname:s} ...")

        fis = parse_cs_line(f"{fname}",
                            default_ext=ext2get,
                            fnamesOnly=False,
                            doNotOpenDQ=True,
                            im_fmode="readonly")
        if len(fis) != 1:
            # release every file that was opened before raising:
            for f in fis:
                f.release_all_images()
            raise AssertionError(
                "The algorithm should not open more than one file.")
        fi = fis[0]

        if not fi.fext:
            fi.release_all_images()
            print(
                f"  No valid input image extension found. Skipping image {fname}.\n"
            )
            continue

        current_wcs_info = {
            'fname': fname,
            'extlist': [],
            'archived_wcs_name': [],
            'updated_primary_wcs': []
        }
        final_wcs_info.append(current_wcs_info)

        # Process extensions
        hdu_list = []  # to avoid processing duplicate hdus
        try:
            for ext in extlist:
                imhdulist = fi.image.hdu
                hdu = imhdulist[ext]
                if hdu in hdu_list:
                    continue
                hdu_list.append(hdu)

                current_wcs_info['extlist'].append(ext)

                # Find the name under which to archive current WCS:
                all_wcs_names = [
                    v.upper() for v in altwcs.wcsnames(
                        imhdulist, ext, include_primary=False).values()
                ]

                if output_wcs_name_u in all_wcs_names:
                    if auto_output_name:
                        raise ValueError(
                            "Current value of 'output_wcs_name' was set to "
                            f"'{tweaked_wcs_name}' by default. However, this "
                            f"WCS name value was already used in {fname:s}[{ext2str(ext)}]. "
                            "Please re-run 'apply_tweak' again and explicitly "
                            "provide a unique value for the output WCS name.")
                    else:
                        raise ValueError(
                            "Provided value of 'output_wcs_name' - "
                            f"'{output_wcs_name}' - was already used in "
                            f"{fname:s}[{ext2str(ext)}]. "
                            "Please re-run 'apply_tweak' again and explicitly "
                            "provide a unique value for the output WCS name.")

                if 'WCSNAME' in imhdulist[ext].header:
                    pri_wcs_name = imhdulist[ext].header['WCSNAME'].strip()
                else:
                    pri_wcs_name = 'NONAME'

                # add current output WCS name to the list so that archived
                # primary WCS will be archived under a different name:
                all_wcs_names.append(output_wcs_name)

                archived_name = altwcs._auto_increment_wcsname(
                    pri_wcs_name, all_wcs_names)
                current_wcs_info['archived_wcs_name'].append(archived_name)

                # compute updated WCS:
                new_wcs = wcsutil.HSTWCS(imhdulist, ext=ext)

                update_chip_wcs(new_wcs,
                                orig_wcs,
                                tweaked_wcs,
                                xrms=crderr1,
                                yrms=crderr2)
                new_wcs.setOrient()
                current_wcs_info['updated_primary_wcs'].append(new_wcs)

                print(
                    f"  - Computed new WCS solution for {fname:s}[{ext2str(ext)}]:"
                )
                repr_wcs = repr(new_wcs)
                print('\n'.join(
                    ['      ' + l.strip() for l in repr_wcs.split('\n')]))

        finally:
            fi.release_all_images()

    print("\n* Saving updated WCS to image headers:")

    for fwi in final_wcs_info:
        if not fwi['extlist']:
            continue

        fname = fwi['fname']

        fis = parse_cs_line(f"{fname}",
                            default_ext=ext2get,
                            fnamesOnly=False,
                            doNotOpenDQ=True,
                            im_fmode="update")
        fi = fis[0]

        # Process extensions
        try:
            for ext, archived_name, new_wcs in zip(fwi['extlist'],
                                                   fwi['archived_wcs_name'],
                                                   fwi['updated_primary_wcs']):
                imhdulist = fi.image.hdu
                hdu = imhdulist[ext]

                hdu.header['HISTORY'] = (
                    f"apply_tweak version: {__version__} ({date.today().isoformat():s})"
                )

                # Archive current primary WCS:
                awcs_key, awcs_name = altwcs.archive_wcs(
                    imhdulist,
                    ext,
                    wcsname=archived_name,
                    mode=altwcs.ArchiveMode.NO_CONFLICT)
                hdu.header['HISTORY'] = (
                    "apply_tweak: Archived Primary WCS under key "
                    f"'{awcs_key}' on {date.today().isoformat():s}")
                hdu.header['HISTORY'] = (
                    f"apply_tweak: WCSNAME{awcs_key}='{awcs_name}'")

                # Update primary WCS of this extension:
                wcs_hdr = new_wcs.wcs2header(
                    idc2hdr=new_wcs.idcscale is not None, relax=True)
                wcs_hdr.set('WCSNAME', output_wcs_name, before=0)
                wcs_hdr.set('WCSTYPE',
                            updatehdr.interpret_wcsname_type(output_wcs_name),
                            after=0)
                wcs_hdr.set('ORIENTAT', new_wcs.orientat, after=len(wcs_hdr))
                hdu.header.update(wcs_hdr)
                hdu.header['HISTORY'] = (
                    f"apply_tweak: Applied Primary WCS correction on {date.today().isoformat():s}"
                )

            str_extlist = '; '.join(map(ext2str, fwi['extlist']))
            print(f"  - Updated '{fname:s}', extensions: {str_extlist}")

        finally:
            util.updateNEXTENDKw(imhdulist)
            fi.release_all_images()