Example #1
def archive_prefix_OPUS_WCS(fobj, extname='SCI'):
    """ Identifies WCS keywords which were generated by OPUS and archived
        using a prefix of 'O' for all 'SCI' extensions in the file

        Parameters
        ----------
        fobj : str or `astropy.io.fits.HDUList`
            Filename or fits object of a file

    """
    if stwcs is None:
        print('=====================')
        print(
            'The STWCS package is needed to convert an old-style OPUS WCS to an alternate WCS'
        )
        print('=====================')
        raise ImportError

    closefits = False
    if isinstance(fobj, str):
        # A filename was provided as input
        fobj = fits.open(fobj, mode='update')
        closefits = True

    # Define the header
    ext = ('sci', 1)
    hdr = fobj[ext].header

    numextn = fileutil.countExtn(fobj)
    extlist = []
    for e in range(1, numextn + 1):
        extlist.append(('sci', e))

    # Ensure that the 'O' alternate WCS is present
    if 'O' not in wcsutil.wcskeys(hdr):
        # if not, archive the Primary WCS as the default OPUS WCS
        wcsutil.archiveWCS(fobj, extlist, wcskey='O', wcsname='OPUS')

    # find out how many SCI extensions are in the image
    numextn = fileutil.countExtn(fobj, extname=extname)
    if numextn == 0:
        extname = 'PRIMARY'

    # create HSTWCS object from PRIMARY WCS
    wcsobj = wcsutil.HSTWCS(fobj, ext=ext, wcskey='O')
    # get list of WCS keywords
    wcskeys = list(wcsobj.wcs2header().keys())

    # For each SCI extension...
    for e in range(1, numextn + 1):
        # Now, look for any WCS keywords with a prefix of 'O'
        for key in wcskeys:
            okey = 'O' + key[:7]
            hdr = fobj[(extname, e)].header
            if okey in hdr:
                # Update alternate WCS keyword with prefix-O OPUS keyword value
                hdr[key] = hdr[okey]

    if closefits:
        fobj.close()
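A minimal usage sketch, assuming the function above is importable (the filename below is hypothetical). The function accepts either a filename, in which case it opens and closes the file itself, or an HDUList already opened in 'update' mode:

from astropy.io import fits

# Pass a filename; the function handles opening/closing internally
archive_prefix_OPUS_WCS('j8cw02021_flt.fits')

# Or pass an HDUList opened in 'update' mode
with fits.open('j8cw02021_flt.fits', mode='update') as hdul:
    archive_prefix_OPUS_WCS(hdul)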
Example #2
def build_npolname(fobj, npolfile=None):
    """
    Build a NPOLNAME from NPOLFILE

    Parameters
    ----------
    fobj : `astropy.io.fits.HDUList`
        file object
    npolfile : string
        user supplied NPOLFILE keyword

    Returns
    -------
    npolname, npolfile
    """
    if not npolfile:
        try:
            npolfile = fobj[0].header["NPOLFILE"]
        except KeyError:
            npolfile = ' '
            if fileutil.countExtn(fobj, 'WCSDVARR'):
                npolname = 'UNKNOWN'
            else:
                npolname = 'NOMODEL'
        npolname = extract_rootname(npolfile, suffix='_npl')
        if npolname == 'NONE':
            npolname = 'NOMODEL'
    else:
        npolname = extract_rootname(npolfile, suffix='_npl')
        if npolname == 'NONE':
            npolname = 'NOMODEL'
    return npolname, npolfile
Example #3
def build_d2imname(fobj, d2imfile=None):
    """
    Build a D2IMNAME from D2IMFILE

    Parameters
    ----------
    fobj : `astropy.io.fits.HDUList`
        file object
    d2imfile : string
        user supplied D2IMFILE keyword

    Returns
    -------
    d2imname, d2imfile
    """
    if not d2imfile:
        try:
            d2imfile = fobj[0].header["D2IMFILE"]
        except KeyError:
            d2imfile = 'N/A'
            if fileutil.countExtn(fobj, 'D2IMARR'):
                d2imname = 'UNKNOWN'
            else:
                d2imname = 'NOMODEL'
        d2imname = extract_rootname(d2imfile, suffix='_d2i')
        if d2imname == 'NONE':
            d2imname = 'NOMODEL'
    else:
        d2imname = extract_rootname(d2imfile, suffix='_d2i')
        if d2imname == 'NONE':
            d2imname = 'NOMODEL'
    return d2imname, d2imfile
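Both helpers follow the same pattern; a hedged usage sketch (the filename and keyword value are hypothetical): given a primary header with NPOLFILE='jref$v971826aj_npl.fits', extract_rootname strips the path and the '_npl' suffix, so NPOLNAME becomes 'v971826aj'.

from astropy.io import fits

with fits.open('j8cw02021_flt.fits') as fobj:   # hypothetical file
    npolname, npolfile = build_npolname(fobj)   # e.g. ('v971826aj', 'jref$v971826aj_npl.fits')
    d2imname, d2imfile = build_d2imname(fobj)   # same logic with the '_d2i' suffix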
Example #4
def verify_sci_hdrname(filename):
    """Insures that HDRNAME keyword is populated in SCI extensions.

    This function checks to make sure the HDRNAME keyword in the SCI
    extension of the science image `filename` is populated with a valid
    non-empty string.
    """
    fhdu, closefits = proc_utils._process_input(filename)

    # Find all extensions to be updated
    numext = countExtn(fhdu, extname='SCI')

    for ext in range(1, numext + 1):
        sciext = ('sci', ext)
        scihdr = fhdu[sciext].header
        if 'hdrname' not in scihdr or scihdr['hdrname'].rstrip() == '':
            # We need to create a valid value for the keyword
            # Define new HDRNAME value in case it is needed.
            # Same value for all SCI extensions, so just precompute it and be ready.
            # This code came from 'stwcs.updatewcs.astrometry_utils'
            hdrname = "{}_{}".format(filename.replace('.fits', ''), scihdr['wcsname'])
            # Create full filename for headerlet:
            hfilename = "{}_hlet.fits".format(hdrname)
            # Update the header with the new value, inserting after WCSNAME
            scihdr.set('hdrname', hfilename, 'Name of headerlet file', after='wcsname')

    if closefits:
        fhdu.close()
        del fhdu
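A minimal usage sketch (the filename is hypothetical); note that any HDRNAME value created here is derived from the file's rootname plus the WCSNAME of each SCI extension:

# Ensure every SCI extension carries a valid HDRNAME before
# headerlets are written out for this exposure
verify_sci_hdrname('j8cw02021_flt.fits')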
Example #5
def compute_sregion(image, extname='SCI'):
    """Compute the S_REGION keyword for a given WCS.

    Parameters
    ----------
    image : `astropy.io.fits.HDUList`
        Image to update with the S_REGION keyword in each of the SCI extensions.

    extname : str, optional
        EXTNAME value for extension containing the WCS(s) to be updated
    """
    # This function could, conceivably, be called directly...
    hdu, closefits = _process_input(image)

    # Find all extensions to be updated
    numext = countExtn(hdu, extname=extname)

    for extnum in range(1, numext + 1):
        sregion_str = 'POLYGON ICRS '
        sciext = (extname, extnum)
        extwcs = wcsutil.HSTWCS(hdu, ext=sciext)
        footprint = extwcs.calc_footprint(center=True)
        for corner in footprint:
            sregion_str += '{} {} '.format(corner[0], corner[1])
        hdu[sciext].header['s_region'] = sregion_str

    # close file if opened by this function
    if closefits:
        hdu.close()
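The S_REGION value written above is a flat, space-separated string of the form 'POLYGON ICRS ra1 dec1 ra2 dec2 ...'. A self-contained sketch of the same string construction from a hypothetical four-corner footprint:

import numpy as np

footprint = np.array([[150.001, 2.001],    # hypothetical RA/Dec corners (deg)
                      [150.001, 2.051],
                      [149.951, 2.051],
                      [149.951, 2.001]])
sregion_str = 'POLYGON ICRS '
for corner in footprint:
    sregion_str += '{} {} '.format(corner[0], corner[1])
print(sregion_str)
# POLYGON ICRS 150.001 2.001 150.001 2.051 149.951 2.051 149.951 2.001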
Example #6
def create_image_footprint(image, refwcs, border=0.):
    """ Create the footprint of the image in the reference WCS frame.

    Parameters
    ----------
    image : `astropy.io.fits.HDUList` or str
        Image to extract sources for matching to
        the external astrometric catalog.

    refwcs : `stwcs.wcsutil.HSTWCS`
        Reference WCS for coordinate frame of image.

    border : float
        Buffer (in arcseconds) around edge of image to exclude astrometric
        sources.

    """
    # Interpret input image to generate initial source catalog and WCS
    if isinstance(image, str):
        image = pf.open(image)
    numSci = countExtn(image, extname='SCI')
    ref_x = refwcs._naxis1
    ref_y = refwcs._naxis2
    # convert border value into pixels
    border_pixels = int(border / refwcs.pscale)

    mask_arr = np.zeros((ref_y, ref_x), dtype=int)

    for chip in range(numSci):
        chip += 1
        # Build arrays of pixel positions for all edges of chip
        chip_y, chip_x = image['sci', chip].data.shape
        chipwcs = wcsutil.HSTWCS(image, ext=('sci', chip))
        xpix = np.arange(chip_x) + 1
        ypix = np.arange(chip_y) + 1
        edge_x = np.hstack([[1] * chip_y, xpix, [chip_x] * chip_y, xpix])
        edge_y = np.hstack([ypix, [1] * chip_x, ypix, [chip_y] * chip_x])
        edge_ra, edge_dec = chipwcs.all_pix2world(edge_x, edge_y, 1)
        edge_x_out, edge_y_out = refwcs.all_world2pix(edge_ra, edge_dec, 0)
        edge_x_out = np.clip(edge_x_out.astype(np.int32), 0, ref_x - 1)
        edge_y_out = np.clip(edge_y_out.astype(np.int32), 0, ref_y - 1)
        mask_arr[edge_y_out, edge_x_out] = 1

    # Fill in outline of each chip
    mask_arr = ndimage.binary_fill_holes(ndimage.binary_dilation(mask_arr, iterations=2))

    if border > 0.:
        mask_arr = ndimage.binary_erosion(mask_arr, iterations=border_pixels)

    return mask_arr
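The edge-pixel bookkeeping is the subtle part of the loop above: for a chip of shape (ny, nx) it stacks the left column, bottom row, right column, and top row in 1-based pixel coordinates before transforming them into the reference frame. A self-contained illustration with a tiny 3x4 chip:

import numpy as np

chip_y, chip_x = 3, 4                  # tiny example chip (ny, nx)
xpix = np.arange(chip_x) + 1           # 1-based x positions: 1..4
ypix = np.arange(chip_y) + 1           # 1-based y positions: 1..3
edge_x = np.hstack([[1] * chip_y, xpix, [chip_x] * chip_y, xpix])
edge_y = np.hstack([ypix, [1] * chip_x, ypix, [chip_y] * chip_x])
# (edge_x, edge_y) traces left column, bottom row, right column, top row
print(list(zip(edge_x.tolist(), edge_y.tolist())))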
Example #7
def compute_sregion(image, extname='SCI'):
    """Compute the S_REGION keyword for a given WCS.

    Parameters
    ----------
    image : `astropy.io.fits.HDUList`
        Image to update with the S_REGION keyword in each of the SCI extensions.

    extname : str, optional
        EXTNAME value for extension containing the WCS(s) to be updated
    """
    # This function could, conceivably, be called directly...
    hdu, closefits = _process_input(image)

    # Find all extensions to be updated
    numext = countExtn(hdu, extname=extname)

    for extnum in range(1, numext + 1):
        sciext = (extname, extnum)
        if 'd001data' not in hdu[0].header:
            sregion_str = 'POLYGON ICRS '
            # Working with FLT/FLC file, so simply use
            #  the array corners directly
            extwcs = wcsutil.HSTWCS(hdu, ext=sciext)
            footprint = extwcs.calc_footprint(center=True)
            for corner in footprint:
                sregion_str += '{} {} '.format(corner[0], corner[1])
        else:
            data = hdu[sciext].data
            if data.min() == 0 and data.max() == 0:
                continue
            # Working with a drizzled image, so we need to
            # get all the corners from each of the input files
            footprint = find_footprint(hdu, extname=extname, extnum=extnum)
            sregion_str = ''
            for region in footprint.corners:
                # S_REGION string should contain a separate POLYGON
                # for each region or chip in the SCI array
                sregion_str += 'POLYGON ICRS '
                for corner in region:
                    sregion_str += '{} {} '.format(corner[0], corner[1])

        hdu[sciext].header['s_region'] = sregion_str

    # close file if opened by this function
    if closefits:
        hdu.close()
Example #8
def restore_file_from_wcscorr(image, id='OPUS', wcskey=''):
    """ Copies the values of the WCS from the WCSCORR based on ID specified by user.
    The default will be to restore the original OPUS-derived values to the Primary WCS.
    If wcskey is specified, the WCS with that key will be updated instead.
    """

    if not isinstance(image, fits.HDUList):
        fimg = fits.open(image, mode='update')
        close_image = True
    else:
        fimg = image
        close_image = False
    numsci = fileutil.countExtn(fimg)
    wcs_table = fimg['WCSCORR']
    orig_rows = (wcs_table.data.field('WCS_ID') == id)  # rows matching requested ID
    # create an HSTWCS object to figure out what WCS keywords need to be updated
    wcsobj = stwcs.wcsutil.HSTWCS(fimg, ext=('sci', 1))
    wcshdr = wcsobj.wcs2header()
    for extn in range(1, numsci + 1):
        # find corresponding row from table
        ext_rows = (wcs_table.data.field('EXTVER') == extn)
        erow = np.where(np.logical_and(ext_rows, orig_rows))[0][0]
        for key in wcshdr:
            if key in wcs_table.data.names:  # ensure that keyword is a column in the table
                tkey = key

                if 'orient' in key.lower():
                    key = 'ORIENT'
                if wcskey == '':
                    skey = key
                else:
                    skey = key[:7] + wcskey
                fimg['sci', extn].header[skey] = wcs_table.data.field(tkey)[erow]
        for key in DEFAULT_PRI_KEYS:
            if key in wcs_table.data.names:
                if wcskey == '':
                    pkey = key
                else:
                    pkey = key[:7] + wcskey
                fimg[0].header[pkey] = wcs_table.data.field(key)[erow]

    utils.updateNEXTENDKw(fimg)

    # close the image now that the update has been completed.
    if close_image:
        fimg.close()
Example #9
def interpret_sregion(image, extname='SCI'):
    """Interpret the S_REGION keyword as a list of RA/Dec points"""
    # This function could, conceivably, be called directly...
    hdu, closefits = _process_input(image)

    # Find all extensions to be updated
    numext = countExtn(hdu, extname=extname)
    sregions = []
    for extnum in range(1, numext + 1):
        sregions.append(hdu[(extname, extnum)].header['s_region'])

    coords = []
    for region_str in sregions:
        # whitespace split avoids empty tokens from stray spaces
        radec_str = np.array(region_str.split()[2:], dtype=np.float64)
        coords.append(radec_str.reshape((radec_str.shape[0] // 2, 2)))

    if closefits:
        hdu.close()

    return coords
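The parsing step round-trips the string produced by compute_sregion; a self-contained sketch with a hypothetical single-polygon S_REGION value:

import numpy as np

region_str = 'POLYGON ICRS 150.001 2.001 150.001 2.051 149.951 2.051 149.951 2.001'
radec = np.array(region_str.split()[2:], dtype=np.float64)
corners = radec.reshape((radec.shape[0] // 2, 2))   # one (RA, Dec) pair per row
print(corners.shape)    # (4, 2)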
Example #10
def create_prefix_OPUS_WCS(fobj, extname='SCI'):
    """ Creates alternate WCS with a prefix of 'O' for OPUS generated WCS values
        to work with old MultiDrizzle.

        Parameters
        ----------
        fobj : str or `astropy.io.fits.HDUList`
            Filename or fits object of a file

        Raises
        ------
        IOError:
            if input FITS object was not opened in 'update' mode

    """
    # List of O-prefix keywords to create
    owcskeys = OPUS_WCSKEYS

    closefits = False
    if isinstance(fobj, str):
        # A filename was provided as input
        fobj = fits.open(fobj, mode='update')
        closefits = True
    else:
        # check to make sure this FITS obj has been opened in update mode
        if fobj.fileinfo(0)['filemode'] != 'update':
            print('File not opened with "mode=update". Quitting...')
            raise IOError

    # check for existence of O-prefix WCS
    if owcskeys[0] not in fobj['sci', 1].header:

        # find out how many SCI extensions are in the image
        numextn = fileutil.countExtn(fobj, extname=extname)
        if numextn == 0:
            extname = ''
        for extn in range(1, numextn + 1):
            hdr = fobj[(extname, extn)].header
            for okey in owcskeys:
                hdr[okey] = hdr[okey[1:] + 'O']

    # Close FITS image if we had to open it...
    if closefits:
        fobj.close()
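Because this function writes O-prefixed keywords back to the headers, an HDUList input must already be open in 'update' mode, otherwise it raises IOError. A hedged usage sketch (the filename is hypothetical):

from astropy.io import fits

with fits.open('j8cw02021_flt.fits', mode='update') as hdul:
    create_prefix_OPUS_WCS(hdul)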
Example #11
def _create_input_psf(psf_name, calimg, total_flux):

    # Create copy of input science image based on input psf filename
    psf_root = os.path.basename(psf_name)
    lib_psf_arr = fits.getdata(psf_name)
    lib_psf_arr *= total_flux

    lib_size = [lib_psf_arr.shape[0] // 2, lib_psf_arr.shape[1] // 2]

    # create 2-D Hamming filter to avoid edge effects
    # (scipy.signal.hamming moved to scipy.signal.windows.hamming)
    h = ss.windows.hamming(lib_psf_arr.shape[0])
    h2d = np.sqrt(np.outer(h, h))
    lib_psf_arr *= h2d

    # This will be the name of the new file containing the library PSF that will be drizzled to
    # match the input image `drzimg`
    psf_flt_name = psf_root.replace('.fits', '_psf_flt.fits')

    # create version of PSF that will be drizzled
    psf_base = fits.getdata(calimg, ext=1) * 0.0
    # Copy library PSF into this array
    out_cen = [psf_base.shape[0] // 2, psf_base.shape[1] // 2]
    edge = (lib_psf_arr.shape[0] % 2, lib_psf_arr.shape[1] % 2)
    psf_base[out_cen[0] - lib_size[0]:out_cen[0] + lib_size[0] + edge[0],
             out_cen[1] - lib_size[1]:out_cen[1] + lib_size[1] +
             edge[1]] = lib_psf_arr

    # Write out library PSF FLT file now
    psf_flt = shutil.copy(calimg, psf_flt_name)

    # Update file with library PSF
    flt_hdu = fits.open(psf_flt, mode='update')
    flt_hdu[('sci', 1)].data = psf_base
    flt_hdu[('sci', 1)].header['psf_nx'] = psf_base.shape[1]
    flt_hdu[('sci', 1)].header['psf_ny'] = psf_base.shape[0]
    num_sci = fu.countExtn(calimg)
    # Also zero out all other science data in this 'PSF' file.
    if num_sci > 1:
        for extn in range(2, num_sci + 1):
            flt_hdu[('sci', extn)].data *= 0.0
    flt_hdu.close()
    del flt_hdu, lib_psf_arr

    return psf_flt_name
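The separable 2-D Hamming taper used above is easy to illustrate standalone (shown here with scipy.signal.windows, the current home of the window functions):

import numpy as np
from scipy.signal import windows

n = 64                          # hypothetical PSF array size
h = windows.hamming(n)          # 1-D Hamming window
h2d = np.sqrt(np.outer(h, h))   # separable 2-D taper, ~1 at the center
# Multiplying the PSF by h2d rolls its edges off smoothly, suppressing
# edge artifacts when the PSF is later drizzled/resampled.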
Example #12
def build_reference_wcs(input, sciname='sci'):
    """Create the reference WCS based on all the inputs for a field

    Parameters
    ----------
    input : str or `astropy.io.fits.HDUList` object or list
        Full filename or `fits.HDUList` object
         of the observation to use in building a tangent plane WCS.
         If a list of filenames or HDUList objects are provided, then all
         of them will be used to generate the reference WCS for the entire
         field of view.

    sciname : str
        EXTNAME of extensions which have WCS information for the observation

    """
    # Ensure that input is a list at all times.
    # If a single filename (str) or single HDUList is provided, wrap it as a
    # list.  (fits.HDUList subclasses list, so the explicit isinstance check
    # is needed to avoid treating a single HDUList as a list of inputs.)
    if not isinstance(input, list) or isinstance(input, fits.HDUList):
        input = [input]

    # Create a composite field-of-view for all inputs
    wcslist = []
    for img in input:
        nsci = fileutil.countExtn(img)
        for num in range(nsci):
            extname = (sciname, num + 1)
            if sciname == 'sci':
                extwcs = HSTWCS(img, ext=extname)
            else:
                # Working with HDRLET as input and do the best we can...
                extwcs = read_hlet_wcs(img, ext=extname)

            wcslist.append(extwcs)

    # This default output WCS will have the same plate-scale and orientation
    # as the first chip in the list, which for WFPC2 data means the PC.
    # Fortunately, for alignment, this doesn't matter since no resampling of
    # data will be performed
    outwcs = utils.output_wcs(wcslist)

    return outwcs
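A hedged usage sketch (filenames hypothetical): build a single tangent-plane WCS spanning several exposures, then use it as the common frame for alignment:

flt_files = ['j8cw02021_flt.fits', 'j8cw02022_flt.fits']
ref_wcs = build_reference_wcs(flt_files)   # also accepts one filename or HDUList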
Example #13
def build_reference_wcs(inputs, sciname='sci'):
    """Create the reference WCS based on all the inputs for a field"""
    # start by creating a composite field-of-view for all inputs
    wcslist = []
    for img in inputs:
        nsci = countExtn(img)
        for num in range(nsci):
            extname = (sciname, num + 1)
            if sciname == 'sci':
                extwcs = wcsutil.HSTWCS(img, ext=extname)
            else:
                # Working with HDRLET as input and do the best we can...
                extwcs = read_hlet_wcs(img, ext=extname)

            wcslist.append(extwcs)

    # This default output WCS will have the same plate-scale and orientation
    # as the first chip in the list, which for WFPC2 data means the PC.
    # Fortunately, for alignment, this doesn't matter since no resampling of
    # data will be performed
    outwcs = utils.output_wcs(wcslist)

    return outwcs
Example #14
def _process_input_wcs_single(fname, wcskey, updatewcs):
    """
    See docs for _process_input_wcs.
    This is separated to be spawned in parallel.
    """
    if wcskey in ['', ' ', 'INDEF', None]:
        if updatewcs:
            uw.updatewcs(fname, checkfiles=False)
    else:
        numext = fileutil.countExtn(fname)
        extlist = []
        for extn in range(1, numext + 1):
            extlist.append(('SCI', extn))
        if wcskey in string.ascii_uppercase:
            wkey = wcskey
            wname = ' '
        else:
            wname = wcskey
            wkey = ' '
        altwcs.restoreWCS(fname, extlist, wcskey=wkey, wcsname=wname)
    # make an asn table at the end
    # Make sure there is a WCSCORR table for each input image
    if wcskey not in ['', ' ', 'INDEF', None] or updatewcs:
        wcscorr.init_wcscorr(fname)
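The key-versus-name dispatch above is worth spelling out: a single uppercase letter is treated as a WCS key ("A"-"Z"), anything else as a WCSNAME value. A standalone sketch of that test (the inputs are hypothetical):

import string

for wcskey in ('B', 'IDC_v971826aj'):
    if wcskey in string.ascii_uppercase:   # one of the 26 alternate keys
        wkey, wname = wcskey, ' '
    else:                                  # otherwise restore by WCSNAME
        wkey, wname = ' ', wcskey
    print(wcskey, '->', (wkey, wname))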
Example #15
def generate_sky_catalog(image, refwcs, **kwargs):
    """Build source catalog from input image using photutils.

    This script borrows heavily from build_source_catalog.

    The catalog returned by this function includes sources found in all chips
    of the input image with the positions translated to the coordinate frame
    defined by the reference WCS `refwcs`.  The sources will be:
    - identified using photutils segmentation-based source finding code
    - cleaned of any input pixel which has been flagged as 'bad' in the DQ
      array, should a DQ array be found in the input HDUList
    - classified as probable cosmic-rays (if enabled) using central_moments
      properties of each source, with these sources being removed from the
      catalog

    Parameters
    ----------
    image : `~astropy.io.fits.HDUList`
        Input image.

    refwcs : `~stwcs.wcsutil.HSTWCS`
        Definition of the reference frame WCS.

    dqname : str
        EXTNAME for the DQ array, if present, in the input image.

    output : bool
        Specify whether or not to write out a separate catalog file for all the
        sources found in each chip.  Default: None (False)

    threshold : float, optional
        This parameter controls the S/N threshold used for identifying sources in
        the image relative to the background RMS in much the same way that
        the 'threshold' parameter in 'tweakreg' works.

    fwhm : float, optional
        FWHM (in pixels) of the expected sources from the image, comparable to the
        'conv_width' parameter from 'tweakreg'.  Objects with FWHM closest to
        this value will be identified as sources in the catalog.

    Returns
    --------
    master_cat : `~astropy.table.Table`
        Source catalog for all 'valid' sources identified from all chips of the
        input image with positions translated to the reference WCS coordinate
        frame.

    """
    # Extract source catalogs for each chip
    source_cats = generate_source_catalog(image, **kwargs)

    # Build source catalog for entire image
    master_cat = None
    numSci = countExtn(image, extname='SCI')
    # if no refwcs specified, build one now...
    if refwcs is None:
        refwcs = build_reference_wcs([image])
    for chip in range(numSci):
        chip += 1
        # work with sources identified from this specific chip
        seg_tab_phot = source_cats[chip]
        if seg_tab_phot is None:
            continue
        # Convert pixel coordinates from this chip to sky coordinates
        chip_wcs = wcsutil.HSTWCS(image, ext=('sci', chip))
        seg_ra, seg_dec = chip_wcs.all_pix2world(seg_tab_phot['xcentroid'], seg_tab_phot['ycentroid'], 1)
        # Convert sky positions to pixel positions in the reference WCS frame
        seg_xy_out = refwcs.all_world2pix(seg_ra, seg_dec, 1)
        seg_tab_phot['xcentroid'] = seg_xy_out[0]
        seg_tab_phot['ycentroid'] = seg_xy_out[1]
        if master_cat is None:
            master_cat = seg_tab_phot
        else:
            master_cat = vstack([master_cat, seg_tab_phot])

    return master_cat
Example #16
def wfpc2_to_flt(imgname):
    """Convert separate GEIS-based FITS files into single FLT file

    Parameters
    ----------
    imgname : str
        Filename of calibrated WFPC2 SCI (*_c0m.fits) image

    Returns
    -------
    flt_filename : str
        Filename of the WFPC2 MEF *_flt.fits file that was written out

    """
    is_mef = 'c0m' in imgname
    if not is_mef:
        raise TypeError("MEF C0M file needed as input.")

    dq_file = imgname.replace('c0m', 'c1m')
    is_dq = os.path.exists(dq_file)
    flt_filename = imgname.replace('c0m', 'flt')

    # Read in input SCI file
    in_sci = fits.open(imgname)

    # Add keywords to be more compatible with ACS and WFC3 data
    num_sci = fileutil.countExtn(imgname)
    det_name = 'PC' if in_sci[('sci', 1)].header['detector'] == 1 else 'WF'
    in_sci[0].header['DETECTOR'] = det_name

    if is_dq:
        # Read in existing input DQ file
        in_dq = fits.open(dq_file)
        dq_extns = [extn for extn in in_dq[1:]]
    else:
        # Could not find a DQ file, so create empty DQ arrays
        # based on SCI arrays
        dq_extns = [extn for extn in copy.deepcopy(in_sci[1:])]
        for extn in dq_extns:
            extn.data = np.zeros(extn.data.shape, dtype=np.int32)

    # Update EXTNAME to be consistent with ACS and WFC3 DQ extname
    for i, extn in enumerate(dq_extns):
        extn.header['extname'] = 'DQ'
        extn.header['extver'] = i + 1

    # Now create ERR arrays as well...
    err_extns = [extn for extn in copy.deepcopy(in_sci[1:])]
    for i, extn in enumerate(err_extns):
        # Initialize using Poisson error estimate
        extn.data = np.sqrt(extn.data)
        extn.header['extname'] = 'ERR'
        extn.header['extver'] = i + 1

    # Create output FLT file now to avoid having astropy
    # create a tmp* file that doesn't always get cleaned up...
    out_hdu = copy.deepcopy(in_sci)
    fname_kw = out_hdu[0].header['filename']
    out_hdu[0].header['filename'] = f"{fname_kw[:-8]}flt.fits"
    for dq_extn, err_extn in zip(dq_extns, err_extns):
        out_hdu.append(dq_extn)
        out_hdu.append(err_extn)

    print(f"Writing out {flt_filename}")
    out_file = open(flt_filename, 'wb')
    out_hdu.writeto(out_file, overwrite=True)
    in_sci.close()
    del in_sci
    if is_dq:
        in_dq.close()

    return flt_filename
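A minimal usage sketch (the filename is hypothetical): convert a calibrated WFPC2 c0m/c1m pair into a single ACS/WFC3-style FLT file:

flt_name = wfpc2_to_flt('u40x0102m_c0m.fits')
print(flt_name)    # -> 'u40x0102m_flt.fits'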
Example #17
def restore_from_to(f, fromext=None, toext=None, wcskey=" ", wcsname=" "):
    """
    Copy an alternate WCS from one extension as a primary WCS of another extension

    Reads in a WCS defined with wcskey and saves it as the primary WCS.
    Goes sequentially through the extensions named by 'fromext' and copies
    the WCS into the matching extensions named by 'toext'.


    Parameters
    ----------
    f:       string or `astropy.io.fits.HDUList`
             a file name or a file object
    fromext: string
             extname from which to read in the alternate WCS, for example 'SCI'
    toext:   string or python list
             extname or a list of extnames to which the WCS will be copied as
             primary, for example ['SCI', 'ERR', 'DQ']
    wcskey:  a character
             "A"-"Z" - Used for one of 26 alternate WCS definitions.
             or " " - find a key from WCSNAME value
    wcsname: string (optional)
             if given and wcskey is " ", will try to restore by WCSNAME value

    See Also
    --------
    archiveWCS - copy the primary WCS as an alternate WCS
    restoreWCS - Copy a WCS with key "WCSKEY" to the primary WCS

    """
    if isinstance(f, str):
        fobj = fits.open(f, mode='update')
    else:
        fobj = f

    if not _parpasscheck(fobj, ext=None, wcskey=wcskey, fromext=fromext, toext=toext):
        closefobj(f, fobj)
        raise ValueError("Input parameters problem")

    # Interpret input 'ext' value to get list of extensions to process
    # ext = _buildExtlist(fobj, ext)

    if isinstance(toext, str):
        toext = [toext]

    # the case of an HDUList object in memory without an associated file

    # if fobj.filename() is not None:
    #        name = fobj.filename()

    simplefits = fu.isFits(fobj)[1] == 'simple'
    if simplefits:
        wcskeyext = 0
    else:
        wcskeyext = 1

    if wcskey == " ":
        if wcsname.strip():
            wkey = getKeyFromName(fobj[wcskeyext].header, wcsname)
            if not wkey:
                closefobj(f, fobj)
                raise KeyError("Could not get a key from wcsname %s ." % wcsname)
    else:
        if wcskey not in wcskeys(fobj, ext=wcskeyext):
            print("Could not find alternate WCS with key %s in this file" % wcskey)
            closefobj(f, fobj)
            return
        wkey = wcskey

    countext = fu.countExtn(fobj, fromext)
    if not countext:
        raise KeyError("File does not have extension with extname %s", fromext)
    else:
        for i in range(1, countext + 1):
            for toe in toext:
                _restore(fobj, fromextnum=i, fromextnam=fromext, toextnum=i, toextnam=toe, ukey=wkey)

    if fobj.filename() is not None:
        # fobj.writeto(name)
        closefobj(f, fobj)
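A hedged usage sketch (filename and key hypothetical): copy the alternate WCS stored under key 'A' in each SCI extension onto the SCI, ERR and DQ extensions as their primary WCS:

restore_from_to('j8cw02021_flt.fits', fromext='SCI',
                toext=['SCI', 'ERR', 'DQ'], wcskey='A')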
Example #18
def determine_alignment_residuals(input, files, max_srcs=2000):
    """Determine the relative alignment between members of an association.

    Parameters
    ----------
    input : string
        Original pipeline input filename.  This filename will be used to
        define the output analysis results filename.

    files : list
        Set of files on which to actually perform comparison.  The original
        pipeline can work on both CTE-corrected and non-CTE-corrected files,
        but this comparison will only be performed on CTE-corrected
        products when available.

    Returns
    --------
    results : string
        Name of JSON file containing all the extracted results from the comparisons
        being performed.
    """
    # Open all files as HDUList objects
    hdus = [fits.open(f) for f in files]
    # Determine sources from each chip
    src_cats = []
    num_srcs = []
    for hdu in hdus:
        numsci = countExtn(hdu)
        nums = 0
        img_cats = {}
        for chip in range(numsci):
            chip += 1
            img_cats[chip] = amutils.extract_point_sources(
                hdu[("SCI", chip)].data, nbright=max_srcs)
            nums += len(img_cats[chip])
        num_srcs.append(nums)
        src_cats.append(img_cats)

    if not num_srcs or max(num_srcs) <= 3:
        return None

    # src_cats = [amutils.generate_source_catalog(hdu) for hdu in hdus]
    # Combine WCS from HDULists and source catalogs into tweakwcs-compatible input
    imglist = []
    for i, (f, cat) in enumerate(zip(files, src_cats)):
        imglist += amutils.build_wcscat(f, i, cat)

    # Setup matching algorithm using parameters tuned to well-aligned images
    match = tweakwcs.TPMatch(searchrad=5,
                             separation=1.0,
                             tolerance=4.0,
                             use2dhist=True)
    try:
        # perform relative fitting
        matchlist = tweakwcs.align_wcs(imglist,
                                       None,
                                       match=match,
                                       expand_refcat=False)
        del matchlist
    except Exception:
        return None
    # Check to see whether there were any successful fits...
    align_success = False
    for img in imglist:
        if img.meta['fit_info']['status'] == 'SUCCESS':
            align_success = True
            break
    if align_success:
        # extract results in the style of 'tweakreg'
        resids = extract_residuals(imglist)
        if resids is None:
            resids_file = None
        else:
            # Define name for output JSON file...
            resids_file = "{}_astrometry_resids.json".format(input[:9])
            # Remove any previously computed results
            if os.path.exists(resids_file):
                os.remove(resids_file)
            # Dump the results to a JSON file now...
            with open(resids_file, 'w') as jfile:
                json.dump(resids, jfile)
    else:
        resids_file = None

    return resids_file
Example #19
def build_wcscat(image, group_id, source_catalog):
    """ Return a list of `~tweakwcs.tpwcs.FITSWCS` objects for all chips in an image.

    Parameters
    ----------
    image : str, `~astropy.io.fits.HDUList`
        Either filename or HDUList of a single HST observation.

    group_id : int
        Integer ID for group this image should be associated with; primarily
        used when separate chips are in separate files to treat them all as one
        exposure.

    source_catalog : dict
        If provided, these catalogs will be attached as `catalog`
        entries in each chip's ``FITSWCS`` object.  It should be provided as a
        dict of astropy Tables identified by chip number with
        each table containing sources from image extension ``('sci', chip)`` as
        generated by `generate_source_catalog()`.

    Returns
    -------
    wcs_catalogs : list of `~tweakwcs.tpwcs.FITSWCS`
        List of `~tweakwcs.tpwcs.FITSWCS` objects defined for all chips in input image.

    """
    open_file = False
    if isinstance(image, str):
        hdulist = fits.open(image)
        open_file = True
    elif isinstance(image, fits.HDUList):
        hdulist = image
    else:
        log.info("Wrong type of input, {}, for build_wcscat...".format(
            type(image)))
        raise ValueError

    wcs_catalogs = []
    numsci = countExtn(hdulist)
    for chip in range(1, numsci + 1):
        w = wcsutil.HSTWCS(hdulist, ('SCI', chip))

        imcat = source_catalog[chip]
        # rename xcentroid/ycentroid columns, if necessary, to be consistent with tweakwcs
        if 'xcentroid' in imcat.colnames:
            imcat.rename_column('xcentroid', 'x')
            imcat.rename_column('ycentroid', 'y')

        wcscat = FITSWCS(w,
                         meta={
                             'chip': chip,
                             'group_id': group_id,
                             'filename': image,
                             'catalog': imcat,
                             'name': image
                         })

        wcs_catalogs.append(wcscat)

    if open_file:
        hdulist.close()

    return wcs_catalogs
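A hedged usage sketch (the filename and source positions are hypothetical): wrap each chip's WCS and source catalog into the tweakwcs-compatible objects used for fitting:

from astropy.table import Table

# one table of (x, y) source positions per chip
catalogs = {1: Table({'xcentroid': [10.0], 'ycentroid': [20.0]}),
            2: Table({'xcentroid': [30.0], 'ycentroid': [40.0]})}
wcs_cats = build_wcscat('j8cw02021_flt.fits', group_id=1,
                        source_catalog=catalogs)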
Example #20
def generate_sky_catalog(image, refwcs, dqname="DQ", output=False):
    """Build source catalog from input image using photutils.

    This script borrows heavily from build_source_catalog.

    The catalog returned by this function includes sources found in all chips
    of the input image with the positions translated to the coordinate frame
    defined by the reference WCS `refwcs`.  The sources will be:
    - identified using photutils segmentation-based source finding code
    - cleaned of any input pixel which has been flagged as 'bad' in the DQ
      array, should a DQ array be found in the input HDUList
    - classified as probable cosmic-rays (if enabled) using central_moments
      properties of each source, with these sources being removed from the
      catalog

    Parameters
    ----------
    image : `~astropy.io.fits.HDUList`
        Input image.
    refwcs : `~stwcs.wcsutil.HSTWCS`
        Definition of the reference frame WCS.
    dqname : str, optional
        EXTNAME for the DQ array, if present, in the input image.
    output : bool, optional
        Specify whether or not to write out a separate catalog file for all the
        sources found in each chip.

    Returns
    --------
    master_cat : `~astropy.table.Table`
        Source catalog for all 'valid' sources identified from all chips of the
        input image with positions translated to the reference WCS coordinate
        frame.

    """
    # Extract source catalogs for each chip
    source_cats = generate_source_catalog(image, dqname=dqname, output=output)

    # Build source catalog for entire image
    master_cat = None
    numSci = countExtn(image, extname='SCI')
    # if no refwcs specified, build one now...
    if refwcs is None:
        refwcs = build_reference_wcs([image])
    for chip in range(numSci):
        chip += 1
        # work with sources identified from this specific chip
        seg_tab_phot = source_cats[chip]
        if seg_tab_phot is None:
            continue
        # Convert pixel coordinates from this chip to sky coordinates
        chip_wcs = wcsutil.HSTWCS(image, ext=('sci', chip))
        seg_ra, seg_dec = chip_wcs.all_pix2world(seg_tab_phot['xcentroid'],
                                                 seg_tab_phot['ycentroid'], 1)
        # Convert sky positions to pixel positions in the reference WCS frame
        seg_xy_out = refwcs.all_world2pix(seg_ra, seg_dec, 1)
        seg_tab_phot['xcentroid'] = seg_xy_out[0]
        seg_tab_phot['ycentroid'] = seg_xy_out[1]
        if master_cat is None:
            master_cat = seg_tab_phot
        else:
            master_cat = vstack([master_cat, seg_tab_phot])

    return master_cat
Example #21
def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
    """
    Update WCSCORR table with a new row or rows for this extension header. It
    copies the current set of WCS keywords as a new row of the table based on
    keyed WCSs (as per the Paper I multiple-WCS standard).

    Parameters
    ----------
    dest : HDUList
        The HDU list whose WCSCORR table should be appended to (the WCSCORR HDU
        must already exist)
    source : HDUList, optional
        The HDU list containing the extension from which to extract the WCS
        keywords to add to the WCSCORR table.  If None, the dest is also used
        as the source.
    extname : str, optional
        The extension name from which to take new WCS keywords.  If there are
        multiple extensions with that name, rows are added for each extension
        version.
    wcs_id : str, optional
        The name of the WCS to add, as in the WCSNAMEa keyword.  If
        unspecified, all the WCSs in the specified extensions are added.
    active: bool, optional
        When True, indicates that the update should reflect an update of the
        active WCS information, not just appending the WCS to the file as a
        headerlet
    """
    if not isinstance(dest, fits.HDUList):
        dest = fits.open(dest, mode='update')
    fname = dest.filename()

    if source is None:
        source = dest

    if extname == 'PRIMARY':
        return

    numext = fileutil.countExtn(source, extname)
    if numext == 0:
        raise ValueError('No %s extensions found in the source HDU list.'
                         % extname)
    # Initialize the WCSCORR table extension in dest if not already present
    init_wcscorr(dest)
    try:
        dest.index_of('WCSCORR')
    except KeyError:
        return

    # check to see whether or not this is an up-to-date table
    # replace with newly initialized table with current format
    old_table = dest['WCSCORR']
    wcscorr_cols = ['WCS_ID', 'EXTVER', 'SIPNAME',
                    'HDRNAME', 'NPOLNAME', 'D2IMNAME']

    for colname in wcscorr_cols:
        if colname not in old_table.data.columns.names:
            print("WARNING:    Replacing outdated WCSCORR table...")
            outdated_table = old_table.copy()
            del dest['WCSCORR']
            init_wcscorr(dest)
            old_table = dest['WCSCORR']
            break

    # Current implementation assumes the same WCS keywords are in each
    # extension version; if this should not be assumed then this can be
    # modified...
    wcs_keys = altwcs.wcskeys(source[(extname, 1)].header)
    wcs_keys = [kk for kk in wcs_keys if kk]
    if ' ' not in wcs_keys:
        wcs_keys.append(' ')  # Ensure that primary WCS gets used
    # apply logic for only updating WCSCORR table with specified keywords
    # corresponding to the WCS with WCSNAME=wcs_id
    if wcs_id is not None:
        wnames = altwcs.wcsnames(source[(extname, 1)].header)
        wkeys = []
        for letter in wnames:
            if wnames[letter] == wcs_id:
                wkeys.append(letter)
        if len(wkeys) > 1 and ' ' in wkeys:
            wkeys.remove(' ')
        wcs_keys = wkeys
    wcshdr = stwcs.wcsutil.HSTWCS(source, ext=(extname, 1)).wcs2header()
    wcs_keywords = list(wcshdr.keys())

    if 'O' in wcs_keys:
        wcs_keys.remove('O') # 'O' is reserved for original OPUS WCS

    # create new table for hdr and populate it with the newly updated values
    new_table = create_wcscorr(descrip=True, numrows=0,
                               padding=len(wcs_keys) * numext)
    prihdr = source[0].header

    # Get headerlet related keywords here
    sipname, idctab = utils.build_sipname(source, fname, "None")
    npolname, npolfile = utils.build_npolname(source, None)
    d2imname, d2imfile = utils.build_d2imname(source, None)
    if 'hdrname' in prihdr:
        hdrname = prihdr['hdrname']
    else:
        hdrname = ''

    idx = -1
    for wcs_key in wcs_keys:
        for extver in range(1, numext + 1):
            extn = (extname, extver)
            if 'SIPWCS' in extname and not active:
                tab_extver = 0 # Since it has not been added to the SCI header yet
            else:
                tab_extver = extver
            hdr = source[extn].header
            if 'WCSNAME'+wcs_key in hdr:
                wcsname = hdr['WCSNAME' + wcs_key]
            else:
                wcsname = utils.build_default_wcsname(hdr['idctab'])

            selection = {'WCS_ID': wcsname, 'EXTVER': tab_extver,
                         'SIPNAME': sipname, 'HDRNAME': hdrname,
                         'NPOLNAME': npolname, 'D2IMNAME': d2imname}

            # Ensure that an entry for this WCS is not already in the dest
            # table; if so just skip it
            rowind = find_wcscorr_row(old_table.data, selection)
            if np.any(rowind):
                continue

            idx += 1

            wcs = stwcs.wcsutil.HSTWCS(source, ext=extn, wcskey=wcs_key)
            wcshdr = wcs.wcs2header()

            # Update selection column values
            for key, val in selection.items():
                if key in new_table.data.names:
                    new_table.data.field(key)[idx] = val

            for key in wcs_keywords:
                if key in new_table.data.names:
                    new_table.data.field(key)[idx] = wcshdr[key + wcs_key]

            for key in DEFAULT_PRI_KEYS:
                if key in new_table.data.names and key in prihdr:
                    new_table.data.field(key)[idx] = prihdr[key]
            # Now look for additional, non-WCS-keyword table column data
            for key in COL_FITSKW_DICT:
                fitkw = COL_FITSKW_DICT[key]
                # Interpret any 'pri.hdrname' or
                # 'sci.crpix1' formatted keyword names
                if '.' in fitkw:
                    srchdr, fitkw = fitkw.split('.')
                    if 'pri' in srchdr.lower():
                        srchdr = prihdr
                    else:
                        srchdr = source[extn].header
                else:
                    srchdr = source[extn].header

                if fitkw + wcs_key in srchdr:
                    new_table.data.field(key)[idx] = srchdr[fitkw + wcs_key]


    # If idx was never incremented, no rows were added, so there's nothing else
    # to do...
    if idx < 0:
        return

    # Now, we need to merge this into the existing table
    rowind = find_wcscorr_row(old_table.data, {'wcs_id': ['', '0.0']})
    old_nrows = np.where(rowind)[0][0]
    new_nrows = new_table.data.shape[0]

    # check to see if there is room for the new row
    if (old_nrows + new_nrows) > old_table.data.shape[0] - 1:
        pad_rows = 2 * new_nrows
        # if not, create a new table with 'pad_rows' new empty rows
        # (fits.new_table was removed from astropy; BinTableHDU.from_columns
        # is the current equivalent)
        upd_table = fits.BinTableHDU.from_columns(
            old_table.columns, header=old_table.header,
            nrows=old_table.data.shape[0] + pad_rows)
    else:
        upd_table = old_table
        pad_rows = 0
    # Now, add
    for name in old_table.columns.names:
        if name in new_table.data.names:
            # reset the default values to ones specific to the row definitions
            for i in range(pad_rows):
                upd_table.data.field(name)[old_nrows + i] = old_table.data.field(name)[-1]
            # Now populate with values from new table
            upd_table.data.field(name)[old_nrows:old_nrows + new_nrows] = \
                    new_table.data.field(name)
    upd_table.header['TROWS'] = old_nrows + new_nrows

    # replace old extension with newly updated table extension
    dest['WCSCORR'] = upd_table
Example #22
def tran(inimage,
         outimage,
         direction='forward',
         x=None,
         y=None,
         coords=None,
         coordfile=None,
         colnames=None,
         separator=None,
         precision=6,
         output=None,
         verbose=True):
    """ Primary interface to perform coordinate transformations in pixel
        coordinates between 2 images using STWCS and full distortion models
        read from each image's header.
    """
    single_coord = False

    # Only use value provided in `coords` if nothing has been specified for coordfile
    if coords is not None and coordfile is None:
        coordfile = coords
        warnings.simplefilter('always', DeprecationWarning)
        warnings.warn(
            "Please update calling code to pass in `coordfile` instead of `coords`.",
            category=DeprecationWarning)
        warnings.simplefilter('default', DeprecationWarning)

    if coordfile is not None:
        if colnames in util.blank_list:
            colnames = ['c1', 'c2']
        # Determine columns which contain pixel positions
        cols = util.parse_colnames(colnames, coordfile)
        # read in columns from input coordinates file
        xyvals = np.loadtxt(coordfile, usecols=cols, delimiter=separator)
        if xyvals.ndim == 1:  # only 1 entry in coordfile
            xlist = [xyvals[0].copy()]
            ylist = [xyvals[1].copy()]
        else:
            xlist = xyvals[:, 0].copy()
            ylist = xyvals[:, 1].copy()
        del xyvals
    else:
        if isinstance(x, np.ndarray):
            xlist = x.tolist()
            ylist = y.tolist()
        elif not isinstance(x, list):
            xlist = [x]
            ylist = [y]
            single_coord = True
        else:
            xlist = x
            ylist = y

    # start by reading in WCS+distortion info for each image
    im1wcs = wcsutil.HSTWCS(inimage)
    if im1wcs.wcs.is_unity():
        print(
            "####\nNo valid input WCS found in {}.\n  Results may be invalid.\n####\n"
            .format(inimage))

    if util.is_blank(outimage):
        fname, fextn = fileutil.parseFilename(inimage)
        numsci = fileutil.countExtn(fname)
        chips = []
        for e in range(1, numsci + 1):
            chips.append(wcsutil.HSTWCS(fname, ext=('sci', e)))
        if len(chips) == 0:
            chips = [im1wcs]
        im2wcs = distortion.utils.output_wcs(chips)
    else:
        im2wcs = wcsutil.HSTWCS(outimage)

    if im2wcs.wcs.is_unity():
        print(
            "####\nNo valid output WCS found in {}.\n  Results may be invalid.\n####\n"
            .format(outimage))

    # Setup the transformation
    p2p = wcs_functions.WCSMap(im1wcs, im2wcs)

    if direction[0].lower() == 'f':
        outx, outy = p2p.forward(xlist, ylist)
    else:
        outx, outy = p2p.backward(xlist, ylist)

    if isinstance(outx, np.ndarray):
        outx = outx.tolist()
        outy = outy.tolist()

    # add formatting based on precision here...
    xstr = []
    ystr = []
    fmt = "%." + repr(precision) + "f"
    for ox, oy in zip(outx, outy):
        xstr.append(fmt % ox)
        ystr.append(fmt % oy)

    if verbose or util.is_blank(output):
        print('# Coordinate transformations for ', inimage)
        print('# X(in)      Y(in)             X(out)         Y(out)\n')
        for xs, ys, a, b in zip(xlist, ylist, xstr, ystr):
            print("%.4f  %.4f    %s  %s" % (xs, ys, a, b))

    # Create output file, if specified
    if output:
        with open(output, mode='w') as f:
            f.write("# Coordinates converted from %s\n" % inimage)
            for xs, ys in zip(xstr, ystr):
                f.write('%s    %s\n' % (xs, ys))
        print('Wrote out results to: ', output)

    if single_coord:
        outx = outx[0]
        outy = outy[0]
    return outx, outy
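A hedged usage sketch (filenames hypothetical): map a single pixel position from one chip of an FLT frame into a drizzled output frame, using the full distortion model from each header:

outx, outy = tran('j8cw02021_flt.fits[sci,1]', 'j8cw02020_drz.fits[1]',
                  direction='forward', x=1024.0, y=512.0)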
Example #23
def init_wcscorr(input, force=False):
    """
    This function will initialize the WCSCORR table if it is not already
    present. It uses WCS keywords with a prefix of 'O' (the original
    OPUS-generated WCS) for the initial rows of the table, or the current WCS
    keywords if no 'O'-prefix keywords are found.

    This function will NOT overwrite any rows already present.

    This function works on all SCI extensions at one time.
    """
    # TODO: Create some sort of decorator or (for Python2.5) context for
    # opening a FITS file and closing it when done, if necessary
    if not isinstance(input, fits.HDUList):
        # input must be a filename, so open as `astropy.io.fits.HDUList` object
        fimg = fits.open(input, mode='update')
        need_to_close = True
    else:
        fimg = input
        need_to_close = False

    # Do not try to generate a WCSCORR table for a simple FITS file
    numsci = fileutil.countExtn(fimg)
    if len(fimg) == 1 or numsci == 0:
        if need_to_close:
            fimg.close()
        return

    enames = [e.name for e in fimg]
    if 'WCSCORR' in enames:
        if not force:
            if need_to_close:
                fimg.close()
            return
        else:
            del fimg['wcscorr']
    print('Initializing new WCSCORR table for ', fimg.filename())

    used_wcskeys = altwcs.wcskeys(fimg['SCI', 1].header)

    # define the primary columns of the WCSEXT table with initial rows for each
    # SCI extension for the original OPUS solution
    numwcs = len(used_wcskeys)
    if numwcs == 0:
        numwcs = 1

    # create new table with more rows than needed initially to make it easier to
    # add new rows later
    wcsext = create_wcscorr(descrip=True,
                            numrows=numsci,
                            padding=(numsci * numwcs) + numsci * 4)
    # Assign the correct EXTNAME value to this table extension
    wcsext.header['TROWS'] = (numsci * 2, 'Number of updated rows in table')
    wcsext.header['EXTNAME'] = ('WCSCORR', 'Table with WCS Update history')
    wcsext.header['EXTVER'] = 1

    # define set of WCS keywords which need to be managed and copied to the table
    wcs1 = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', 1))
    idc2header = True
    if wcs1.idcscale is None:
        idc2header = False
    wcs_keywords = list(wcs1.wcs2header(idc2hdr=idc2header).keys())

    prihdr = fimg[0].header
    prihdr_keys = DEFAULT_PRI_KEYS
    pri_funcs = {
        'SIPNAME': stwcs.updatewcs.utils.build_sipname,
        'NPOLNAME': stwcs.updatewcs.utils.build_npolname,
        'D2IMNAME': stwcs.updatewcs.utils.build_d2imname
    }

    # Now copy original OPUS values into table
    for extver in range(1, numsci + 1):
        rowind = find_wcscorr_row(wcsext.data, {
            'WCS_ID': 'OPUS',
            'EXTVER': extver,
            'WCS_key': 'O'
        })
        # There should only EVER be a single row for each extension with OPUS values
        rownum = np.where(rowind)[0][0]
        #print 'Archiving OPUS WCS in row number ',rownum,' in WCSCORR table for SCI,',extver

        hdr = fimg['SCI', extver].header
        # define set of WCS keywords which need to be managed and copied to the table
        if used_wcskeys is None:
            used_wcskeys = altwcs.wcskeys(hdr)
        # Check to see whether or not there is an OPUS alternate WCS present,
        # if so, get its values directly, otherwise, archive the PRIMARY WCS
        # as the OPUS values in the WCSCORR table
        if 'O' not in used_wcskeys:
            altwcs.archiveWCS(fimg, ('SCI', extver),
                              wcskey='O',
                              wcsname='OPUS')
        wkey = 'O'

        wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver), wcskey=wkey)
        wcshdr = wcs.wcs2header(idc2hdr=idc2header)

        if wcsext.data.field('CRVAL1')[rownum] != 0:
            # If we find values for these keywords already in the table, do not
            # overwrite them again
            print('WCS keywords already updated...')
            break
        for key in wcs_keywords:
            if key in wcsext.data.names:
                wcsext.data.field(key)[rownum] = wcshdr[(key + wkey)[:8]]
        # Now get any keywords from PRIMARY header needed for WCS updates
        for key in prihdr_keys:
            if key in prihdr:
                val = prihdr[key]
            else:
                val = ''
            wcsext.data.field(key)[rownum] = val

    # Now that we have archived the OPUS alternate WCS, remove it from the list
    # of used_wcskeys
    if 'O' in used_wcskeys:
        used_wcskeys.remove('O')

    # Now copy remaining alternate WCSs into table
    # TODO: Much of this appears to be redundant with update_wcscorr; consider
    # merging them...
    for uwkey in used_wcskeys:
        for extver in range(1, numsci + 1):
            hdr = fimg['SCI', extver].header
            wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver), wcskey=uwkey)
            wcshdr = wcs.wcs2header()
            if 'WCSNAME' + uwkey not in wcshdr:
                wcsid = utils.build_default_wcsname(fimg[0].header['idctab'])
            else:
                wcsid = wcshdr['WCSNAME' + uwkey]

            # identify next empty row
            rowind = find_wcscorr_row(wcsext.data,
                                      selections={'wcs_id': ['', '0.0']})
            rows = np.where(rowind)[0]
            if len(rows) > 0:
                rownum = rows[0]
            else:
                # No empty rows left; skip rather than overwrite a stale row
                print('No available rows found for updating.')
                continue

            # Update selection columns for this row with relevant values
            wcsext.data.field('WCS_ID')[rownum] = wcsid
            wcsext.data.field('EXTVER')[rownum] = extver
            wcsext.data.field('WCS_key')[rownum] = uwkey

            # Look for standard WCS keyword values
            for key in wcs_keywords:
                if key in wcsext.data.names:
                    wcsext.data.field(key)[rownum] = wcshdr[key + uwkey]
            # Now get any keywords from PRIMARY header needed for WCS updates
            for key in prihdr_keys:
                if key in pri_funcs:
                    val = pri_funcs[key](fimg)[0]
                else:
                    if key in prihdr:
                        val = prihdr[key]
                    else:
                        val = ''
                wcsext.data.field(key)[rownum] = val

    # Append this table to the image FITS file
    fimg.append(wcsext)
    # force an update now
    # set the verify flag to 'warn' so that it will always succeed, but still
    # tell the user if PyFITS detects any problems with the file as a whole
    utils.updateNEXTENDKw(fimg)

    fimg.flush('warn')

    if need_to_close:
        fimg.close()
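A hedged usage sketch for init_wcscorr (filename hypothetical); when given a filename the file is opened in 'update' mode internally, so the new table is written back on close:

from astropy.io import fits

fname = 'j8bt06nyq_flt.fits'
init_wcscorr(fname)  # appends a WCSCORR table if one is not already present
with fits.open(fname) as fimg:
    print(fimg['WCSCORR'].data.names)  # inspect the table columns
# Pass force=True to delete any existing WCSCORR table and rebuild it.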
Example No. 31
def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
    """
    Update the WCSCORR table with a new row or rows for this extension header. It
    copies the current set of WCS keywords as a new row of the table based on
    keyed WCSs (as per the FITS Paper I multiple-WCS standard).

    Parameters
    ----------
    dest : HDUList
        The HDU list whose WCSCORR table should be appended to (the WCSCORR HDU
        must already exist)
    source : HDUList, optional
        The HDU list containing the extension from which to extract the WCS
        keywords to add to the WCSCORR table.  If None, the dest is also used
        as the source.
    extname : str, optional
        The extension name from which to take new WCS keywords.  If there are
        multiple extensions with that name, rows are added for each extension
        version.
    wcs_id : str, optional
        The name of the WCS to add, as in the WCSNAMEa keyword.  If
        unspecified, all the WCSs in the specified extensions are added.
    active : bool, optional
        When True, indicates that the update should reflect an update of the
        active WCS information, not just appending the WCS to the file as a
        headerlet
    """
    if not isinstance(dest, fits.HDUList):
        dest = fits.open(dest, mode='update')
    fname = dest.filename()

    if source is None:
        source = dest

    if extname == 'PRIMARY':
        return

    numext = fileutil.countExtn(source, extname)
    if numext == 0:
        raise ValueError('No %s extensions found in the source HDU list.' %
                         extname)
    # Initialize the WCSCORR table extension in dest if not already present
    init_wcscorr(dest)
    try:
        dest.index_of('WCSCORR')
    except KeyError:
        return

    # check to see whether or not this is an up-to-date table
    # replace with newly initialized table with current format
    old_table = dest['WCSCORR']
    wcscorr_cols = [
        'WCS_ID', 'EXTVER', 'SIPNAME', 'HDRNAME', 'NPOLNAME', 'D2IMNAME'
    ]

    for colname in wcscorr_cols:
        if colname not in old_table.data.columns.names:
            print("WARNING:    Replacing outdated WCSCORR table...")
            outdated_table = old_table.copy()
            del dest['WCSCORR']
            init_wcscorr(dest)
            old_table = dest['WCSCORR']
            break

    # Current implementation assumes the same WCS keywords are in each
    # extension version; if this should not be assumed then this can be
    # modified...
    wcs_keys = altwcs.wcskeys(source[(extname, 1)].header)
    wcs_keys = [kk for kk in wcs_keys if kk]
    if ' ' not in wcs_keys:
        wcs_keys.append(' ')  # Ensure that the primary WCS gets used
    # apply logic for only updating WCSCORR table with specified keywords
    # corresponding to the WCS with WCSNAME=wcs_id
    if wcs_id is not None:
        wnames = altwcs.wcsnames(source[(extname, 1)].header)
        wkeys = []
        for letter in wnames:
            if wnames[letter] == wcs_id:
                wkeys.append(letter)
        if len(wkeys) > 1 and ' ' in wkeys:
            wkeys.remove(' ')
        wcs_keys = wkeys
    wcshdr = stwcs.wcsutil.HSTWCS(source, ext=(extname, 1)).wcs2header()
    wcs_keywords = list(wcshdr.keys())

    if 'O' in wcs_keys:
        wcs_keys.remove('O')  # 'O' is reserved for original OPUS WCS

    # create new table for hdr and populate it with the newly updated values
    new_table = create_wcscorr(descrip=True,
                               numrows=0,
                               padding=len(wcs_keys) * numext)
    prihdr = source[0].header

    # Get headerlet related keywords here
    sipname, idctab = utils.build_sipname(source, fname, "None")
    npolname, npolfile = utils.build_npolname(source, None)
    d2imname, d2imfile = utils.build_d2imname(source, None)
    if 'hdrname' in prihdr:
        hdrname = prihdr['hdrname']
    else:
        hdrname = ''

    idx = -1
    for wcs_key in wcs_keys:
        for extver in range(1, numext + 1):
            extn = (extname, extver)
            if 'SIPWCS' in extname and not active:
                tab_extver = 0  # Since it has not been added to the SCI header yet
            else:
                tab_extver = extver
            hdr = source[extn].header
            if 'WCSNAME' + wcs_key in hdr:
                wcsname = hdr['WCSNAME' + wcs_key]
            else:
                wcsname = utils.build_default_wcsname(hdr['idctab'])

            selection = {
                'WCS_ID': wcsname,
                'EXTVER': tab_extver,
                'SIPNAME': sipname,
                'HDRNAME': hdrname,
                'NPOLNAME': npolname,
                'D2IMNAME': d2imname
            }

            # Ensure that an entry for this WCS is not already in the dest
            # table; if so just skip it
            rowind = find_wcscorr_row(old_table.data, selection)
            if np.any(rowind):
                continue

            idx += 1

            wcs = stwcs.wcsutil.HSTWCS(source, ext=extn, wcskey=wcs_key)
            wcshdr = wcs.wcs2header()

            # Update selection column values
            for key, val in selection.items():
                if key in new_table.data.names:
                    new_table.data.field(key)[idx] = val

            for key in wcs_keywords:
                if key in new_table.data.names:
                    new_table.data.field(key)[idx] = wcshdr[key + wcs_key]

            for key in DEFAULT_PRI_KEYS:
                if key in new_table.data.names and key in prihdr:
                    new_table.data.field(key)[idx] = prihdr[key]
            # Now look for additional, non-WCS-keyword table column data
            for key in COL_FITSKW_DICT:
                fitkw = COL_FITSKW_DICT[key]
                # Interpret any 'pri.hdrname' or
                # 'sci.crpix1' formatted keyword names
                if '.' in fitkw:
                    srchdr, fitkw = fitkw.split('.')
                    if 'pri' in srchdr.lower():
                        srchdr = prihdr
                    else:
                        srchdr = source[extn].header
                else:
                    srchdr = source[extn].header

                if fitkw + wcs_key in srchdr:
                    new_table.data.field(key)[idx] = srchdr[fitkw + wcs_key]

    # If idx was never incremented, no rows were added, so there's nothing else
    # to do...
    if idx < 0:
        return

    # Now, we need to merge this into the existing table
    rowind = find_wcscorr_row(old_table.data, {'wcs_id': ['', '0.0']})
    old_nrows = np.where(rowind)[0][0]
    new_nrows = new_table.data.shape[0]

    # check to see if there is room for the new row
    if (old_nrows + new_nrows) > old_table.data.shape[0] - 1:
        pad_rows = 2 * new_nrows
        # if not, create a new table with 'pad_rows' new empty rows
        # (fits.new_table was removed from astropy; BinTableHDU.from_columns
        # is its replacement)
        upd_table = fits.BinTableHDU.from_columns(
            old_table.columns,
            header=old_table.header,
            nrows=old_table.data.shape[0] + pad_rows)
    else:
        upd_table = old_table
        pad_rows = 0
    # Now merge the new rows into the (possibly padded) table
    for name in old_table.columns.names:
        if name in new_table.data.names:
            # reset the default values to ones specific to the row definitions
            for i in range(pad_rows):
                upd_table.data.field(name)[old_nrows +
                                           i] = old_table.data.field(name)[-1]
            # Now populate with values from new table
            upd_table.data.field(name)[old_nrows:old_nrows + new_nrows] = \
                    new_table.data.field(name)
    upd_table.header['TROWS'] = old_nrows + new_nrows

    # replace old extension with newly updated table extension
    dest['WCSCORR'] = upd_table
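A hedged usage sketch for update_wcscorr (filename and WCS name hypothetical); opening dest in 'update' mode lets the replaced WCSCORR extension be flushed to disk:

from astropy.io import fits

with fits.open('j8bt06nyq_flt.fits', mode='update') as dest:
    update_wcscorr(dest, extname='SCI')  # add rows for all alternate WCSs
    # update_wcscorr(dest, wcs_id='IDC_x1u1459i')  # or restrict to one WCSNAME (hypothetical)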
Example No. 32
def determine_alignment_residuals(input,
                                  files,
                                  catalogs=None,
                                  max_srcs=1000,
                                  json_timestamp=None,
                                  json_time_since_epoch=None,
                                  log_level=logutil.logging.INFO):
    """Determine the relative alignment between members of an association.

    Parameters
    -----------
    input : string
        Original pipeline input filename.  This filename will be used to
        define the output analysis results filename.

    files : list
        Set of files on which to actually perform comparison.  The original
        pipeline can work on both CTE-corrected and non-CTE-corrected files,
        but this comparison will only be performed on CTE-corrected
        products when available.

    catalogs : list, optional
        List of dictionaries containing the source catalogs for each input chip.
        The list NEEDS to be in the same order as the filenames given in `files`.
        Each dictionary for each file will need to have numerical (integer) keys
        for each 'sci' extension.  If left as `None`, this function will create
        its own set of catalogs using `astrometric_utils.extract_point_sources`.

    json_timestamp : str, optional
        Universal .json file generation date and time (local timezone) that will be used in the instantiation
        of the HapDiagnostic object. Format: MM/DD/YYYYTHH:MM:SS (Example: 05/04/2020T13:46:35). If not
        specified, default value is logical 'None'

    json_time_since_epoch : float, optional
        Universal .json file generation time that will be used in the instantiation of the HapDiagnostic
        object. Format: Time (in seconds) elapsed since January 1, 1970, 00:00:00 (UTC). If not specified,
        default value is logical 'None'

    log_level : int, optional
        The desired level of verbosity in the log statements displayed on the screen and written to the
        .log file. Default value is 'INFO'.

    Returns
    --------
    resids_files : list of string
        Name of JSON files containing all the extracted results from the comparisons
        being performed.
    """
    log.setLevel(log_level)

    if catalogs is None:
        # Open all files as HDUList objects
        hdus = [fits.open(f) for f in files]
        # Determine sources from each chip
        src_cats = []
        num_srcs = []
        for hdu in hdus:
            numsci = countExtn(hdu)
            nums = 0
            img_cats = {}
            if hdu[("SCI", 1)].data.max() == 0.0:
                log.info(
                    "SKIPPING point-source finding for blank image: {}".format(
                        hdu.filename()))
                continue
            log.info("Determining point-sources for {}".format(hdu.filename()))
            for chip in range(numsci):
                chip += 1
                img_cats[chip] = amutils.extract_point_sources(
                    hdu[("SCI", chip)].data, nbright=max_srcs)
                nums += len(img_cats[chip])

            log.info("Identified {} point-sources from {}".format(
                nums, hdu.filename()))
            num_srcs.append(nums)
            src_cats.append(img_cats)
    else:
        src_cats = catalogs
        num_srcs = []
        for img in src_cats:
            num_img = 0
            for chip in img:
                num_img += len(img[chip])
            num_srcs.append(num_img)

    if len(num_srcs) == 0 or max(num_srcs) <= 3:
        log.warning(
            "Not enough sources identified in input images for comparison")
        return []

    # Combine WCS from HDULists and source catalogs into tweakwcs-compatible input
    imglist = []
    for i, (f, cat) in enumerate(zip(files, src_cats)):
        imglist += amutils.build_wcscat(f, i, cat)

    # Setup matching algorithm using parameters tuned to well-aligned images
    match = tweakwcs.TPMatch(searchrad=5,
                             separation=4.0,
                             tolerance=1.0,
                             use2dhist=True)
    try:
        # perform relative fitting
        matchlist = tweakwcs.align_wcs(imglist,
                                       None,
                                       minobj=6,
                                       match=match,
                                       expand_refcat=False)
        del matchlist
    except Exception:
        try:
            # Try without 2dHist use to see whether we can get any matches at all
            match = tweakwcs.TPMatch(searchrad=5,
                                     separation=4.0,
                                     tolerance=1.0,
                                     use2dhist=False)
            matchlist = tweakwcs.align_wcs(imglist,
                                           None,
                                           minobj=6,
                                           match=match,
                                           expand_refcat=False)
            del matchlist

        except Exception:
            log.warning("Problem encountered during matching of sources")
            return []

    # Check to see whether there were any successful fits...
    align_success = False
    for img in imglist:
        wcsname = fits.getval(img.meta['filename'], 'wcsname', ext=("sci", 1))
        img.meta['wcsname'] = wcsname
        img.meta['fit_info']['aligned_to'] = imglist[0].meta['filename']
        img.meta['reference_catalog'] = None

    for img in imglist:
        if img.meta['fit_info']['status'] == 'SUCCESS' and '-FIT' in img.meta['wcsname']:
            align_success = True
            break
    resids_files = []
    if align_success:

        # extract results in the style of 'tweakreg'
        resids = extract_residuals(imglist)

        if resids is not None:
            resids_files = generate_output_files(
                resids,
                json_timestamp=json_timestamp,
                json_time_since_epoch=json_time_since_epoch,
                exclude_fields=['group_id'])

    return resids_files
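A hedged usage sketch for determine_alignment_residuals (filenames hypothetical): compare the relative alignment of two exposures from one association and collect the JSON residuals reports:

files = ['j8bt06nyq_flc.fits', 'j8bt06nzq_flc.fits']
resids_files = determine_alignment_residuals('j8bt06010_asn.fits', files)
if resids_files:
    print('Residuals written to:', resids_files)
else:
    print('No successful relative fit; nothing to report.')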
Example No. 33
def generate_source_catalog(image, **kwargs):
    """ Build source catalogs for each chip using photutils.

    The catalog returned by this function includes sources found in all chips
    of the input image with the positions translated to the coordinate frame
    defined by the reference WCS `refwcs`.  The sources will be:

    - identified using photutils' segmentation-based source finding code,
      ignoring any input pixel which has been flagged as 'bad' in the DQ
      array, should a DQ array be found in the input HDUList;
    - classified as probable cosmic rays (if enabled) using the central_moments
      properties of each source, with these sources being removed from the
      catalog.

    Parameters
    ----------
    image : `~astropy.io.fits.HDUList`
        Input image as an astropy.io.fits HDUList.

    dqname : str
        EXTNAME for the DQ array, if present, in the input image HDUList.

    output : bool
        Specify whether or not to write out a separate catalog file for all the
        sources found in each chip.  Default: None (False)

    threshold : float, optional
        This parameter controls the threshold used for identifying sources in
        the image relative to the background RMS.
        If None, compute a default value of (background+3*rms(background)).
        If threshold < 0.0, use absolute value as scaling factor for default value.

    fwhm : float, optional
        FWHM (in pixels) of the expected sources from the image, comparable to the
        'conv_width' parameter from 'tweakreg'.  Objects with FWHM closest to
        this value will be identified as sources in the catalog.

    Returns
    -------
    source_cats : dict
        Dict of astropy Tables identified by chip number with
        each table containing sources from image extension ``('sci', chip)``.

    """
    if not isinstance(image, pf.HDUList):
        raise ValueError("Input {} not fits.HDUList object".format(image))
    dqname = kwargs.get('dqname', 'DQ')
    output = kwargs.get('output', None)
    # Build source catalog for entire image
    source_cats = {}
    numSci = countExtn(image, extname='SCI')

    for chip in range(numSci):
        chip += 1
        # find sources in image
        if output:
            rootname = image[0].header['rootname']
            outroot = '{}_sci{}_src'.format(rootname, chip)
            kwargs['output'] = outroot
        imgarr = image['sci', chip].data

        # apply any DQ array, if available (index_of raises KeyError if the
        # extension is missing)
        dqmask = None
        try:
            has_dq = image.index_of(dqname) is not None
        except KeyError:
            has_dq = False
        if has_dq:
            dqarr = image[dqname, chip].data

            # "grow out" regions in DQ mask flagged as saturated by several
            # pixels in every direction to prevent the
            # source match algorithm from trying to match multiple sources
            # from one image to a single source in the
            # other or vice-versa.
            # Create temp DQ mask containing all pixels flagged with any value EXCEPT 256
            non_sat_mask = bitfield_to_boolean_mask(dqarr, ignore_flags=256)

            # Create temp DQ mask containing saturated pixels ONLY
            sat_mask = bitfield_to_boolean_mask(dqarr, ignore_flags=~256)

            # Grow out saturated pixels by a few pixels in every direction
            grown_sat_mask = ndimage.binary_dilation(sat_mask, iterations=5)

            # combine the two temporary DQ masks into a single composite DQ mask.
            dqmask = np.bitwise_or(non_sat_mask, grown_sat_mask)

            # dqmask = bitfield_to_boolean_mask(dqarr, good_mask_value=False)
            # TODO: <---Remove this old no-sat bit grow line once this
            # thing works

        seg_tab, segmap = extract_sources(imgarr, dqmask=dqmask, **kwargs)
        seg_tab_phot = seg_tab

        source_cats[chip] = seg_tab_phot

    return source_cats
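The saturation "grow" step above can be illustrated on a toy DQ array; this sketch uses plain NumPy bit tests in place of bitfield_to_boolean_mask so the mask semantics are explicit (all values hypothetical):

import numpy as np
from scipy import ndimage

dqarr = np.zeros((7, 7), dtype=np.int32)
dqarr[3, 3] = 256                       # one saturated pixel (DQ bit 256)
dqarr[1, 1] = 4                         # some other DQ flag

sat_mask = (dqarr & 256) > 0            # saturated pixels only
non_sat_mask = (dqarr & ~256) > 0       # any flag except saturation
grown_sat_mask = ndimage.binary_dilation(sat_mask, iterations=5)
dqmask = non_sat_mask | grown_sat_mask  # composite 'bad pixel' mask
print(dqmask.sum(), 'masked pixels')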
Example No. 34
def create_astrometric_catalog(inputs, **pars):
    """Create an astrometric catalog that covers the inputs' field-of-view.

    Parameters
    ----------
    inputs : str
        Filenames of images to be aligned to the astrometric catalog

    catalog : str, optional
        Name of catalog to extract astrometric positions for sources in the
        input images' field-of-view. Default: GSC241. Options available are
        documented on the catalog web page.

    output : str, optional
        Filename to give to the astrometric catalog read in from the master
        catalog web service.  If `None`, no file will be written out.
        Default: 'ref_cat.ecsv'

    gaia_only : bool, optional
        Specify whether or not to only use sources from GAIA in output catalog
        Default: False

    .. note::
        This function will point to the astrometric catalog web service defined
        through the use of the ASTROMETRIC_CATALOG_URL environment variable.

    Returns
    -------
    ref_table : object
        Astropy Table object of the catalog

    """
    # interpret input parameters
    catalog = pars.get("catalog", 'GSC241')
    output = pars.get("output", 'ref_cat.ecsv')
    gaia_only = pars.get("gaia_only", False)
    table_format = pars.get("table_format", 'ascii.ecsv')

    inputs, _ = parseinput.parseinput(inputs)
    # start by creating a composite field-of-view for all inputs
    wcslist = []
    for img in inputs:
        nsci = fu.countExtn(img)
        for num in range(nsci):
            extname = '{}[sci,{}]'.format(img, num+1)
            wcslist.append(stwcs.wcsutil.HSTWCS(extname))

    # This default output WCS will have the same plate-scale and orientation
    # as the first chip in the list, which for WFPC2 data means the PC.
    # Fortunately, for alignment, this doesn't matter since no resampling of
    # data will be performed
    outwcs = utils.output_wcs(wcslist)
    radius = compute_radius(outwcs)
    ra, dec = outwcs.wcs.crval

    # perform query for this field-of-view
    ref_dict = get_catalog(ra, dec, sr=radius, catalog=catalog)
    colnames = ('ra', 'dec', 'mag', 'objID', 'GaiaID')
    col_types = ('f8', 'f8', 'f4', 'U25', 'U25')
    ref_table = Table(names=colnames, dtype=col_types)

    # extract just the columns we want...
    num_sources = 0
    for source in ref_dict:
        if 'GAIAsourceID' in source:
            g = source['GAIAsourceID']
            if gaia_only and not g.strip():
                continue
        else:
            g = -1  # indicator for no source ID extracted
        r = float(source['ra'])
        d = float(source['dec'])
        m = float(source['mag'])
        o = source['objID']
        num_sources += 1
        ref_table.add_row((r, d, m, o, g))

    # Write out table to a file, if specified
    if output:
        ref_table.write(output, format=table_format)
        print("Created catalog '{}' with {} sources".format(output, num_sources))

    return ref_table
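A hedged usage sketch for create_astrometric_catalog (filename hypothetical): query the default GSC241 service for one exposure's field-of-view and keep only GAIA sources:

ref_table = create_astrometric_catalog('j8bt06nyq_flt.fits',
                                       gaia_only=True,
                                       output='gaia_ref.ecsv')
print(len(ref_table), 'sources retrieved')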
Example No. 35
def tran(inimage,
         outimage,
         direction='forward',
         x=None,
         y=None,
         coordfile=None,
         colnames=None,
         separator=None,
         precision=6,
         output=None,
         verbose=True):
    """ Primary interface to perform coordinate transformations in pixel
        coordinates between 2 images using STWCS and full distortion models
        read from each image's header.
    """
    single_coord = False

    if coordfile is None:
        if isinstance(x, np.ndarray):
            xlist = x.tolist()
            ylist = y.tolist()
        elif not isinstance(x, list):
            xlist = [x]
            ylist = [y]
            single_coord = True
        else:
            xlist = x
            ylist = y

    else:
        if colnames in util.blank_list:
            colnames = ['c1', 'c2']

        # Determine columns which contain pixel positions
        cols = util.parse_colnames(colnames, coordfile)
        # read in columns from input coordinates file
        xyvals = np.loadtxt(coordfile, usecols=cols, delimiter=separator)

        if xyvals.ndim == 1:  # only 1 entry in coordfile
            xlist = [xyvals[0].copy()]
            ylist = [xyvals[1].copy()]
        else:
            xlist = xyvals[:, 0].copy()
            ylist = xyvals[:, 1].copy()

    # start by reading in WCS+distortion info for each image
    im1wcs = wcsutil.HSTWCS(inimage)
    if im1wcs.wcs.is_unity():
        print("####\nNo valid input WCS found in '{}'."
              "\n  Results may be invalid.\n####\n".format(inimage))

    if util.is_blank(outimage):
        fname, fextn = fileutil.parseFilename(inimage)
        numsci = fileutil.countExtn(fname)
        chips = [
            wcsutil.HSTWCS(fname, ext=('sci', e + 1)) for e in range(numsci)
        ]
        if len(chips) == 0:
            chips = [im1wcs]
        im2wcs = distortion.utils.output_wcs(chips)

    else:
        im2wcs = wcsutil.HSTWCS(outimage)

    if im2wcs.wcs.is_unity():
        print("####\nNo valid output WCS found in '{}'."
              "\n  Results may be invalid.\n####\n".format(outimage))

    # Setup the transformation
    p2p = wcs_functions.WCSMap(im1wcs, im2wcs)

    if direction[0].lower() == 'f':
        outx, outy = p2p.forward(xlist, ylist)
    else:
        outx, outy = p2p.backward(xlist, ylist)

    if isinstance(outx, np.ndarray):
        outx = outx.tolist()
        outy = outy.tolist()

    # add formatting based on precision here...
    xstr = []
    ystr = []
    for ox, oy in zip(outx, outy):
        xstr.append('{0:.{1}f}'.format(ox, precision))
        ystr.append('{0:.{1}f}'.format(oy, precision))

    if verbose:
        print('# Coordinate transformations for ', inimage)
        print('# X(in)      Y(in)             X(out)         Y(out)\n')
        for xs, ys, a, b in zip(xlist, ylist, xstr, ystr):
            print("%.4f  %.4f    %s  %s" % (xs, ys, a, b))

    # Create output file, if specified
    if output:
        with open(output, mode='w') as f:
            f.write("# Coordinates converted from %s\n" % inimage)
            for xs, ys in zip(xstr, ystr):
                f.write('%s    %s\n' % (xs, ys))
        print('Wrote out results to: ', output)

    if single_coord:
        outx = outx[0]
        outy = outy[0]

    return outx, outy
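A hedged usage sketch for tran (filenames hypothetical): transform a single pixel position from the frame of an input chip to the frame of a drizzled product:

x_out, y_out = tran('j8bt06nyq_flt.fits[sci,1]',
                    'j8bt06nyq_drz.fits[sci,1]',
                    direction='forward', x=100.0, y=100.0)
print(x_out, y_out)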
Example No. 36
def tran(inimage,outimage,direction='forward',x=None,y=None,
        coords=None, coordfile=None,colnames=None,separator=None,
        precision=6, output=None,verbose=True):
    """ Primary interface to perform coordinate transformations in pixel
        coordinates between 2 images using STWCS and full distortion models
        read from each image's header.
    """
    single_coord = False

    # Only use value provided in `coords` if nothing has been specified for coordfile
    if coords is not None and coordfile is None:
        coordfile = coords
        warnings.simplefilter('always',DeprecationWarning)
        warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
            category=DeprecationWarning)
        warnings.simplefilter('default',DeprecationWarning)

    if coordfile is not None:
        if colnames in util.blank_list:
            colnames = ['c1','c2']
        # Determine columns which contain pixel positions
        cols = util.parse_colnames(colnames,coordfile)
        # read in columns from input coordinates file
        xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)
        if xyvals.ndim == 1: # only 1 entry in coordfile
            xlist = [xyvals[0].copy()]
            ylist = [xyvals[1].copy()]
        else:
            xlist = xyvals[:,0].copy()
            ylist = xyvals[:,1].copy()
        del xyvals
    else:
        if isinstance(x,np.ndarray):
            xlist = x.tolist()
            ylist = y.tolist()
        elif not isinstance(x,list):
            xlist = [x]
            ylist = [y]
            single_coord = True
        else:
            xlist = x
            ylist = y

    # start by reading in WCS+distortion info for each image
    im1wcs = wcsutil.HSTWCS(inimage)
    if im1wcs.wcs.is_unity():
        print("####\nNo valid input WCS found in {}.\n  Results may be invalid.\n####\n".format(inimage))

    if util.is_blank(outimage):
        fname,fextn = fileutil.parseFilename(inimage)
        numsci = fileutil.countExtn(fname)
        chips = []
        for e in range(1,numsci+1):
            chips.append(wcsutil.HSTWCS(fname,ext=('sci',e)))
        if len(chips) == 0:
            chips = [im1wcs]
        im2wcs = distortion.utils.output_wcs(chips)
    else:
        im2wcs = wcsutil.HSTWCS(outimage)

    if im2wcs.wcs.is_unity():
        print("####\nNo valid output WCS found in {}.\n  Results may be invalid.\n####\n".format(outimage))

    # Setup the transformation
    p2p = wcs_functions.WCSMap(im1wcs,im2wcs)

    if direction[0].lower() == 'f':
        outx,outy = p2p.forward(xlist,ylist)
    else:
        outx,outy = p2p.backward(xlist,ylist)

    if isinstance(outx,np.ndarray):
        outx = outx.tolist()
        outy = outy.tolist()

    # add formatting based on precision here...
    xstr = []
    ystr = []
    fmt = "%."+repr(precision)+"f"
    for ox,oy in zip(outx,outy):
        xstr.append(fmt%ox)
        ystr.append(fmt%oy)

    if verbose or util.is_blank(output):
        print('# Coordinate transformations for ',inimage)
        print('# X(in)      Y(in)             X(out)         Y(out)\n')
        for xs,ys,a,b in zip(xlist,ylist,xstr,ystr):
            print("%.4f  %.4f    %s  %s"%(xs,ys,a,b))

    # Create output file, if specified
    if output:
        with open(output, mode='w') as f:
            f.write("# Coordinates converted from %s\n" % inimage)
            for xs, ys in zip(xstr, ystr):
                f.write('%s    %s\n' % (xs, ys))
        print('Wrote out results to: ', output)

    if single_coord:
        outx = outx[0]
        outy = outy[0]
    return outx,outy
Example No. 37
def init_wcscorr(input, force=False):
    """
    This function will initialize the WCSCORR table if it is not already present.
    It uses WCS keywords with a prefix of 'O' (the original OPUS-generated WCS)
    for the initial rows of the table, or the current WCS keywords if no
    'O'-prefixed keywords are found.

    This function will NOT overwrite any rows already present.

    This function works on all SCI extensions at one time.
    """
    # TODO: Create some sort of decorator or (for Python2.5) context for
    # opening a FITS file and closing it when done, if necessary
    if not isinstance(input, fits.HDUList):
        # input must be a filename, so open as `astropy.io.fits.HDUList` object
        fimg = fits.open(input, mode='update')
        need_to_close = True
    else:
        fimg = input
        need_to_close = False

    # Do not try to generate a WCSCORR table for a simple FITS file
    numsci = fileutil.countExtn(fimg)
    if len(fimg) == 1 or numsci == 0:
        if need_to_close:
            fimg.close()
        return

    enames = [e.name for e in fimg]
    if 'WCSCORR' in enames:
        if not force:
            if need_to_close:
                fimg.close()
            return
        else:
            del fimg['wcscorr']
    print('Initializing new WCSCORR table for ',fimg.filename())

    used_wcskeys = altwcs.wcskeys(fimg['SCI', 1].header)

    # define the primary columns of the WCSEXT table with initial rows for each
    # SCI extension for the original OPUS solution
    numwcs = len(used_wcskeys)
    if numwcs == 0:
        numwcs = 1

    # create new table with more rows than needed initially to make it easier to
    # add new rows later
    wcsext = create_wcscorr(descrip=True,numrows=numsci, padding=(numsci*numwcs) + numsci * 4)
    # Assign the correct EXTNAME value to this table extension
    wcsext.header['TROWS'] = (numsci * 2, 'Number of updated rows in table')
    wcsext.header['EXTNAME'] = ('WCSCORR', 'Table with WCS Update history')
    wcsext.header['EXTVER'] = 1

    # define set of WCS keywords which need to be managed and copied to the table
    wcs1 = stwcs.wcsutil.HSTWCS(fimg,ext=('SCI',1))
    idc2header = True
    if wcs1.idcscale is None:
        idc2header = False
    wcs_keywords = list(wcs1.wcs2header(idc2hdr=idc2header).keys())

    prihdr = fimg[0].header
    prihdr_keys = DEFAULT_PRI_KEYS
    pri_funcs = {'SIPNAME':stwcs.updatewcs.utils.build_sipname,
                 'NPOLNAME':stwcs.updatewcs.utils.build_npolname,
                 'D2IMNAME':stwcs.updatewcs.utils.build_d2imname}

    # Now copy original OPUS values into table
    for extver in range(1, numsci + 1):
        rowind = find_wcscorr_row(wcsext.data,
                                  {'WCS_ID': 'OPUS', 'EXTVER': extver,
                                   'WCS_key':'O'})
        # There should only EVER be a single row for each extension with OPUS values
        rownum = np.where(rowind)[0][0]
        #print 'Archiving OPUS WCS in row number ',rownum,' in WCSCORR table for SCI,',extver

        hdr = fimg['SCI', extver].header
        # define set of WCS keywords which need to be managed and copied to the table
        if used_wcskeys is None:
            used_wcskeys = altwcs.wcskeys(hdr)
        # Check to see whether or not there is an OPUS alternate WCS present,
        # if so, get its values directly, otherwise, archive the PRIMARY WCS
        # as the OPUS values in the WCSCORR table
        if 'O' not in used_wcskeys:
            altwcs.archiveWCS(fimg,('SCI',extver),wcskey='O', wcsname='OPUS')
        wkey = 'O'

        wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver), wcskey=wkey)
        wcshdr = wcs.wcs2header(idc2hdr=idc2header)

        if wcsext.data.field('CRVAL1')[rownum] != 0:
            # If we find values for these keywords already in the table, do not
            # overwrite them again
            print('WCS keywords already updated...')
            break
        for key in wcs_keywords:
            if key in wcsext.data.names:
                wcsext.data.field(key)[rownum] = wcshdr[(key+wkey)[:8]]
        # Now get any keywords from PRIMARY header needed for WCS updates
        for key in prihdr_keys:
            if key in prihdr:
                val = prihdr[key]
            else:
                val = ''
            wcsext.data.field(key)[rownum] = val

    # Now that we have archived the OPUS alternate WCS, remove it from the list
    # of used_wcskeys
    if 'O' in used_wcskeys:
        used_wcskeys.remove('O')

    # Now copy remaining alternate WCSs into table
    # TODO: Much of this appears to be redundant with update_wcscorr; consider
    # merging them...
    for uwkey in used_wcskeys:
        for extver in range(1, numsci + 1):
            hdr = fimg['SCI', extver].header
            wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver),
                                       wcskey=uwkey)
            wcshdr = wcs.wcs2header()
            if 'WCSNAME' + uwkey not in wcshdr:
                wcsid = utils.build_default_wcsname(fimg[0].header['idctab'])
            else:
                wcsid = wcshdr['WCSNAME' + uwkey]

            # identify next empty row
            rowind = find_wcscorr_row(wcsext.data,
                                    selections={'wcs_id':['','0.0']})
            rows = np.where(rowind)[0]
            if len(rows) > 0:
                rownum = rows[0]
            else:
                # No empty rows left; skip rather than overwrite a stale row
                print('No available rows found for updating.')
                continue

            # Update selection columns for this row with relevant values
            wcsext.data.field('WCS_ID')[rownum] = wcsid
            wcsext.data.field('EXTVER')[rownum] = extver
            wcsext.data.field('WCS_key')[rownum] = uwkey

            # Look for standard WCS keyword values
            for key in wcs_keywords:
                if key in wcsext.data.names:
                    wcsext.data.field(key)[rownum] = wcshdr[key + uwkey]
            # Now get any keywords from PRIMARY header needed for WCS updates
            for key in prihdr_keys:
                if key in pri_funcs:
                    val = pri_funcs[key](fimg)[0]
                else:
                    if key in prihdr:
                        val = prihdr[key]
                    else:
                        val = ''
                wcsext.data.field(key)[rownum] = val

    # Append this table to the image FITS file
    fimg.append(wcsext)
    # force an update now
    # set the verify flag to 'warn' so that it will always succeed, but still
    # tell the user if PyFITS detects any problems with the file as a whole
    utils.updateNEXTENDKw(fimg)

    fimg.flush('warn')

    if need_to_close:
        fimg.close()
Example No. 38
def generate_source_catalog(image,
                            dqname="DQ",
                            output=False,
                            fwhm=3.0,
                            **detector_pars):
    """ Build source catalogs for each chip using photutils.

    The catalog returned by this function includes sources found in all chips
    of the input image with the positions translated to the coordinate frame
    defined by the reference WCS `refwcs`.  The sources will be:

    - identified using photutils' segmentation-based source finding code,
      ignoring any input pixel which has been flagged as 'bad' in the DQ
      array, should a DQ array be found in the input HDUList;
    - classified as probable cosmic rays (if enabled) using the central_moments
      properties of each source, with these sources being removed from the
      catalog.

    Parameters
    ----------
    image : `~astropy.io.fits.HDUList`
        Input image as an astropy.io.fits HDUList.
    dqname : str
        EXTNAME for the DQ array, if present, in
        the input image HDUList.
    output : bool
        Specify whether or not to write out a separate catalog file for all the
        sources found in each chip.
    fwhm : float
        Full-width half-maximum (fwhm) of the PSF in pixels.

    Returns
    -------
    source_cats : dict
        Dict of astropy Tables identified by chip number with
        each table containing sources from image extension ``('sci', chip)``.

    """
    if not isinstance(image, fits.HDUList):
        raise ValueError("Input {} not fits.HDUList object".format(image))

    # remove parameters that are not needed by subsequent functions
    detector_pars.pop('fwhmpsf', None)

    # Build source catalog for entire image
    source_cats = {}
    numSci = countExtn(image, extname='SCI')
    outroot = None

    for chip in range(numSci):
        chip += 1
        # find sources in image
        if output:
            rootname = image[0].header['rootname']
            outroot = '{}_sci{}_src'.format(rootname, chip)

        imgarr = image['sci', chip].data

        # apply any DQ array, if available (index_of raises KeyError if the
        # extension is missing)
        dqmask = None
        try:
            has_dq = image.index_of(dqname) is not None
        except KeyError:
            has_dq = False
        if has_dq:
            dqarr = image[dqname, chip].data

            # "grow out" regions in DQ mask flagged as saturated by several
            # pixels in every direction to prevent the
            # source match algorithm from trying to match multiple sources
            # from one image to a single source in the
            # other or vice-versa.
            # Create temp DQ mask containing all pixels flagged with any value EXCEPT 256
            non_sat_mask = bitfield_to_boolean_mask(dqarr, ignore_flags=256)

            # Create temp DQ mask containing saturated pixels ONLY
            sat_mask = bitfield_to_boolean_mask(dqarr, ignore_flags=~256)

            # Grow out saturated pixels by a few pixels in every direction
            grown_sat_mask = ndimage.binary_dilation(sat_mask, iterations=5)

            # combine the two temporary DQ masks into a single composite DQ mask.
            dqmask = np.bitwise_or(non_sat_mask, grown_sat_mask)

            # dqmask = bitfield_to_boolean_mask(dqarr, good_mask_value=False)
            # TODO: <---Remove this old no-sat bit grow line once this
            # thing works

        seg_tab, segmap = extract_sources(imgarr,
                                          dqmask=dqmask,
                                          outroot=outroot,
                                          fwhm=fwhm,
                                          **detector_pars)
        seg_tab_phot = seg_tab

        source_cats[chip] = seg_tab_phot

    return source_cats