def make_final_image(input_image, output_image,
                     desired_fwhm, clobber=False):
    """This routine makes the 'final' images for a data cube. At least the
    paths to the input image, output image, and output wavelength image are
    necessary for this. Beyond that, the user may also have the routine create
    uncertainty images as well.

    Images are convolved to the resolution 'desired_fwhm'. If the current fwhm
    is already higher than that, the routine will throw an error.

    A number of FITS header keywords (the measured FWHM, the wavelength
    solution, and the ring center) are necessary for this routine to function
    properly. If any of these are missing, the routine exits with an error.

    The convolutions are intensity-weighted, i.e. the convolved wavelength
    image is created such that the wavelength at each pixel is the 'most
    likely' wavelength for the intensity at that pixel.

    Inputs:
    input_image -> Path to the input image.
    output_image -> Path to the output image.
    desired_fwhm -> Desired FWHM for the resultant image to have.

    Optional Inputs:
    clobber -> Overwrite output images if they already exist. Default is False.

    """

    print "Making final data cube images for image "+input_image

    # Open the image
    image = FPImage(input_image)

    # Measure the sky background level in the input image
    skyavg, _truesig, skysig = image.skybackground()

    # Get various header keywords, crash if necessary
    intygrid = image.inty
    fwhm = image.fwhm
    wave0 = image.wave0
    calf = image.calf
    xcen = image.xcen
    ycen = image.ycen
    if fwhm is None:
        exit("Error! FWHM not measured for image "+input_image+".")
    if wave0 is None or calf is None:
        exit("Error! Wavelength solution does " +
             "not exist for image "+input_image+".")
    if xcen is None or ycen is None:
        exit("Error! Center values not measured " +
             "for image "+input_image+".")
    if fwhm > desired_fwhm:
        exit("Error! Desired FWHM too low for image " +
             input_image+".")

    # Calculate the necessary FWHM for convolution
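    # Gaussian widths add in quadrature under convolution, so a kernel of
    # FWHM sqrt(desired_fwhm**2 - fwhm**2) degrades the current resolution
    # to exactly desired_fwhm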
    fwhm_conv = np.sqrt(desired_fwhm**2-fwhm**2)
    # 2.3548 = 2*sqrt(2*ln(2)) converts FWHM to sigma
    sig = fwhm_conv/2.3548
    # Extremely conservative "safe" kernel size
    ksize = np.ceil(4*sig)
    # Generate the Gaussian kernel, offset so that the convolution also
    # applies the image's registration shift (xshift, yshift)
    kxgrid, kygrid = np.meshgrid(np.linspace(-ksize, ksize, 2*ksize+1),
                                 np.linspace(-ksize, ksize, 2*ksize+1))
    xshift, yshift = image.xshift, image.yshift
    rad2grid = (kxgrid + xshift)**2 + (kygrid + yshift)**2
    kern = np.exp(-rad2grid/(2*sig**2))

    # Normalize the kernel
    kern = kern/np.sum(kern)

    # Extract relevant arrays
    vargrid = image.vari
    badpixgrid = image.badp

    # Convolve the variances appropriately
    new_vargrid = convolve_variance(vargrid, badpixgrid, kern)

    # Add the sky background uncertainty (as variance) to the convolved
    # variance grid
    new_vargrid = new_vargrid+skysig**2

    # Create and convolve the wavelength array
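    # If no wavelength frame exists yet, build one from the Fabry-Perot
    # wavelength solution lambda(r) = wave0/sqrt(1 + (r/calf)**2), where r is
    # each pixel's distance from the ring center (xcen, ycen)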
    if image.wave is None:
        rgrid = image.rarray(xcen, ycen)
        wavegrid = wave0 / np.sqrt(1+rgrid**2/calf**2)
    else:
        wavegrid = image.wave
    new_wavegrid = convolve_wave(wavegrid, intygrid, badpixgrid, kern)

    # Convolve the intensity image
    new_intygrid = convolve_inty(intygrid, badpixgrid, kern)
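    # Where the convolution produced zeros or NaNs (e.g. in fully masked
    # regions), fall back to the original, unconvolved intensity values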
    new_intygrid[new_intygrid == 0] = intygrid[new_intygrid == 0]
    new_intygrid[np.isnan(new_intygrid)] = intygrid[np.isnan(new_intygrid)]
    image.fwhm = desired_fwhm  # Update header FWHM keyword

    # Subtract the sky background from the image
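    # (only nonzero pixels are touched, so blank/masked pixels remain zero)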
    new_intygrid[new_intygrid != 0] -= skyavg

    # Create a new fits extension for the wavelength array
    if image.wave is None:
        waveheader = image.openimage[2].header.copy()
        waveheader['EXTVER'] = 4
        wavehdu = ImageHDU(data=new_wavegrid,
                           header=waveheader,
                           name="WAVE")
        image.openimage[1].header.set('WAVEEXT', 4,
                                      comment='Extension for Wavelength Frame')
        image.openimage.append(wavehdu)

    # Put all of the convolved images in the right extensions
    image.inty = new_intygrid
    image.vari = new_vargrid
    image.wave = new_wavegrid

    # Adjust header keywords (even though they're kinda irrelevant now)
    image.xcen += image.xshift
    image.ycen += image.yshift
    image.axcen += image.xshift
    image.aycen += image.yshift

    # Write the output file
    image.writeto(output_image, clobber=clobber)

    # Close images
    image.close()

    return
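
# A minimal usage sketch (hypothetical file names; the pipeline is assumed to
# have already measured the FWHM, ring center, and wavelength solution for the
# input frame):
#
#     make_final_image("product/fp_sci_0042.fits",
#                      "final/fp_sci_0042_final.fits",
#                      desired_fwhm=4.5, clobber=True)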


def align_norm(fnlist, tolerance=5, thresh=3.5):
    """Aligns a set of images to each other, as well as normalizing the images
    to the same average brightness.

    Both the alignment and normalization are accomplished through stellar
    photometry using the IRAF routine 'daophot'. The centroids of a handful
    of stars are found and used to run the IRAF routine 'imalign'. The
    instrumental magnitudes of the stars are used to determine by how much
    each image must be scaled for the photometry to match across images.

    The images are simply updated with their rescaled, shifted selves. This
    overwrites the previous images and adds the header keyword 'fpphot' to
    the images.

    A handful of temporary files are created during this process, which should
    all be deleted by the routine at the end. But if it is interrupted, they
    might not be.

    The variance frames are rescaled consistently with the intensity frames,
    and the uncertainty in each image's normalization factor is also recorded
    in the header.

    Inputs:
    fnlist -> List of strings, each the path to a fits image.
    tolerance -> How close two objects can be and still be considered the same
                 object. Default is 5 pixels.
    thresh -> Optional. Detection threshold above the sky background variation.
              Default is 3.5 (times SkySigma). Decrease if star positions
              aren't being found accurately. Increase for crowded fields to
              decrease computation time.

    """

    # Get image FWHMs
    fwhm = np.empty(len(fnlist))
    firstimage = FPImage(fnlist[0])
    toggle = firstimage.fwhm
    axcen = firstimage.axcen
    aycen = firstimage.aycen
    arad = firstimage.arad
    firstimage.close()
    if axcen is None:
        print "Error! Images have not yet been aperture-masked! Do this first!"
        crash()
    if toggle is None:
        print "Warning! FWHMs have not been measured!"
        print "Assuming 5 pixel FWHM for all images."
        for i in range(len(fnlist)):
            fwhm[i] = 5
    else:
        for i in range(len(fnlist)):
            image = FPImage(fnlist[i])
            fwhm[i] = image.fwhm
            image.close()

    # Get sky background levels
    skyavg = np.empty(len(fnlist))
    skysig = np.empty(len(fnlist))
    for i in range(len(fnlist)):
        image = FPImage(fnlist[i])
        skyavg[i], skysig[i], _skyvar = image.skybackground()
        image.close()

    # Identify the stars in each image
    xlists = []
    ylists = []
    print "Identifying stars in each image..."
    for i in range(len(fnlist)):
        xlists.append([])
        ylists.append([])
        image = FPImage(fnlist[i])
        axcen = image.axcen
        aycen = image.aycen
        arad = image.arad
        sources = daofind(image.inty-skyavg[i],
                          fwhm=fwhm[i],
                          threshold=thresh*skysig[i]).as_array()
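        # 'sources' is a structured array; columns 1 and 2 hold the x and y
        # centroids of each detected source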
        for j in range(len(sources)):
            # If the source is not near the center or edge
            centermask = ((sources[j][1]-axcen)**2 +
                          (sources[j][2]-aycen)**2 > (0.05*arad)**2)
            edgemask = ((sources[j][1]-axcen)**2 +
                        (sources[j][2]-aycen)**2 < (0.95*arad)**2)
            if np.logical_and(centermask, edgemask):
                xlists[i].append(sources[j][1])
                ylists[i].append(sources[j][2])
        image.close()

    # Match objects between fields
    print "Matching objects between images..."
    xcoo = []
    ycoo = []
    for i in range(len(xlists[0])):
        # For each object in the first image
        accept = True
        for j in range(1, len(fnlist)):
            # For each other image
            dist2 = ((np.array(xlists[j])-xlists[0][i])**2 +
                     (np.array(ylists[j])-ylists[0][i])**2)
            if (min(dist2) > tolerance**2):
                accept = False
                break
        if accept:
            # We found an object at that position in every image
            xcoo.append(xlists[0][i])
            ycoo.append(ylists[0][i])

    # Create coordinate arrays for the photometry and shifting
    x = np.zeros((len(fnlist), len(xcoo)))
    y = np.zeros_like(x)
    for i in range(len(xcoo)):
        # For every object found in the first image
        for j in range(len(fnlist)):
            # Find that object in every image
            dist2 = ((np.array(xlists[j])-xcoo[i])**2 +
                     (np.array(ylists[j])-ycoo[i])**2)
            index = np.argmin(dist2)
            x[j, i] = xlists[j][index]
            y[j, i] = ylists[j][index]

    # Do aperture photometry on the matched objects
    print "Performing photometry on matched stars..."
    counts = np.zeros_like(x)
    dcounts = np.zeros_like(x)
    for i in range(len(fnlist)):
        image = FPImage(fnlist[i])
        apertures = CircularAperture((x[i], y[i]), r=2*fwhm[i])
        annuli = CircularAnnulus((x[i], y[i]), r_in=3*fwhm[i], r_out=4*fwhm[i])
        phot_table = aperture_photometry(image.inty,
                                         apertures, error=np.sqrt(image.vari))
        sky_phot_table = aperture_photometry(image.inty, annuli,
                                             error=np.sqrt(image.vari))
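        # Sky-subtracted counts: (mean flux per pixel in the aperture minus
        # mean sky per pixel in the annulus) times the aperture area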
        counts[i] = phot_table["aperture_sum"] / apertures.area()
        counts[i] -= sky_phot_table["aperture_sum"] / annuli.area()
        counts[i] *= apertures.area()
        dcounts[i] = phot_table["aperture_sum_err"] / apertures.area()
        image.close()

    # Calculate the shifts and normalizations
    norm, dnorm = calc_norm(counts, dcounts)
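    # Shifts are defined as (position in the first image) - (position in this
    # image), so adding xshift/yshift to a frame's coordinates maps it onto
    # the first frame's grid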
    for i in range(x.shape[1]):
        x[:, i] = -(x[:, i] - x[0, i])
        y[:, i] = -(y[:, i] - y[0, i])
    xshifts = np.average(x, axis=1)
    yshifts = np.average(y, axis=1)

    # Normalize the images and put shifts in the image headers
    for i in range(len(fnlist)):
        image = FPImage(fnlist[i], update=True)
        image.phottog = "True"
        image.dnorm = dnorm[i]
        image.inty /= norm[i]
        image.vari = image.vari/norm[i]**2
        image.xshift = xshifts[i]
        image.yshift = yshifts[i]
        image.close()

    return
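
# A minimal usage sketch (hypothetical file names):
#
#     align_norm(["product/fp_obj_0001.fits",
#                 "product/fp_obj_0002.fits",
#                 "product/fp_obj_0003.fits"],
#                tolerance=5, thresh=3.5)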