def make_final_image(input_image, output_image, output_wave_image,
                     desired_fwhm,
                     input_uncert_image=None, output_uncert_image=None,
                     clobber=False):
    """This routine makes the 'final' images for a data cube. At least the
    paths to the input image, output image, and output wavelength image are
    necessary for this. Beyond that, the user may also have the routine
    create uncertainty images as well.

    Images are convolved to the resolution 'desired_fwhm'. If the current
    fwhm is already higher than that, the routine will throw an error.

    A number of fits header keywords are necessary for this program to
    function properly. Any of these missing will throw an error.

    The output images are intensity-weighted, i.e. the wavelength image will
    be created such that the wavelengths at each pixel are the 'most likely'
    wavelength for the intensity at that pixel, etc.

    Inputs:
    input_image -> Path to the input image.
    output_image -> Path to the output image.
    output_wave_image -> Path to the output wavelength image.
    desired_fwhm -> Desired FWHM for the resultant image to have.

    Optional Inputs:
    input_uncert_image -> Path to the input uncertainty image, if it exists.
    output_uncert_image -> Path to the output uncertainty image, if it exists.
    clobber -> Overwrite output images if they already exist. Default is False.

    """

    print "Making final data cube images for image "+input_image

    #Measure the sky background level in the input image
    skyavg, skysig = fit_sky_level([input_image])

    #Open the input image and get various header keywords, crash if necessary
    intyimage = openfits(input_image)
    intygrid = intyimage[0].data
    fwhm = intyimage[0].header.get("fpfwhm")
    wave0 = intyimage[0].header.get("fpwave0")
    calf = intyimage[0].header.get("fpcalf")
    xcen = intyimage[0].header.get("fpxcen")
    ycen = intyimage[0].header.get("fpycen")
    if fwhm is None:
        crash("Error! FWHM not measured for image "+input_image+".")
    if wave0 is None or calf is None:
        crash("Error! Wavelength solution does " +
              "not exist for image "+input_image+".")
    if xcen is None or ycen is None:
        crash("Error! Center values not measured " +
              "for image "+input_image+".")
    if fwhm > desired_fwhm:
        crash("Error! Desired FWHM too low for image " +
              input_image+".")

    #Subtract the sky background from the image
    intygrid[intygrid != 0] -= skyavg[0]

    #Calculate the necessary FWHM for convolution and make the gaussian kernel
    fwhm_conv = np.sqrt(desired_fwhm**2-fwhm**2)
    sig = fwhm_conv/2.3548+0.0001
    ksize = int(np.ceil(4*sig))  #Generate the kernel to 4-sigma
    kxgrid, kygrid = np.meshgrid(np.linspace(-ksize, ksize, 2*ksize+1),
                                 np.linspace(-ksize, ksize, 2*ksize+1))
    kern = np.exp(-(kxgrid**2+kygrid**2)/(2*sig**2))  #Gaussian (unnormalized because sig can be 0)
    kern = kern/np.sum(kern)  #Normalize the kernel

    #Open and convolve the uncertainty image if one exists. Save the output.
    if input_uncert_image is not None:
        uncertimage = openfits(input_uncert_image)
        uncertgrid = uncertimage[0].data
        #Add the sky background uncertainty to the uncertainty grid
        uncertgrid[intygrid != 0] = np.sqrt(uncertgrid[intygrid != 0]**2 +
                                            skysig[0]**2)
        #Convolve the uncertainties appropriately
        new_uncert_grid = convolve_uncert(uncertgrid, intygrid, kern)
        #Write to output file
        writefits(output_uncert_image, new_uncert_grid,
                  header=uncertimage[0].header, clobber=clobber)
        uncertimage.close()

    #Create and convolve the wavelength image. Save the output.
    xgrid, ygrid = np.meshgrid(np.arange(intyimage[0].data.shape[1]),
                               np.arange(intyimage[0].data.shape[0]))
    r2grid = (xgrid-xcen)**2 + (ygrid-ycen)**2
    wavegrid = wave0 / np.sqrt(1+r2grid/calf**2)
    newwavegrid = convolve_wave(wavegrid, intygrid, kern)
    writefits(output_wave_image, newwavegrid, header=intyimage[0].header,
              clobber=clobber)

    #Convolve the intensity image. Save the output.
    newintygrid = convolve_inty(intygrid, kern)
    intyimage[0].header["fpfwhm"] = desired_fwhm  #Update header FWHM keyword
    writefits(output_image, newintygrid, header=intyimage[0].header,
              clobber=clobber)

    #Close images
    intyimage.close()

    return
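

#Illustrative usage sketch (an assumption, not part of the pipeline): the
#paths and FWHM value below are hypothetical placeholders. Because Gaussian
#FWHMs add in quadrature under convolution, 'desired_fwhm' must not be
#smaller than the 'fpfwhm' header value of the input image, or the routine
#crashes.
def _example_make_final_image():
    make_final_image(input_image="fp_example.fits",
                     output_image="fp_example_final.fits",
                     output_wave_image="fp_example_wave.fits",
                     desired_fwhm=4.5,
                     input_uncert_image="fp_example_unc.fits",
                     output_uncert_image="fp_example_unc_final.fits",
                     clobber=True)
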
def align_norm(fnlist, uncertlist=None):
    """Aligns a set of images to each other, as well as normalizing the
    images to the same average brightness.

    Both the alignment and normalization are accomplished through stellar
    photometry using the IRAF routine 'daophot'. The centroids of a handful
    of stars are found and used to run the IRAF routine 'imalign'. The
    instrumental magnitudes of the stars are used to determine by how much
    each image must be scaled for the photometry to match across images.

    The images are simply updated with their rescaled, shifted selves. This
    overwrites the previous images and adds the header keyword 'fpphot' to
    the images.

    A handful of temporary files are created during this process, which
    should all be deleted by the routine at the end. But if it is
    interrupted, they might not be.

    If the uncertainty images exist, this routine also shifts them by the
    same amounts as the intensity images, as well as updating the
    uncertainty values for both the new normalization and the uncertainties
    in normalizing the images.

    Inputs:
    fnlist -> List of strings, each the path to a fits image.
    uncertlist (optional) -> List of paths to uncertainty images.

    """

    #Fit for the sky background level
    _skyavg, skysig = fit_sky_level(fnlist)

    #Get image FWHMs
    fwhm = np.empty(len(fnlist))
    firstimage = openfits(fnlist[0])
    toggle = firstimage[0].header.get("fpfwhm")
    axcen = firstimage[0].header.get("fpaxcen")
    aycen = firstimage[0].header.get("fpaycen")
    arad = firstimage[0].header.get("fparad")
    firstimage.close()
    if axcen is None:
        print "Error! Images have not yet been aperture-masked! Do this first!"
        crash()
    if toggle is None:
        print "Warning: FWHMs have not been measured! Assuming 5 pixel FWHM for all images."
        for i in range(len(fnlist)):
            fwhm[i] = 5
    else:
        for i in range(len(fnlist)):
            image = openfits(fnlist[i])
            fwhm[i] = image[0].header["fpfwhm"]
            image.close()

    #Identify objects in the fields
    coolist = identify_objects(fnlist, skysig, fwhm)

    #Match objects between fields
    coofile = match_objects(coolist)

    #Do aperture photometry on the matched objects
    photlist = do_phot(fnlist, coofile, fwhm, skysig)

    #Read the photometry files
    x, y, mag, dmag = read_phot(photlist)

    #Calculate the normalizations
    norm, dnorm = calc_norm(mag, dmag)

    #Normalize the images (and optionally, the uncertainty images)
    for i in range(len(fnlist)):
        print "Normalizing image "+fnlist[i]
        image = openfits(fnlist[i], mode="update")
        if uncertlist is not None:
            uncimage = openfits(uncertlist[i], mode="update")
            uncimage[0].data = np.sqrt(norm[i]**2*uncimage[0].data**2 +
                                       dnorm[i]**2*image[0].data**2)
            uncimage.close()
        image[0].data *= norm[i]
        image.close()

    #Calculate the shifts
    for i in range(x.shape[1]):
        x[:, i] = -(x[:, i] - x[0, i])
        y[:, i] = -(y[:, i] - y[0, i])
    xshifts = np.average(x, axis=1)
    yshifts = np.average(y, axis=1)

    #Shift the images (and optionally, the uncertainty images)
    iraf.images(_doprint=0)
    iraf.immatch(_doprint=0)
    for i in range(len(fnlist)):
        print "Shifting image "+fnlist[i]
        iraf.geotran(input=fnlist[i], output=fnlist[i], geometry="linear",
                     xshift=xshifts[i], yshift=yshifts[i],
                     database="", verbose="no")
        if uncertlist is not None:
            iraf.geotran(input=uncertlist[i], output=uncertlist[i],
                         geometry="linear",
                         xshift=xshifts[i], yshift=yshifts[i],
                         database="", verbose="no")

    #Update the image headers
    for i in range(len(fnlist)):
        image = openfits(fnlist[i], mode="update")
        image[0].header["fpphot"] = "True"
        image[0].header["fpxcen"] += xshifts[i]
        image[0].header["fpycen"] += yshifts[i]
        image[0].header["fpaxcen"] += xshifts[i]
        image[0].header["fpaycen"] += yshifts[i]
        image.close()

    #Clean up the coordinate file list
    clean_files(fnlist)
    remove(coofile)
    for i in range(len(photlist)):
        remove(photlist[i])

    return
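

#Illustrative usage sketch (an assumption, not part of the pipeline): the
#file names below are hypothetical placeholders. align_norm rescales and
#shifts the listed images in place, so it is normally run on working copies
#rather than on the original data.
def _example_align_norm():
    fnlist = ["fp_image_001.fits", "fp_image_002.fits", "fp_image_003.fits"]
    uncertlist = ["fp_unc_001.fits", "fp_unc_002.fits", "fp_unc_003.fits"]
    align_norm(fnlist, uncertlist=uncertlist)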