def rouletteWheelSelect(population, SCORES, scorefunc, scoreparams, N):
    """ Select N unique individuals from the population based on roulette wheel selection.
        If there are not at least N individuals in the population, ValueError is raised """
    
    if len(population) < N:
        raise ValueError("Cannot select %d individuals from a population of %d" % (N, len(population)))
    
    wheel = getRouletteWheel(population, SCORES, scorefunc, scoreparams)
    answer = []
    selection = set()
    
    while N:
        try:
            r = randfloat(-1, 0)
            #Find the wheel slot (keyed as an (upper, lower) interval) that contains r
            slot = (s for s in wheel if s[1] <= r <= s[0]).next()
            if slot in selection: continue    #Already chosen; spin again so the N individuals stay unique
            answer.append(wheel[slot])
            selection.add(slot)
            N -= 1
        except StopIteration:
            #No slot contained r; dump the wheel for debugging and re-raise
            print r
            for slot in wheel: print slot
            raise
    
    return answer
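
The helpers getRouletteWheel and randfloat are not part of this example. A minimal sketch of what the selection loop appears to assume is shown below: the wheel maps (upper, lower) interval keys covering [-1, 0] to individuals, with each interval's width proportional to that individual's score. Both helpers here are hypothetical stand-ins, not the original library's code, and the scorefunc call signature is a guess.

import random

def randfloat(a, b):
    #Hypothetical helper: uniform random float in [a, b]
    return random.uniform(a, b)

def getRouletteWheel(population, SCORES, scorefunc, scoreparams):
    #Hypothetical sketch: build {(upper, lower): individual} slots spanning [-1, 0],
    #with each slot's width proportional to the individual's (assumed positive) score.
    scores = [scorefunc(individual, SCORES, *scoreparams) for individual in population]
    total = float(sum(scores))
    wheel = {}
    upper = 0.0
    for individual, score in zip(population, scores):
        lower = upper - score/total    #Slots run downward from 0 toward -1
        wheel[(upper, lower)] = individual
        upper = lower
    return wheel
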
Example #2
                             clobber=True)

#Get final lists for the velocity map fitting for each object
final_inty_lists = []
final_wave_lists = []
final_unc_lists = []
for i in range(len(list_of_objs)):
    final_inty_lists.append([])
    final_wave_lists.append([])
    final_unc_lists.append([])
    finalimagelist = sorted(listdir(list_of_objs[i].replace(" ", "")+"_cube"))
    for j in range(len(finalimagelist)):
        if "_wave.fits" in finalimagelist[j]: final_wave_lists[i].append(join(list_of_objs[i].replace(" ", "")+"_cube",finalimagelist[j]))
        elif "_unc.fits" in finalimagelist[j]: final_unc_lists[i].append(join(list_of_objs[i].replace(" ", "")+"_cube",finalimagelist[j]))
        elif "_inty.fits" in finalimagelist[j]: final_inty_lists[i].append(join(list_of_objs[i].replace(" ", "")+"_cube",finalimagelist[j]))
if len(final_wave_lists) != len(final_unc_lists) or len(final_wave_lists) != len(final_inty_lists): crash("Error! Mismatched images in data cube directory!")

#Shift to solar velocity frame
for i in range(len(list_of_objs)):
    firstimage = openfits(final_wave_lists[i][0])
    velshift = firstimage[0].header.get("fpsolar")
    firstimage.close()
    if velshift is None:
        print "Performing solar velocity shift for object "+list_of_objs[i]+"..."
        solar_velocity_shift(final_wave_lists[i], rest_wave)
    else: print "Solar velocity shift for object "+list_of_objs[i]+" already done."

#Velocity map fitting
for i in range(len(list_of_objs)):
    if (isfile(join(list_of_objs[i].replace(" ", "")+"_cube","velocity.fits"))):
        while True:
Example #3
def fit_wave_soln(fnlist):
    """Fits a wavelength solution to rings in a set of images. Appends this
    wavelength solution to the image headers as the keywords:
    'fpcala', 'fpcalb', ... 'fpcalf'
    
    Each object in fnlist must have a corresponding "median.fits" in its
    image directory, or this routine will not work.
    
    ARC ring images are fitted by adjusting the center, while the center is held
    fixed for night sky rings. A combination of fits to both sets of rings is
    used to determine a wavelength solution for the whole set of images.
    
    If the ARC rings disagree substantially with the night sky rings, it is
    recommended that users delete the ARC rings from the fit and use only the
    night sky rings.
    
    It is also known that the wavelength solution can sometimes be piecewise in
    time when a large jump in 'z' happens between two images; i.e. different
    wavelength solutions exist before and after the jump. The routine allows
    the user to make a piecewise solution for this reason, but this ability
    should be used sparingly.
    
    This routine contains one of the few hard-coded numbers in the pipeline,
    Fguess=5600. Currently F values are not written to the fits image headers,
    and this is a reasonable guess.
    
    Inputs:
    fnlist -> List of strings, each the path to a fits image. These images
    should all have been taken with the same order filter. If not, the routine
    will crash.
    
    """
    
    #This bit takes care of the 's' to save shortcut in matplotlib.
    oldsavekey = plt.rcParams["keymap.save"]
    plt.rcParams["keymap.save"] = ""
        
    #Open all of the images
    imagelist = []
    arclist = []
    objlist = []
    for i in range(len(fnlist)):
        imagelist.append(openfits(fnlist[i]))
        if i == 0: filt = imagelist[0][0].header["FILTER"]
        if imagelist[i][0].header["FILTER"] != filt:
            print "Error! Some of these images are in different filters!"
            crash()
        if imagelist[i][0].header["OBJECT"]=="ARC": arclist.append(imagelist[i])
        else:
            if not isfile(join(split(fnlist[i])[0],"median.fits")):
                print "Error! No 'median.fits' file found."
                crash()
            medimage = openfits(join(split(fnlist[i])[0],"median.fits"))
            imagelist[i][0].data += -medimage[0].data
            medimage.close()
            objlist.append(imagelist[i])
    
    #Load wavelength libraries
    arclib, nightlib = get_libraries(filt)
    if arclib is None:
        print "Error! Your filter isn't in the wavelength library!"
        crash()

    #This next bit fits all of the rings that the user marks

    #Fit rings in the object images
    radlists = []
    for i in range(len(objlist)):
        radlists.append([])
    i=0
    while True:
        xgrid, ygrid = np.meshgrid(np.arange(objlist[i][0].data.shape[1]), np.arange(objlist[i][0].data.shape[0]))
        xcen = objlist[i][0].header["FPXCEN"]
        ycen = objlist[i][0].header["FPYCEN"]
        axcen = objlist[i][0].header["FPAXCEN"]
        aycen = objlist[i][0].header["FPAYCEN"]
        arad = objlist[i][0].header["FPARAD"]
        rgrid = np.sqrt((xgrid - xcen)**2 + (ygrid - ycen)**2)
        rbins = np.arange(arad-np.int(max(abs(axcen-xcen),abs(aycen-ycen))))+1
        intbins = np.empty_like(rbins)
        for j in range(len(rbins)):
            intbins[j] = np.median(objlist[i][0].data[np.logical_and(np.logical_and(objlist[i][0].data!=0,rgrid<rbins[j]),rgrid>rbins[j]-1)])
        ringplot = PlotRingProfile(objlist[i][0].data[aycen-arad:aycen+arad,axcen-arad:axcen+arad], #Data to be plotted. Only want stuff inside aperture
                                   rbins+(xcen-axcen)+arad, #Radii bins shifted to image center
                                   intbins*arad/np.percentile(np.abs(intbins),98)+(ycen-aycen)+arad, #Intensity bins, rescaled and shifted by image center
                                   xcen-axcen+arad, ycen-aycen+arad, #Shifted center
                                   radlists[i], #Previously fitted rings
                                   repr(i+1)+"/"+repr(len(objlist))) #numstring
        #Changing images and loop breakout conditions
        if ringplot.key == "d": i+=1
        if ringplot.key == "a": i+=-1
        if i == -1 or i == len(objlist):
            while True:
                yn = raw_input("Finished marking sky rings? (y/n) ")
                if "n" in yn or "N" in yn:
                    if i == -1: i=0
                    if i == len(objlist): i = len(objlist)-1
                    break
                elif "y" in yn or "Y" in yn:
                    break
        if i == -1 or i == len(objlist): break
        #Force-marking a ring
        if ringplot.key == "e" and ringplot.xcoo != None: radlists[i].append(ringplot.xcoo-arad-(xcen-axcen))
        #Deleting a ring
        if ringplot.key == "s" and ringplot.xcoo != None and len(radlists[i])>0:
            radlists[i].pop(np.argmin(np.abs(np.array(radlists[i])-np.sqrt((ringplot.xcoo-arad-(xcen-axcen))**2 + (ringplot.ycoo-arad-(ycen-aycen))**2))))
        #Fitting a ring profile
        if ringplot.key == "w" and ringplot.xcoo != None:
            x = rbins[max(ringplot.xcoo-arad-(xcen-axcen)-50,0):min(ringplot.xcoo-arad-(xcen-axcen)+50,len(rbins))]**2
            y = intbins[max(ringplot.xcoo-arad-(xcen-axcen)-50,0):min(ringplot.xcoo-arad-(xcen-axcen)+50,len(rbins))]
            fit = GaussFit(x,y)
            fitplot = PlotRingFit(x,y,fit)
            if fitplot.key == "w": radlists[i].append(np.sqrt(fit[2]))
    zo = []
    to = []
    ro = []
    for i in range(len(objlist)):
        for j in range(len(radlists[i])):
            zo.append(objlist[i][0].header["ET1Z"])
            to.append(objlist[i][0].header["JD"])
            ro.append(radlists[i][j])
            
    #Fit rings in the ARC images
    xcen = objlist[0][0].header["FPXCEN"]
    ycen = objlist[0][0].header["FPYCEN"]
    radlists = []
    for i in range(len(arclist)):
        radlists.append([])
    i=0
    while True:
        xgrid, ygrid = np.meshgrid(np.arange(arclist[i][0].data.shape[1]), np.arange(arclist[i][0].data.shape[0]))
        axcen = arclist[i][0].header["FPAXCEN"]
        aycen = arclist[i][0].header["FPAYCEN"]
        arad = arclist[i][0].header["FPARAD"]
        rgrid = np.sqrt((xgrid - xcen)**2 + (ygrid - ycen)**2)
        rbins = np.arange(arad-np.int(max(abs(axcen-xcen),abs(aycen-ycen))))+1
        intbins = np.empty_like(rbins)
        for j in range(len(rbins)):
            intbins[j] = np.median(arclist[i][0].data[np.logical_and(np.logical_and(arclist[i][0].data!=0,rgrid<rbins[j]),rgrid>rbins[j]-1)])
        ringplot = PlotRingProfile(arclist[i][0].data[aycen-arad:aycen+arad,axcen-arad:axcen+arad], #Data to be plotted. Only want stuff inside aperture
                                   rbins+(xcen-axcen)+arad, #Radii bins shifted to image center
                                   intbins*arad/np.percentile(np.abs(intbins),98)+(ycen-aycen)+arad, #Intensity bins, rescaled and shifted by image center
                                   xcen-axcen+arad, ycen-aycen+arad, #Shifted center
                                   radlists[i], #Previously fitted rings
                                   repr(i+1)+"/"+repr(len(arclist))) #numstring
        #Changing images and loop breakout conditions
        if ringplot.key == "d": i+=1
        if ringplot.key == "a": i+=-1
        if i == -1 or i == len(arclist):
            while True:
                yn = raw_input("Finished marking ARC rings? (y/n) ")
                if "n" in yn or "N" in yn:
                    if i == -1: i=0
                    if i == len(arclist): i = len(arclist)-1
                    break
                elif "y" in yn or "Y" in yn:
                    break
        if i == -1 or i == len(arclist): break
        #Force-marking a ring
        if ringplot.key == "e" and ringplot.xcoo != None: radlists[i].append(ringplot.xcoo-arad-(xcen-axcen))
        #Deleting a ring
        if ringplot.key == "s" and ringplot.xcoo != None and len(radlists[i])>0:
            radlists[i].pop(np.argmin(np.abs(np.array(radlists[i])-np.sqrt((ringplot.xcoo-arad-(xcen-axcen))**2 + (ringplot.ycoo-arad-(ycen-aycen))**2))))
        #Fitting a ring profile
        if ringplot.key == "w" and ringplot.xcoo != None:
            x = rbins[max(ringplot.xcoo-arad-(xcen-axcen)-50,0):min(ringplot.xcoo-arad-(xcen-axcen)+50,len(rbins))]**2
            y = intbins[max(ringplot.xcoo-arad-(xcen-axcen)-50,0):min(ringplot.xcoo-arad-(xcen-axcen)+50,len(rbins))]
            fit = GaussFit(x,y)
            fitplot = PlotRingFit(x,y,fit)
            if fitplot.key == "w": radlists[i].append(np.sqrt(fit[2]))
    za = []
    ta = []
    ra = []
    for i in range(len(arclist)):
        for j in range(len(radlists[i])):
            za.append(arclist[i][0].header["ET1Z"])
            ta.append(arclist[i][0].header["JD"])
            ra.append(radlists[i][j])
    

    #Get a good guess at which wavelengths are which
    Bguess = objlist[0][0].header["ET1B"]
    Fguess = 5600
    
    #Figure out A by matching rings to the wavelength libraries
    master_r = np.array(ro+ra)
    master_z = np.array(zo+za)
    wavematch = np.zeros_like(master_r)
    isnight = np.array([True]*len(ro)+[False]*len(ra))
    oldrms = 10000 #Really high initial RMS for comparisons
    for i in range(len(master_r)):
        if isnight[i]: lib = nightlib
        else: lib = arclib
        for j in range(len(lib)):
            #Assume the i'th ring is the j'th line
            Aguess = lib[j]*np.sqrt(1+master_r[i]**2/Fguess**2)-Bguess*master_z[i]
            #What are all of the other rings, given this A?
            waveguess = (Aguess+Bguess*master_z)/np.sqrt(1+master_r**2/Fguess**2)
            for k in range(len(master_r)):
                if isnight[k]: wavematch[k] = nightlib[np.argmin(np.abs(nightlib-waveguess[k]))]
                else: wavematch[k] = arclib[np.argmin(np.abs(arclib-waveguess[k]))]
            rms = np.sqrt(np.average((waveguess-wavematch)**2))
            if rms < oldrms:
                #This is the new best solution. Keep it!
                oldrms = rms
                bestA = Aguess
                master_wave = wavematch.copy()
    
    #Make more master arrays for the plotting
    master_t = np.array(to+ta)
    t0 = np.min(master_t)
    master_t += -t0
    master_t *= 24*60 #Convert to minutes
    master_color = np.array(len(ro)*["blue"]+len(ra)*["red"]) #Colors for plotting
    toggle = np.ones(len(master_r),dtype="bool")
    dotime = False
    time_dividers = []
    
    #Do the interactive plotting
    while True:
        rplot = master_r[toggle]
        zplot = master_z[toggle]
        tplot = master_t[toggle]
        colorplot = master_color[toggle]
        waveplot = master_wave[toggle]
        fitplot = np.zeros(len(waveplot))
        xs = np.zeros((3,len(rplot)))
        xs[0] = rplot
        xs[1] = zplot
        xs[2] = tplot
        fit = [0]*(len(time_dividers)+1)
        time_dividers = sorted(time_dividers)
        if len(time_dividers)>1: print "Warning: Too many time divisions is likely unphysical. Be careful!"
        for i in range(len(time_dividers)+1):
            #Create a slice for all of the wavelengths before this time divider
            #but after the one before it
            if len(time_dividers)==0: tslice = tplot==tplot
            elif i == 0: tslice = tplot<time_dividers[i]
            elif i==len(time_dividers): tslice = tplot>time_dividers[i-1]
            else: tslice = np.logical_and(tplot<time_dividers[i],tplot>time_dividers[i-1])
            if dotime:
                fit[i] = curve_fit(fpfunc_for_curve_fit_with_t, xs[:,tslice], waveplot[tslice], p0=(bestA,Bguess,0,Fguess))[0]
                fitplot[tslice] = fpfunc_for_curve_fit_with_t(xs[:,tslice], fit[i][0], fit[i][1], fit[i][2], fit[i][3])
            else:
                fit[i] = curve_fit(fpfunc_for_curve_fit, xs[:,tslice], waveplot[tslice], p0=(bestA,Bguess,Fguess))[0]
                fitplot[tslice] = fpfunc_for_curve_fit(xs[:,tslice], fit[i][0], fit[i][1], fit[i][2])
        resid = waveplot - fitplot
        solnplot = WaveSolnPlot(rplot,zplot,tplot,waveplot,resid,colorplot,time_dividers)
        #Breakout case
        if solnplot.key == "a":
            while True:
                for i in range(len(time_dividers)+1):
                    if dotime: print "Solution 1: A = "+str(fit[i][0])+", B = "+str(fit[i][1])+", E = "+str(fit[i][2])+", F = "+str(fit[i][3])
                    else: print "Solution 1: A = "+str(fit[i][0])+", B = "+str(fit[i][1])+", F = "+str(fit[i][2])
                print "Residual rms="+str(np.sqrt(np.average(resid**2)))+" for "+repr(len(time_dividers)+1)+" independent "+repr(3+dotime)+"-parameter fits to "+repr(len(rplot))+" rings."
                yn = raw_input("Accept wavelength solution? (y/n) ")
                if "n" in yn or "N" in yn:
                    break
                elif "y" in yn or "Y" in yn:
                    solnplot.key = "QUIT"
                    break
        if solnplot.key == "QUIT": break
        #Restore all points case
        if solnplot.key == "r": toggle = np.ones(len(master_r),dtype="bool")
        #Delete nearest point case
        if solnplot.key == "d" and solnplot.axis != None:
            #Figure out which plot was clicked in
            if solnplot.axis == 1:
                #Resid vs. z plot
                z_loc = solnplot.xcoo
                resid_loc = solnplot.ycoo
                dist2 = ((zplot-z_loc)/(np.max(zplot)-np.min(zplot)))**2 + ((resid-resid_loc)/(np.max(resid)-np.min(resid)))**2
            elif solnplot.axis == 2:
                #Resid vs. R plot
                r_loc = solnplot.xcoo
                resid_loc = solnplot.ycoo
                dist2 = ((rplot-r_loc)/(np.max(rplot)-np.min(rplot)))**2 + ((resid-resid_loc)/(np.max(resid)-np.min(resid)))**2
            elif solnplot.axis == 3:
                #Resid vs. T plot
                t_loc = solnplot.xcoo
                resid_loc = solnplot.ycoo
                dist2 = ((tplot-t_loc)/(np.max(tplot)-np.min(tplot)))**2 + ((resid-resid_loc)/(np.max(resid)-np.min(resid)))**2
            elif solnplot.axis == 4:
                #Resid vs. Wave plot
                wave_loc = solnplot.xcoo
                resid_loc = solnplot.ycoo
                dist2 = ((waveplot-wave_loc)/(np.max(waveplot)-np.min(waveplot)))**2 + ((resid-resid_loc)/(np.max(resid)-np.min(resid)))**2
            #Get the radius and time of the worst ring
            r_mask = rplot[dist2 == np.min(dist2)][0]
            t_mask = tplot[dist2 == np.min(dist2)][0]
            toggle[np.logical_and(master_r == r_mask, master_t == t_mask)] = False
        #Fit for time case
        if solnplot.key == "t": dotime = not dotime
        #Add time break
        if solnplot.key == "w":
            timeplot = TimePlot(tplot,resid,colorplot,time_dividers)
            if timeplot.xcoo != None: time_dividers.append(timeplot.xcoo)
        #Remove time breaks
        if solnplot.key == "q":
            time_dividers = []
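
The snippet above is truncated and relies on fpfunc_for_curve_fit and fpfunc_for_curve_fit_with_t, which are not shown. From the way the initial wavelength guesses are formed (Aguess = lib[j]*sqrt(1 + r**2/F**2) - Bguess*z) and from the p0 tuples passed to curve_fit, the etalon model being fitted is evidently wavelength = (A + B*z)/sqrt(1 + r**2/F**2), optionally with a linear drift term E*t in the numerator. A minimal sketch of such model functions, written for the (3, N) array the code passes to curve_fit; these are assumptions, not the pipeline's own definitions:

import numpy as np

def fpfunc_for_curve_fit_sketch(x, A, B, F):
    #x is a (3, N) array whose rows are r, z, t; t is unused in the time-independent fit
    r, z, _t = x
    return (A + B*z)/np.sqrt(1.0 + r**2/F**2)

def fpfunc_for_curve_fit_with_t_sketch(x, A, B, E, F):
    #Same etalon model with a linear drift in time
    r, z, t = x
    return (A + B*z + E*t)/np.sqrt(1.0 + r**2/F**2)
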
Example #4
def align_norm(fnlist, tolerance=5, thresh=3.5):
    """Aligns a set of images to each other, as well as normalizing the images
    to the same average brightness.

    Both the alignment and normalization are accomplished through stellar
    photometry using the IRAF routine 'daophot'. The centroids of a handful
    of stars are found and used to run the IRAF routine 'imalign'. The
    instrumental magnitudes of the stars are used to determine by how much
    each image must be scaled for the photometry to match across images.

    The images are simply updated with their rescaled, shifted selves. This
    overwrites the previous images and adds the header keyword 'fpphot' to
    the images.

    A handful of temporary files are created during this process, which should
    all be deleted by the routine at the end. But if it is interrupted, they
    might not be.

    If the uncertainty images exist, this routine also shifts them by the same
    amounts as the intensity images, as well as updating the uncertainty values
    for both the new normalization and the uncertainties in normalizing the
    images.

    Inputs:
    fnlist -> List of strings, each the path to a fits image.
    tolerance -> How close two objects can be and still be considered the same
                 object. Default is 5 pixels.
    thresh -> Optional. Level above sky background variation to look for objs.
              Default is 3.5 (times SkySigma). Decrease if center positions
              aren't being found accurately. Increase for crowded fields to
              decrease computation time.

    """

    # Get image FWHMs
    fwhm = np.empty(len(fnlist))
    firstimage = FPImage(fnlist[0])
    toggle = firstimage.fwhm
    axcen = firstimage.axcen
    aycen = firstimage.aycen
    arad = firstimage.arad
    firstimage.close()
    if axcen is None:
        print "Error! Images have not yet been aperture-masked! Do this first!"
        crash()
    if toggle is None:
        print "Warning! FWHMs have not been measured!"
        print "Assuming 5 pixel FWHM for all images."
        for i in range(len(fnlist)):
            fwhm[i] = 5
    else:
        for i in range(len(fnlist)):
            image = FPImage(fnlist[i])
            fwhm[i] = image.fwhm
            image.close()

    # Get sky background levels
    skyavg = np.empty(len(fnlist))
    skysig = np.empty(len(fnlist))
    for i in range(len(fnlist)):
        image = FPImage(fnlist[i])
        skyavg[i], skysig[i], _skyvar = image.skybackground()
        image.close()

    # Identify the stars in each image
    xlists = []
    ylists = []
    print "Identifying stars in each image..."
    for i in range(len(fnlist)):
        xlists.append([])
        ylists.append([])
        image = FPImage(fnlist[i])
        axcen = image.axcen
        aycen = image.aycen
        arad = image.arad
        sources = daofind(image.inty-skyavg[i],
                          fwhm=fwhm[i],
                          threshold=thresh*skysig[i]).as_array()
        for j in range(len(sources)):
            # If the source is not near the center or edge
            centermask = ((sources[j][1]-axcen)**2 +
                          (sources[j][2]-aycen)**2 > (0.05*arad)**2)
            edgemask = ((sources[j][1]-axcen)**2 +
                        (sources[j][2]-aycen)**2 < (0.95*arad)**2)
            if np.logical_and(centermask, edgemask):
                xlists[i].append(sources[j][1])
                ylists[i].append(sources[j][2])
        image.close()

    # Match objects between fields
    print "Matching objects between images..."
    xcoo = []
    ycoo = []
    for i in range(len(xlists[0])):
        # For each object in the first image
        accept = True
        for j in range(1, len(fnlist)):
            # For each other image
            dist2 = ((np.array(xlists[j])-xlists[0][i])**2 +
                     (np.array(ylists[j])-ylists[0][i])**2)
            if (min(dist2) > tolerance**2):
                accept = False
                break
        if accept:
            # We found an object at that position in every image
            xcoo.append(xlists[0][i])
            ycoo.append(ylists[0][i])

    # Create coordinate arrays for the photometry and shifting
    x = np.zeros((len(fnlist), len(xcoo)))
    y = np.zeros_like(x)
    for i in range(len(xcoo)):
        # For every object found in the first image
        for j in range(len(fnlist)):
            # Find that object in every image
            dist2 = ((np.array(xlists[j])-xcoo[i])**2 +
                     (np.array(ylists[j])-ycoo[i])**2)
            index = np.argmin(dist2)
            x[j, i] = xlists[j][index]
            y[j, i] = ylists[j][index]

    # Do aperture photometry on the matched objects
    print "Performing photometry on matched stars..."
    counts = np.zeros_like(x)
    dcounts = np.zeros_like(x)
    for i in range(len(fnlist)):
        image = FPImage(fnlist[i])
        apertures = CircularAperture((x[i], y[i]), r=2*fwhm[i])
        annuli = CircularAnnulus((x[i], y[i]), r_in=3*fwhm[i], r_out=4*fwhm[i])
        phot_table = aperture_photometry(image.inty,
                                         apertures, error=np.sqrt(image.vari))
        sky_phot_table = aperture_photometry(image.inty, annuli,
                                             error=np.sqrt(image.vari))
        counts[i] = phot_table["aperture_sum"] / apertures.area()
        counts[i] -= sky_phot_table["aperture_sum"] / annuli.area()
        counts[i] *= apertures.area()
        dcounts[i] = phot_table["aperture_sum_err"] / apertures.area()
        image.close()

    # Calculate the shifts and normalizations
    norm, dnorm = calc_norm(counts, dcounts)
    for i in range(x.shape[1]):
        x[:, i] = -(x[:, i] - x[0, i])
        y[:, i] = -(y[:, i] - y[0, i])
    xshifts = np.average(x, axis=1)
    yshifts = np.average(y, axis=1)

    # Normalize the images and put shifts in the image headers
    for i in range(len(fnlist)):
        image = FPImage(fnlist[i], update=True)
        image.phottog = "True"
        image.dnorm = dnorm[i]
        image.inty /= norm[i]
        image.vari = image.vari/norm[i]**2
        image.xshift = xshifts[i]
        image.yshift = yshifts[i]
        image.close()

    return
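
calc_norm is not shown in this example. One plausible way to turn the matched-star photometry into per-image normalization factors is an inverse-variance weighted mean of each image's star counts relative to the first image; the sketch below is an assumption about what calc_norm does, not its actual implementation.

import numpy as np

def calc_norm_sketch(counts, dcounts):
    #counts, dcounts: (n_images, n_stars) arrays of star counts and their uncertainties.
    #Returns a brightness factor (and its uncertainty) for each image relative to image 0,
    #suitable for dividing into the intensities as done above.
    ratio = counts/counts[0]
    dratio = ratio*np.sqrt((dcounts/counts)**2 + (dcounts[0]/counts[0])**2)
    weights = 1.0/dratio**2
    norm = np.sum(weights*ratio, axis=1)/np.sum(weights, axis=1)
    dnorm = 1.0/np.sqrt(np.sum(weights, axis=1))
    return norm, dnorm
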
def make_final_image(input_image, output_image, output_wave_image,
                     desired_fwhm,
                     input_uncert_image=None, output_uncert_image=None,
                     clobber=False):
    """This routine makes the 'final' images for a data cube. At least the paths
    to the input image, output image, and output wavelength image are necessary
    for this. Beyond that, the user may also have the routine create uncertainty
    images as well.
    
    Images are convolved to the resolution 'desired_fwhm'. If the current fwhm
    is already higher than that, the routine will throw an error.
    
    A number of fits header keywords are necessary for this program to function
    properly. Any of these missing will throw an error.
    
    The output images are intensity-weighted, i.e. the wavelength image will be
    created such that the wavelengths at each pixel are the 'most likely'
    wavelength for the intensity at that pixel, etc.
    
    Inputs:
    input_image -> Path to the input image.
    output_image -> Path to the output image.
    output_wave_image -> Path to the output wavelength image.
    desired_fwhm -> Desired FWHM for the resultant image to have.
    
    Optional Inputs:
    input_uncert_image -> Path to the input uncertainty image, if it exists.
    output_uncert_image -> Path to the output uncertainty image, if it exists.
    clobber -> Overwrite output images if they already exist. Default is False.
    
    """
    
    print "Making final data cube images for image "+input_image
    
    #Measure the sky background level in the input image
    skyavg, skysig = fit_sky_level([input_image])
    
    #Open the input image and get various header keywords, crash if necessary
    intyimage = openfits(input_image)
    intygrid = intyimage[0].data
    fwhm = intyimage[0].header.get("fpfwhm")
    wave0 = intyimage[0].header.get("fpwave0")
    calf = intyimage[0].header.get("fpcalf")
    xcen = intyimage[0].header.get("fpxcen")
    ycen = intyimage[0].header.get("fpycen")
    if fwhm == None: crash("Error! FWHM not measured for image "+input_image+".")
    if wave0 == None or calf == None: crash("Error! Wavelength solution does "+
                                            "not exist for image "+input_image+".")
    if xcen == None or ycen == None: crash("Error! Center values not measured "+
                                           "image "+input_image+".")
    if fwhm>desired_fwhm: crash("Error! Desired FWHM too low for image "+
                                input_image+".")
    
    #Subtract the sky background from the image
    intygrid[intygrid!=0] -= skyavg[0]
    
    #Calculate the necessary FWHM for convolution and make the gaussian kernel
    fwhm_conv = np.sqrt(desired_fwhm**2-fwhm**2)
    sig = fwhm_conv/2.3548+0.0001
    ksize = np.ceil(4*sig) #Generate the kernel to 4-sigma
    kxgrid, kygrid = np.meshgrid(np.linspace(-ksize,ksize,2*ksize+1),np.linspace(-ksize,ksize,2*ksize+1))
    kern = np.exp(-(kxgrid**2+kygrid**2)/(2*sig**2)) #Gaussian (unnormalized because sig can be 0)
    kern = kern/np.sum(kern) #Normalize the kernel
    
    #Open and convolve the uncertainty image if one exists. Save the output.
    if input_uncert_image != None:
        uncertimage = openfits(input_uncert_image)
        uncertgrid = uncertimage[0].data
        #Add the sky background uncertainty to the uncertainty grid
        uncertgrid[intygrid!=0] = np.sqrt(uncertgrid[intygrid!=0]**2+skysig[0]**2)
        #Convolve the uncertainties appropriately
        new_uncert_grid = convolve_uncert(uncertgrid, intygrid, kern)
        #Write to output file
        writefits(output_uncert_image,new_uncert_grid,header=uncertimage[0].header,clobber=clobber)
        uncertimage.close()
        
    #Create and convolve the wavelength image. Save the output.
    xgrid, ygrid = np.meshgrid(np.arange(intyimage[0].data.shape[1]),
                               np.arange(intyimage[0].data.shape[0]))
    r2grid = (xgrid-xcen)**2 + (ygrid-ycen)**2
    wavegrid = wave0 / np.sqrt(1+r2grid/calf**2)
    newwavegrid = convolve_wave(wavegrid, intygrid, kern)
    writefits(output_wave_image,newwavegrid,header=intyimage[0].header,clobber=clobber)
    
    #Convolve the intensity image. Save the output
    newintygrid = convolve_inty(intygrid, kern)
    intyimage[0].header["fpfwhm"] = desired_fwhm #Update header FWHM keyword
    writefits(output_image,newintygrid,header=intyimage[0].header,clobber=clobber)
    
    #Close images
    intyimage.close()
    
    return
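
The convolve_inty, convolve_wave, and convolve_uncert helpers are not shown. The docstring states the outputs are intensity-weighted, so the wavelength at each output pixel is presumably the flux-weighted mean wavelength within the kernel footprint. A minimal sketch of two such helpers using scipy; this is an assumption about their behaviour, not the pipeline's own code:

import numpy as np
from scipy.signal import convolve2d

def convolve_inty_sketch(inty, kern):
    #Plain convolution of the intensity image with the Gaussian kernel
    return convolve2d(inty, kern, mode="same")

def convolve_wave_sketch(wave, inty, kern):
    #Intensity-weighted convolution: each output wavelength is the
    #flux-weighted mean wavelength inside the kernel footprint
    num = convolve2d(wave*inty, kern, mode="same")
    den = convolve2d(inty, kern, mode="same")
    out = np.zeros_like(wave)
    good = den != 0
    out[good] = num[good]/den[good]
    return out
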
Example #6
def deghost(fn,uncfn=None,g=0.04):
    """Routine to deghost an image by rotating it about a central point and
    subtracting a constant multiple of this rotated image from the original.
    My experience has shown that about 0.04 is the right value, but your
    mileage may vary.
    
    The fits header must contain values in "fpxcen" and "fpycen" for the center
    about which to be rotated.
    
    Optionally also modifies an uncertainty image to account for the effects
    of deghosting on the error propagation.
    
    Creates the fits header keyword "fpghost" = "True".
    
    Inputs:
    fn -> String, the path to the fits image to be deghosted.
    uncfn (optional) -> String, the path to the uncertainty image.
    g -> The multiple to subtract from the original image, default = 4%
    
    """
    
    #Open the image and check for center coordinates
    image = openfits(fn,mode="update")
    xcen = image[0].header.get("fpxcen")
    ycen = image[0].header.get("fpycen")
    if xcen == None:
        print "Error! Image "+fn+" doesn't have center coordinates in header!"
        crash()
    
    #Deghost the image
    print "Deghosting image "+fn
    image[0].header["fpghost"] = "True"
    
    #Determine image size
    xsize=image[0].data.shape[1]
    ysize=image[0].data.shape[0]

    #Make a mask for the chip gaps and outside-aperture stuff
    mask = image[0].data==0

    #Make an array of the flipped data
    flip = image[0].data[::-1,::-1].copy()

    #Calculate the difference between the image's geometric center (midpoint) and the axis of rotation
    xshift = 2*xcen-xsize-1
    yshift = 2*ycen-ysize-1

    #A given pixel's position, when rotated, will overlap its four neighboring pixels.
    #All pixels will have the same four overlapping regions because the rotation is 180 degrees.
    #Here we take a weighted sum of these four pixels, where the weights are equal to the areas of overlap.
    #This weighted sum is subtracted from the original pixel in the data array.
    image[0].data[max(np.floor(yshift),0):min(ysize+np.floor(yshift),ysize),max(np.floor(xshift),0):min(xsize+np.floor(xshift),xsize)] += ( -g*abs((np.ceil(yshift)-yshift)*(np.ceil(xshift)-xshift))*flip[max(-np.floor(yshift),0):min(ysize,ysize-np.floor(yshift)),max(-np.floor(xshift),0):min(xsize,xsize-np.floor(xshift))])
    image[0].data[max(np.ceil(yshift),0):min(ysize+np.ceil(yshift),ysize),max(np.floor(xshift),0):min(xsize+np.floor(xshift),xsize)] += ( -g*abs((np.floor(yshift)-yshift)*(np.ceil(xshift)-xshift))*flip[max(-np.ceil(yshift),0):min(ysize,ysize-np.ceil(yshift)),max(-np.floor(xshift),0):min(xsize,xsize-np.floor(xshift))])
    image[0].data[max(np.floor(yshift),0):min(ysize+np.floor(yshift),ysize),max(np.ceil(xshift),0):min(xsize+np.ceil(xshift),xsize)] += ( -g*abs((np.ceil(yshift)-yshift)*(np.floor(xshift)-xshift))*flip[max(-np.floor(yshift),0):min(ysize,ysize-np.floor(yshift)),max(-np.ceil(xshift),0):min(xsize,xsize-np.ceil(xshift))])
    image[0].data[max(np.ceil(yshift),0):min(ysize+np.ceil(yshift),ysize),max(np.ceil(xshift),0):min(xsize+np.ceil(xshift),xsize)] += ( -g*abs((np.floor(yshift)-yshift)*(np.floor(xshift)-xshift))*flip[max(-np.ceil(yshift),0):min(ysize,ysize-np.ceil(yshift)),max(-np.ceil(xshift),0):min(xsize,xsize-np.ceil(xshift))])
    
    #Remask the data using the mask
    image[0].data[mask] = 0
    
    #Close the image
    image.close()
    
    #Modify the uncertainty image if it exists
    if not (uncfn is None):
        print "Updating uncertainty image "+uncfn
        uncimage = openfits(uncfn,mode="update")
        
        #Make an array of the flipped data
        flip = uncimage[0].data[::-1,::-1].copy()*1.0
        
        #Uncertainties add in quadrature
        uncimage[0].data = np.power(uncimage[0].data,2)
        
        #Add the uncertainty in the deghosted uncertainty image to the original
        uncimage[0].data[max(np.floor(yshift),0):min(ysize+np.floor(yshift),ysize),max(np.floor(xshift),0):min(xsize+np.floor(xshift),xsize)] += (g*abs((np.ceil(yshift)-yshift)*(np.ceil(xshift)-xshift))*flip[max(-np.floor(yshift),0):min(ysize,ysize-np.floor(yshift)),max(-np.floor(xshift),0):min(xsize,xsize-np.floor(xshift))])**2
        uncimage[0].data[max(np.ceil(yshift),0):min(ysize+np.ceil(yshift),ysize),max(np.floor(xshift),0):min(xsize+np.floor(xshift),xsize)] += ( g*abs((np.floor(yshift)-yshift)*(np.ceil(xshift)-xshift))*flip[max(-np.ceil(yshift),0):min(ysize,ysize-np.ceil(yshift)),max(-np.floor(xshift),0):min(xsize,xsize-np.floor(xshift))])**2
        uncimage[0].data[max(np.floor(yshift),0):min(ysize+np.floor(yshift),ysize),max(np.ceil(xshift),0):min(xsize+np.ceil(xshift),xsize)] += ( g*abs((np.ceil(yshift)-yshift)*(np.floor(xshift)-xshift))*flip[max(-np.floor(yshift),0):min(ysize,ysize-np.floor(yshift)),max(-np.ceil(xshift),0):min(xsize,xsize-np.ceil(xshift))])**2
        uncimage[0].data[max(np.ceil(yshift),0):min(ysize+np.ceil(yshift),ysize),max(np.ceil(xshift),0):min(xsize+np.ceil(xshift),xsize)] += ( g*abs((np.floor(yshift)-yshift)*(np.floor(xshift)-xshift))*flip[max(-np.ceil(yshift),0):min(ysize,ysize-np.ceil(yshift)),max(-np.ceil(xshift),0):min(xsize,xsize-np.ceil(xshift))])**2
        
        uncimage[0].data = np.sqrt(uncimage[0].data)
        
        #Close the uncertainty image
        uncimage.close()
        
    return
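
The four long slice expressions above spread the 180-degree-rotated image over the four pixels neighbouring each target position, weighting each by the overlap area implied by the fractional parts of xshift and yshift; in effect, a bilinear interpolation of the rotated image before subtraction. A more compact sketch of the same operation using scipy.ndimage.shift with linear interpolation (an illustration of the technique, not a drop-in replacement for the header handling and masking done above):

import numpy as np
from scipy import ndimage

def deghost_sketch(data, xcen, ycen, g=0.04):
    #Subtract g times the image rotated 180 degrees about (xcen, ycen)
    ysize, xsize = data.shape
    flip = data[::-1, ::-1].copy()        #180-degree rotation about the geometric center
    xshift = 2*xcen - xsize - 1           #Offset between geometric center and rotation axis
    yshift = 2*ycen - ysize - 1
    rotated = ndimage.shift(flip, (yshift, xshift), order=1, mode="constant", cval=0.0)
    return data - g*rotated
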
Example #7
def align_norm(fnlist,uncertlist=None):
    """Aligns a set of images to each other, as well as normalizing the images
    to the same average brightness.
    
    Both the alignment and normalization are accomplished through stellar
    photometry using the IRAF routine 'daophot'. The centroids of a handful
    of stars are found and used to run the IRAF routine 'imalign'. The
    instrumental magnitudes of the stars are used to determine by how much
    each image must be scaled for the photometry to match across images.
    
    The images are simply updated with their rescaled, shifted selves. This
    overwrites the previous images and adds the header keyword 'fpphot' to
    the images.
    
    A handful of temporary files are created during this process, which should
    all be deleted by the routine at the end. But if it is interrupted, they
    might not be.
    
    If the uncertainty images exist, this routine also shifts them by the same
    amounts as the intensity images, as well as updating the uncertainty values
    for both the new normalization and the uncertainties in normalizing the
    images.
    
    Inputs:
    fnlist -> List of strings, each the path to a fits image.
    uncertlist (optional) -> List of paths to uncertainty images.
    
    """
    
    #Fit for the sky background level
    _skyavg, skysig = fit_sky_level(fnlist)
    
    #Get image FWHMs
    fwhm = np.empty(len(fnlist))
    firstimage = openfits(fnlist[0])
    toggle = firstimage[0].header.get("fpfwhm")
    axcen = firstimage[0].header.get("fpaxcen")
    aycen = firstimage[0].header.get("fpaycen")
    arad = firstimage[0].header.get("fparad")
    firstimage.close()
    if axcen == None:
        print "Error! Images have not yet been aperture-masked! Do this first!"
        crash()
    if toggle == None:
        print "Warning: FWHMs have not been measured! Assuming 5 pixel FWHM for all images."
        for i in range(len(fnlist)): fwhm[i] = 5
    else:
        for i in range(len(fnlist)):
            image = openfits(fnlist[i])
            fwhm[i] = image[0].header["fpfwhm"]
            image.close()
    
    #Identify objects in the fields
    coolist = identify_objects(fnlist,skysig,fwhm)
    
    #Match objects between fields
    coofile = match_objects(coolist)
    
    #Do aperture photometry on the matched objects
    photlist = do_phot(fnlist,coofile,fwhm,skysig)
    
    #Read the photometry files
    x, y, mag, dmag = read_phot(photlist)
    
    #Calculate the normalizations
    norm, dnorm = calc_norm(mag,dmag)
    
    #Normalize the images (and optionally, the uncertainty images)
    for i in range(len(fnlist)):
        print "Normalizing image "+fnlist[i]
        image = openfits(fnlist[i],mode="update")
        if not (uncertlist is None):
            uncimage = openfits(uncertlist[i],mode="update")
            uncimage[0].data = np.sqrt(norm[i]**2*uncimage[0].data**2 + dnorm[i]**2*image[0].data**2)
            uncimage.close()
        image[0].data *= norm[i]
        image.close()
    
    #Calculate the shifts
    for i in range(x.shape[1]):
        x[:,i] = -(x[:,i] - x[0,i])
        y[:,i] = -(y[:,i] - y[0,i])
    xshifts = np.average(x,axis=1)
    yshifts = np.average(y,axis=1)
    
    #Shift the images (and optionally, the uncertainty images)
    iraf.images(_doprint=0)
    iraf.immatch(_doprint=0)
    for i in range(len(fnlist)):
        print "Shifting image "+fnlist[i]
        iraf.geotran(input=fnlist[i],
                     output=fnlist[i],
                     geometry="linear",
                     xshift=xshifts[i],
                     yshift=yshifts[i],
                     database="",
                     verbose="no")
        if not (uncertlist is None):
            iraf.geotran(input=uncertlist[i],
                         output=uncertlist[i],
                         geometry="linear",
                         xshift=xshifts[i],
                         yshift=yshifts[i],
                         database="",
                         verbose="no")
    
    #Update the image headers
    for i in range(len(fnlist)):
        image = openfits(fnlist[i],mode="update")
        image[0].header["fpphot"]="True"
        image[0].header["fpxcen"]+=xshifts[i]
        image[0].header["fpycen"]+=yshifts[i]
        image[0].header["fpaxcen"]+=xshifts[i]
        image[0].header["fpaycen"]+=yshifts[i]
        image.close()
    
    #Clean up the coordinate file list
    clean_files(fnlist)
    remove(coofile)
    for i in range(len(photlist)):
        remove(photlist[i])
    
    return
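
The uncertainty update in the normalization loop above is standard error propagation for a product: if the image is rescaled as norm*I with independent uncertainties dnorm and sigma_I, the new uncertainty is sqrt(norm**2*sigma_I**2 + dnorm**2*I**2). A small self-contained check of that expression, with made-up numbers:

import numpy as np

norm, dnorm = 1.05, 0.02              #Example normalization factor and its uncertainty
inty = np.array([100.0, 250.0])       #Example pixel intensities
sigma = np.array([10.0, 16.0])        #Their uncertainties

#Propagated uncertainty of norm*inty, matching the expression in the loop above
sigma_new = np.sqrt(norm**2*sigma**2 + dnorm**2*inty**2)
print(sigma_new)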