コード例 #1
0
ファイル: pipeline.py プロジェクト: carlmitchell/saltfppipe
def pipeline(rawdir="raw", mode="halpha"):
    """Runs successive steps of the saltfp data reduction, checking along the
    way to see if each step was successful. This is the main driver program of
    the SALT Fabry-Perot pipeline.

    Inputs:
    rawdir -> String, containing the path to the 'raw' directory. By
                  default, this is 'raw'
    mode -> Mode for velocity fitting. Currently the only option is H-Alpha
                line fitting.

    """

    # Set rest wave based on the mode called
    if mode == "halpha":
        rest_wave = 6562.81

    # Create product directory
    if isdir("product"):
        while True:
            yn = raw_input("Product directory already exists. " + "Recreate it? (y/n) ")
            if "n" in yn or "N" in yn:
                break
            elif "y" in yn or "Y" in yn:
                # Confirmation
                yn = raw_input("Are you sure? This takes a while. (y/n) ")
                if ("y" in yn or "Y" in yn) and not ("n" in yn or "N" in yn):
                    rmtree("product")
                    break

    if not isdir("product"):
        # Acquire the list of filenames from the raw directory
        fnlist = sorted(listdir(rawdir))
        for i in range(len(fnlist)):
            fnlist[i] = join(rawdir, fnlist[i])
        # Run the first two steps of imred on the first image
        iraf.pysalt(_doprint=0)
        iraf.saltred(_doprint=0)
        iraf.saltprepare(
            fnlist[0],
            "temp.fits",
            "",
            createvar=False,
            badpixelimage="",
            clobber=True,
            logfile="temp.log",
            verbose=True,
        )
        iraf.saltbias(
            "temp.fits",
            "temp.fits",
            "",
            subover=True,
            trim=True,
            subbias=False,
            masterbias="",
            median=False,
            function="polynomial",
            order=5,
            rej_lo=3.0,
            rej_hi=5.0,
            niter=10,
            plotover=False,
            turbo=False,
            clobber=True,
            logfile="temp.log",
            verbose=True,
        )
        # Create the bad pixel mask
        image = fits.open("temp.fits")
        for i in range(1, len(image)):
            mask = image[i].data != image[i].data
            image[i].data = 1 * mask
        image.writeto("badpixmask.fits", clobber="True")
        image.close()
        # Remove temporary files
        remove("temp.fits")
        remove("temp.log")
        # Run the raw images through the first few data reduction pipeline
        # steps
        imred(fnlist, "product", bpmfile="badpixmask.fits")
        # Delete the temporary bad pixel mask
        remove("badpixmask.fits")
        # Move these raw images into the product directory
        mkdir("product")
        fnlist = sorted(listdir("."))
        for i in range(len(fnlist)):
            if "mfxgbpP" in fnlist[i] and ".fits" in fnlist[i]:
                move(fnlist[i], join("product", fnlist[i]))
    # List of files in the product directory
    fnlist = sorted(listdir("product"))
    for i in range(len(fnlist)):
        fnlist[i] = join("product", fnlist[i])

    # Manual verification of fits images and headers
    firstimage = FPImage(fnlist[0])
    verify = firstimage.verifytog
    firstimage.close()
    if verify is None:
        while True:
            prompt = "Manually verify image contents? (Recommended) (y/n) "
            yn = raw_input(prompt)
            if "n" in yn or "N" in yn:
                print ("Skipping manual verification of image contents " + "(Not recommended)")
                break
            if "y" in yn or "Y" in yn:
                fnlist = verify_images(fnlist)
                break

    # Make separate lists of the different fits files
    (flatlist, list_of_objs, objlists, list_of_filts, filtlists) = separate_lists(fnlist)

    # Masking of pixels outside the aperture
    firstimage = FPImage(objlists[0][0])
    axcen = firstimage.axcen
    firstimage.close()
    if axcen is None:
        print "Masking pixels outside the RSS aperture..."
        axcen, aycen, arad = get_aperture(objlists[0][0])
        aperture_mask(fnlist, axcen, aycen, arad)
    else:
        print "Images have already been aperture-masked."

    # Masking bad pixels from external region file
    for objlist in objlists:
        for i in range(len(objlist)):
            if isfile(splitext(split(objlist[i])[1])[0] + ".reg"):
                print ("Adding regions from file " + splitext(split(objlist[i])[1])[0] + ".reg to the bad pixel mask.")
                mask_regions(objlist[i], splitext(split(objlist[i])[1])[0] + ".reg")

    # Measure stellar FWHMs
    firstimage = FPImage(objlists[0][0])
    fwhm = firstimage.fwhm
    firstimage.close()
    if fwhm is None:
        dofwhm = True
    else:
        while True:
            yn = raw_input("Seeing FWHM has already been measured. " + "Redo this? (y/n) ")
            if "n" in yn or "N" in yn:
                dofwhm = False
                break
            elif "y" in yn or "Y" in yn:
                dofwhm = True
                break
    if dofwhm:
        print "Measuring seeing FWHMs..."
        for objlist in objlists:
            measure_fwhm(objlist)

    # Find image centers using ghost pairs
    for i in range(len(objlists)):
        firstimage = FPImage(objlists[i][0])
        xcen = firstimage.xcen
        deghosted = firstimage.ghosttog
        firstimage.close()
        if deghosted is None:
            if xcen is None:
                ghosttog = True
            else:
                while True:
                    yn = raw_input(
                        "Optical centers already measured for " + "object " + list_of_objs[i] + ". Redo this? (y/n) "
                    )
                    if "n" in yn or "N" in yn:
                        ghosttog = False
                        break
                    elif "y" in yn or "Y" in yn:
                        ghosttog = True
                        break
            if ghosttog:
                print (
                    "Identifying optical centers for object "
                    + list_of_objs[i]
                    + ". This may take a while for crowded fields..."
                )
                find_ghost_centers(objlists[i])

    # Deghost images
    for i in range(len(objlists)):
        firstimage = FPImage(objlists[i][0])
        deghosted = firstimage.ghosttog
        firstimage.close()
        if deghosted is None:
            print "Deghosting images for object " + list_of_objs[i] + "..."
            for j in range(len(objlists[i])):
                deghost(objlists[i][j])
        else:
            print ("Images for object " + list_of_objs[i] + " have already been deghosted.")

    # Image Flattening
    firstimage = FPImage(objlists[0][0])
    flattog = firstimage.flattog
    firstimage.close()
    if flattog is None:
        print "Flattening images..."
        if len(flatlist) == 0:
            while True:
                print "Uh oh! No flatfield exposure found!"
                flatpath = raw_input("Enter path to external flat image: " + "(leave blank to skip flattening) ")
                if flatpath == "" or isfile(flatpath):
                    break
        else:
            combine_flat(flatlist, "flat.fits")
            flatpath = "flat.fits"
        if flatpath != "":
            notflatlist = []
            for objlist in objlists:
                notflatlist += objlist
            flatten(notflatlist, flatpath)
        else:
            print "Skipping image flattening. (Not recommended!)"
    else:
        print "Images have already been flattened."

    # Make separate directories for each object.
    # This is the first bit since 'singext' to create a new directory, because
    # this is the first point where it's really necessary to start treating the
    # images from different tracks very differently.
    for i in range(len(objlists)):
        if isdir(list_of_objs[i].replace(" ", "")):
            while True:
                yn = raw_input("A directory for object " + list_of_objs[i] + " already exists. Recreate? (y/n) ")
                if "n" in yn or "N" in yn:
                    do_copy = False
                    break
                elif "y" in yn or "Y" in yn:
                    do_copy = True
                    rmtree(list_of_objs[i].replace(" ", ""))
                    break
        else:
            do_copy = True
        if do_copy:
            mkdir(list_of_objs[i].replace(" ", ""))
            for j in range(len(objlists[i])):
                copyfile(objlists[i][j], join(list_of_objs[i].replace(" ", ""), split(objlists[i][j])[1]))
        for j in range(len(objlists[i])):
            objlists[i][j] = join(list_of_objs[i].replace(" ", ""), split(objlists[i][j])[1])
    # Update the filter lists
    for i in range(len(filtlists)):
        for j in range(len(filtlists[i])):
            for k in range(len(objlists)):
                for l in range(len(objlists[k])):
                    if split(filtlists[i][j])[1] == split(objlists[k][l])[1]:
                        filtlists[i][j] = objlists[k][l]

    # Image alignment and normalization
    for i in range(len(objlists)):
        firstimage = FPImage(objlists[i][0])
        aligned = firstimage.phottog
        firstimage.close()
        if aligned is None:
            print ("Aligning and normalizing images for object " + list_of_objs[i] + "...")
            align_norm(objlists[i])
        else:
            print ("Images for object " + list_of_objs[i] + " have already been aligned and normalized.")

    # Make a median image for each object
    for i in range(len(objlists)):
        if isfile(join(list_of_objs[i].replace(" ", ""), "median.fits")):
            while True:
                yn = raw_input("Median image for object " + list_of_objs[i] + " already exists. Replace it? (y/n) ")
                if "n" in yn or "N" in yn:
                    break
                elif "y" in yn or "Y" in yn:
                    make_median(objlists[i], join(list_of_objs[i].replace(" ", ""), "median.fits"))
                    break
        else:
            make_median(objlists[i], join(list_of_objs[i].replace(" ", ""), "median.fits"))

    # Wavelength calibrations
    all_rings_list = []
    for i in range(len(list_of_filts)):
        all_rings_list = all_rings_list + filtlists[i]
    firstimage = FPImage(all_rings_list[0])
    calf = firstimage.calf
    firstimage.close()
    if not (calf is None):
        while True:
            yn = raw_input("Wavelength solution already found. " + "Redo it? (y/n) ")
            if "n" in yn or "N" in yn:
                break
            elif "y" in yn or "Y" in yn:
                fit_wave_soln(all_rings_list)
                break
    else:
        fit_wave_soln(all_rings_list)

    # Sky ring removal
    for i in range(len(objlists)):
        for j in range(len(objlists[i])):
            # Check to see if sky rings have already been removed
            image = FPImage(objlists[i][j])
            deringed = image.ringtog
            image.close()
            if deringed is None:
                print "Subtracting sky rings for image " + objlists[i][j]
                sub_sky_rings([objlists[i][j]], [join(list_of_objs[i].replace(" ", ""), "median.fits")])
            else:
                print ("Sky ring subtraction already done for image " + objlists[i][j])

    # Creation of data cube and convolution to uniform PSF
    for i in range(len(objlists)):
        if isdir(list_of_objs[i].replace(" ", "") + "_cube"):
            while True:
                yn = raw_input("A data cube for object " + list_of_objs[i] + " already exists. Recreate? (y/n) ")
                if "n" in yn or "N" in yn:
                    do_create = False
                    break
                elif "y" in yn or "Y" in yn:
                    # Confirmation
                    yn = raw_input("Are you sure? This takes a while. (y/n) ")
                    if ("y" in yn or "Y" in yn) and not ("n" in yn or "N" in yn):
                        do_create = True
                        rmtree(list_of_objs[i].replace(" ", "") + "_cube")
                        break
        else:
            do_create = True
        if do_create:
            mkdir(list_of_objs[i].replace(" ", "") + "_cube")
            for j in range(len(objlists[i])):
                image = FPImage(objlists[i][j])
                fwhm = image.fwhm
                if j == 0:
                    largestfwhm = fwhm
                if fwhm > largestfwhm:
                    largestfwhm = fwhm
                image.close()
            while True:
                prompt = "Enter desired final fwhm or leave blank to use" + " default (" + str(largestfwhm) + " pix) "
                user_fwhm = raw_input(prompt)
                if user_fwhm == "":
                    user_fwhm = largestfwhm
                    break
                else:
                    try:
                        user_fwhm = float(user_fwhm)
                    except ValueError:
                        print "That wasn't a valid number..."
                    else:
                        if user_fwhm < largestfwhm:
                            print ("Final fwhm must exceed " + str(largestfwhm) + " pixels.")
                        else:
                            break
            desired_fwhm = user_fwhm * 1.01
            for j in range(len(objlists[i])):
                make_final_image(
                    objlists[i][j],
                    join(list_of_objs[i].replace(" ", "") + "_cube", split(objlists[i][j])[1]),
                    desired_fwhm,
                    clobber=True,
                )

    # Get final lists for the velocity map fitting for each object
    final_lists = []
    for i in range(len(list_of_objs)):
        final_lists.append([])
        for j in range(len(objlists[i])):
            final_lists[i].append(join(list_of_objs[i].replace(" ", "") + "_cube", split(objlists[i][j])[1]))

    # Shift to solar velocity frame
    for i in range(len(list_of_objs)):
        firstimage = FPImage(final_lists[i][0])
        velshift = firstimage.solarvel
        firstimage.close()
        if velshift is None:
            print ("Performing solar velocity shift for object " + list_of_objs[i] + "...")
            solar_velocity_shift(final_lists[i], rest_wave)
        else:
            print ("Solar velocity shift for object " + list_of_objs[i] + " already done.")

    if not do_velmap:
        sys.exit("Velocity map not made - Voigt-fitting software not found.")

    # Velocity map fitting
    for i in range(len(list_of_objs)):
        if isfile(join(list_of_objs[i].replace(" ", "") + "_cube", "velocity.fits")):
            while True:
                yn = raw_input("Velocity map already fitted for object " + list_of_objs[i] + ". Redo this? (y/n) ")
                if "n" in yn or "N" in yn:
                    domap = False
                    break
                elif "y" in yn or "Y" in yn:
                    # Confirmation
                    yn = raw_input("Are you sure? This takes a while. (y/n) ")
                    if ("y" in yn or "Y" in yn) and not ("n" in yn or "N" in yn):
                        domap = True
                        break
        else:
            domap = True
    if domap:
        print "Fitting velocity map for object " + list_of_objs[i] + "..."
        if mode == "halpha":
            fit_velmap_ha_n2_mode(final_lists[i], list_of_objs[i].replace(" ", "") + "_cube", clobber=True)

    # Clean velocity map
    for i in range(len(list_of_objs)):
        make_clean_map(list_of_objs[i].replace(" ", "") + "_cube", clobber=True)
コード例 #2
0
ファイル: pipeline.py プロジェクト: crawfordsm/saltfppipe
def pipeline(productdir = "product", mode = "halpha"):
    """Runs successive steps of the saltfp data reduction, checking along the
    way to see if each step was successful. This is the main driver program of
    the SALT Fabry-Perot pipeline.

    Progress is tracked via 'fp*' FITS header keywords (e.g. 'fpverify',
    'fpflat', 'fpcalf'); when a keyword is already present the corresponding
    step is skipped or re-run only after interactive confirmation, so the
    pipeline can be re-entered after an interruption.

    Inputs:
    productdir -> String, containing the path to the 'product' directory. By
                  default, this is 'product'
    mode -> Mode for velocity fitting. Currently the only option is H-Alpha
                line fitting.

    """
    
    #Print the welcome message
    welcome_message()
    
    #Set rest wave based on the mode called
    # NOTE(review): for any other mode, rest_wave stays undefined; it is not
    # read anywhere in the visible portion of this function, but confirm
    # downstream use before adding new modes.
    if mode == "halpha": rest_wave = 6562.81
    
    #Acquire the list of filenames from the product directory
    fnlist = sorted(listdir(productdir))
    
    #Convert the images to single-extension fits files
    # This creates a new directory from scratch in order to preserve the
    # original 'product' directory.
    print "Converting images to single extension fits images..."
    productlist = []
    singextlist = []
    for i in range(len(fnlist)):
        if splitext(fnlist[i])[1] == ".fits":
            productlist.append(join(productdir,fnlist[i]))
            # converted copies go to singext/ with an 's' filename prefix
            singextlist.append(join("singext","s"+fnlist[i]))
    toggle = create_directory("singext")
    if toggle:
        make_single_extension(productlist,singextlist)
    else: print "Skipping creation of single-extension fits images."
    
    #Manual verification of fits images and headers 
    firstimage = openfits(singextlist[0])
    verify = firstimage[0].header.get("fpverify")
    firstimage.close()
    if verify is None:
        while True:
            yn = raw_input("Manually verify image contents? (Recommended) (y/n) ")
            if "n" in yn or "N" in yn:
                print "Skipping manual verification of image contents (Not recommended)"
                break
            if "y" in yn or "Y" in yn:
                # verify_images may drop rejected images, so rebind the list
                singextlist = verify_images(singextlist)
                break
    
    #Make separate lists of the different fits files
    flatlist, list_of_objs, objlists, list_of_filts, filtlists = separate_lists(singextlist)
    
    #Masking of pixels outside the aperture
    firstimage = openfits(objlists[0][0])
    axcen = firstimage[0].header.get("fpaxcen")
    firstimage.close()
    if axcen is None:
        print "Masking pixels outside the RSS aperture..."
        axcen, aycen, arad = get_aperture(objlists[0][0])
        aperture_mask(singextlist,axcen,aycen,arad,maskvalue=0)
    else: print "Images have already been aperture-masked."
    
    #Cosmic ray removal
    firstimage = openfits(objlists[0][0])
    crtoggle = firstimage[0].header.get("fpcosmic")
    firstimage.close()
    if crtoggle is None:
        print "Cosmic-ray correcting images..."
        cosmic_ray_remove(singextlist)
    else: print "Images have already been cosmic-ray corrected."
    
    #Create uncertainty images for the object files
    # Uncertainty is taken as sqrt(data), i.e. a shot-noise model;
    # NOTE(review): assumes pixel values are in counts — confirm gain handling.
    uncertlists = []
    for i in range(len(objlists)):
        uncertlists.append([])
        for j in range(len(objlists[i])):
            # '<name>_unc.fits' alongside each object image
            uncertlists[i].append(splitext(objlists[i][j])[0]+'_unc'+splitext(objlists[i][j])[1])
    if not isfile(uncertlists[0][0]):
        print "Generating uncertainty images..."
        for i in range(len(objlists)):
            for j in range(len(objlists[i])):
                print "Writing uncertainty image "+uncertlists[i][j]
                image = openfits(objlists[i][j])
                image[0].data = np.sqrt(image[0].data)
                image.writeto(uncertlists[i][j])
                image.close()
    else: print "Uncertainty images already exist."
    
    #Image Flattening
    firstimage = openfits(objlists[0][0])
    flattog = firstimage[0].header.get("fpflat")
    firstimage.close()
    if flattog is None:
        print "Flattening images..."
        if len(flatlist) == 0:
            while True:
                print "Uh oh! No flatfield exposure found!"
                flatpath = raw_input("Enter path to external flat image: (leave blank to skip flattening) ")
                if flatpath == "" or isfile(flatpath): break
        else:
            combine_flat(flatlist,"flat.fits")
            flatpath = "flat.fits"
        if flatpath != "":
            # flatten everything except the flats themselves: all filter
            # images plus the uncertainty images
            notflatlist = []
            for filtlist in filtlists: notflatlist+=filtlist
            for uncertlist in uncertlists: notflatlist+=uncertlist
            flatten(notflatlist,flatpath)
        else: print "Skipping image flattening. (Not recommended!)"
    else: print "Images have already been flattened."
    
    #Measure stellar FWHMs
    firstimage = openfits(objlists[0][0])
    fwhm = firstimage[0].header.get("fpfwhm")
    firstimage.close()
    if fwhm is None: dofwhm = True
    else:
        while True:
            yn = raw_input("Seeing FWHM has already been measured. Redo this? (y/n) ")
            if "n" in yn or "N" in yn:
                dofwhm = False
                break
            elif "y" in yn or "Y" in yn:
                dofwhm = True
                break
    if dofwhm:
        print "Measuring seeing FWHMs..."
        for objlist in objlists:
            measure_fwhm(objlist)
    
    #Find image centers using ghost pairs
    for i in range(len(objlists)):
        firstimage = openfits(objlists[i][0])
        xcen = firstimage[0].header.get("fpxcen")
        firstimage.close()
        if xcen is None:
            ghosttog = True
        else:
            while True:
                yn = raw_input("Optical centers already measured for object "+list_of_objs[i]+". Redo this? (y/n) ")
                if "n" in yn or "N" in yn:
                    ghosttog = False
                    break
                elif "y" in yn or "Y" in yn:
                    ghosttog = True
                    break
        if ghosttog:
            print "Identifying optical centers for object "+list_of_objs[i]+". This may take a while for crowded fields..."
            find_ghost_centers(objlists[i])
    
    #Deghost images
    firstimage = openfits(objlists[0][0])
    deghosted = firstimage[0].header.get("fpghost")
    firstimage.close()
    if deghosted is None:
        print "Deghosting images..."
        for i in range(len(objlists)):
            for j in range(len(objlists[i])):
                # NOTE(review): g=0.04 looks like an assumed ghost reflection
                # strength — confirm against the deghost implementation.
                deghost(objlists[i][j],uncfn=uncertlists[i][j],g=0.04)
    else: print "Images have already been deghosted."
    
    #Make separate directories for each object.
    # This is the first bit since 'singext' to create a new directory, because
    # this is the first point where it's really necessary to start treating the
    # images from different tracks very differently.
    for i in range(len(objlists)):
        if isdir(list_of_objs[i].replace(" ", "")):
            while True:
                yn = raw_input("A directory for object "+list_of_objs[i]+" already exists. Recreate? (y/n) ")
                if "n" in yn or "N" in yn:
                    do_copy = False
                    break
                elif "y" in yn or "Y" in yn:
                    do_copy = True
                    rmtree(list_of_objs[i].replace(" ", ""))
                    break
        else: do_copy = True
        if do_copy:
            mkdir(list_of_objs[i].replace(" ", ""))
            for j in range(len(objlists[i])):
                copyfile(objlists[i][j],join(list_of_objs[i].replace(" ", ""),split(objlists[i][j])[1]))
                copyfile(uncertlists[i][j],join(list_of_objs[i].replace(" ", ""),split(uncertlists[i][j])[1]))
        # repoint the lists at the per-object copies either way
        for j in range(len(objlists[i])):
            objlists[i][j] = join(list_of_objs[i].replace(" ", ""),split(objlists[i][j])[1])
            uncertlists[i][j] = join(list_of_objs[i].replace(" ", ""),split(uncertlists[i][j])[1])
    #Update the filter lists (THIS IS TERRIBLE AND I SHOULD HAVE USED POINTERS AND I'M SO SORRY)
    # match filter-list entries to the relocated object files by bare filename
    for i in range(len(filtlists)):
        for j in range(len(filtlists[i])):
            for k in range(len(objlists)):
                for l in range(len(objlists[k])):
                    if split(filtlists[i][j])[1]==split(objlists[k][l])[1]: filtlists[i][j] = objlists[k][l]
    
    #Image alignment and normalization
    for i in range(len(objlists)):
        firstimage = openfits(objlists[i][0])
        aligned = firstimage[0].header.get("fpphot")
        firstimage.close()
        if aligned is None:
            print "Aligning and normalizing images for object "+list_of_objs[i]+"..."
            align_norm(objlists[i],uncertlist=uncertlists[i])
        else: print "Images for object "+list_of_objs[i]+" have already been aligned and normalized."
    
    #Make a median image for each object
    for i in range(len(objlists)):
        if isfile(join(list_of_objs[i].replace(" ", ""),"median.fits")):
            while True:
                yn = raw_input("Median image for object "+list_of_objs[i]+" already exists. Replace it? (y/n) ")
                if "n" in yn or "N" in yn:
                    break
                elif "y" in yn or "Y" in yn:
                    make_median(objlists[i],join(list_of_objs[i].replace(" ", ""),"median.fits"))
                    break
        else: make_median(objlists[i],join(list_of_objs[i].replace(" ", ""),"median.fits"))
    
    #Wavelength calibrations
    for i in range(len(list_of_filts)):
        #Do a separate calibration for each filter
        firstimage = openfits(filtlists[i][0])
        calf = firstimage[0].header.get("fpcalf")
        firstimage.close()
        if not(calf is None):
            while True:
                yn = raw_input("Wavelength solution already found for filter "+list_of_filts[i]+". Redo it? (y/n) ")
                if "n" in yn or "N" in yn:
                    break
                elif "y" in yn or "Y" in yn:
                    fit_wave_soln(filtlists[i])
                    break
        else: fit_wave_soln(filtlists[i])
    
    #Sky ring removal
    for i in range(len(objlists)):
        #Check to see if sky rings have already been removed
        firstimage = openfits(objlists[i][0])
        deringed = firstimage[0].header.get("fpdering")
        firstimage.close()
        if deringed is None:
            print "Subtracting sky rings for object "+list_of_objs[i]+"..."
            # same median image is paired with every image of the object
            sub_sky_rings(objlists[i],[join(list_of_objs[i].replace(" ", ""),"median.fits")]*len(objlists[i]))
        else: print "Sky ring subtraction already done for object "+list_of_objs[i]
    
    #Creation of data cube and convolution to uniform PSF
    # Data cube is created in a separate directory, which is created if it
    # does not already exist, or overwritten if the user so chooses
    for i in range(len(objlists)):
        if isdir(list_of_objs[i].replace(" ", "")+"_cube"):
            while True:
                yn = raw_input("A data cube for object "+list_of_objs[i]+" already exists. Recreate? (y/n) ")
                if "n" in yn or "N" in yn:
                    do_create = False
                    break
                elif "y" in yn or "Y" in yn:
                    do_create = True
                    rmtree(list_of_objs[i].replace(" ", "")+"_cube")
                    break
        else: do_create = True
        if do_create:
            mkdir(list_of_objs[i].replace(" ", "")+"_cube")
            # find the worst (largest) seeing FWHM in this object's track;
            # every image must be convolved up to at least that resolution
            for j in range(len(objlists[i])):
                image = openfits(objlists[i][j])
                fwhm = image[0].header["fpfwhm"]
                if j==0: largestfwhm = fwhm
                if fwhm>largestfwhm: largestfwhm=fwhm
                image.close()
            while True:
                user_fwhm = raw_input("Enter desired final fwhm or leave blank to use default ("+str(largestfwhm)+" pix) ")
                if user_fwhm == "":
                    user_fwhm = largestfwhm
                    break
                else:
                    try: user_fwhm = float(user_fwhm)
                    except ValueError: print "That wasn't a valid number..."
                    else:
                        if user_fwhm<largestfwhm:
                            print "Final fwhm must exceed "+str(largestfwhm)+" pixels."
                        else: break
            # NOTE(review): no safety margin is added above user_fwhm here;
            # a later revision of this pipeline multiplies by 1.01 — confirm
            # whether convolving to exactly the largest FWHM is intended.
            desired_fwhm = user_fwhm
            for j in range(len(objlists[i])):
                make_final_image(objlists[i][j], #Input image
                                 join(list_of_objs[i].replace(" ", "")+"_cube",splitext(split(objlists[i][j])[1])[0]+"_inty"+splitext(split(objlists[i][j])[1])[1]), #Output image
                                 join(list_of_objs[i].replace(" ", "")+"_cube",splitext(split(objlists[i][j])[1])[0]+"_wave"+splitext(split(objlists[i][j])[1])[1]), #Output wave image
                                 desired_fwhm, #Desired final FWHM
                                 input_uncert_image = uncertlists[i][j], #Input uncertainty image
                                 output_uncert_image = join(list_of_objs[i].replace(" ", "")+"_cube",split(uncertlists[i][j])[1]), #Output uncertainty image
                                 clobber=True)