# Standard-library imports used throughout this module. The PyRAF/PySALT
# bindings (iraf, pyfits, pysaltpath), astropy's fits module, and the
# pipeline helpers (FPImage, imred, verify_images, separate_lists, ...)
# are assumed to be provided elsewhere in the package.
import os
import shutil
import sys
from glob import glob
from os import listdir, mkdir, remove
from os.path import isdir, isfile, join, split, splitext
from shutil import copyfile, move, rmtree


def pysalt(fs=None):
    # Run the pysalt pipeline on the raw data.
    if fs is None:
        fs = glob('P*.fits')
    if len(fs) == 0:
        print "WARNING: No raw files to run PySALT pre-processing."
        return

    # Copy the raw files into a raw directory
    if not os.path.exists('raw'):
        os.mkdir('raw')
    if not os.path.exists('work'):
        os.mkdir('work')
    for f in fs:
        shutil.copy2(f, 'raw/')
        shutil.move(f, 'work/')
    iraf.cd('work')

    # Run each of the pysalt pipeline steps, deleting temporary files
    # as we go

    # saltprepare
    iraf.unlearn(iraf.saltprepare)
    # Currently, there is no bad pixel mask provided by SALT,
    # so we don't create one here.
    iraf.saltprepare(images='P*.fits', clobber=True, mode='h')
    for f in glob('P*.fits'):
        os.remove(f)

    # saltgain
    iraf.unlearn(iraf.saltgain)
    # Multiply by the gain so that everything is in electrons.
    iraf.saltgain(images='pP*.fits',
                  gaindb=pysaltpath + '/data/rss/RSSamps.dat',
                  mult=True, usedb=True, mode='h')
    for f in glob('pP*.fits'):
        os.remove(f)

    # Write a GAIN = 1 keyword into the header of each amplifier extension
    fs = glob('gpP*.fits')
    for f in fs:
        for i in range(1, 7):
            pyfits.setval(f, 'GAIN', ext=i, value=1.0)

    # saltxtalk
    iraf.unlearn(iraf.saltxtalk)
    iraf.saltxtalk(images='gpP*.fits', clobber=True, usedb=True,
                   xtalkfile=pysaltpath + '/data/rss/RSSxtalk.dat',
                   mode='h')
    for f in glob('gpP*.fits'):
        os.remove(f)

    # saltbias
    iraf.unlearn(iraf.saltbias)
    iraf.saltbias(images='xgpP*.fits', clobber=True, mode='h')
    for f in glob('xgpP*.fits'):
        os.remove(f)

    # Put all of the newly created files into the pysalt directory
    if not os.path.exists('pysalt'):
        os.mkdir('pysalt')
    for f in glob('bxgpP*.fits'):
        shutil.move(f, 'pysalt')
    iraf.cd('..')
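
# A usage sketch for the pre-processing step (comments only, so nothing
# runs at import time). It assumes the PySALT IRAF packages are already
# loaded, e.g. via iraf.pysalt(_doprint=0) and iraf.saltred(_doprint=0)
# as done in pipeline() below, and that pysaltpath points at the PySALT
# installation:
#
#     pysalt()                     # discover and process P*.fits in the cwd
#     pysalt(fs=['P0001.fits'])    # or pass an explicit file list
#                                  # ('P0001.fits' is a hypothetical name)
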
def pipeline(rawdir="raw", mode="halpha"):
    """Runs successive steps of the saltfp data reduction, checking along
    the way to see if each step was successful. This is the main driver
    program of the SALT Fabry-Perot pipeline.

    Inputs:
    rawdir -> String, containing the path to the 'raw' directory. By
              default, this is 'raw'.
    mode -> Mode for velocity fitting. Currently the only option is
            H-Alpha line fitting.

    """

    # Set the rest wavelength based on the requested mode
    if mode == "halpha":
        rest_wave = 6562.81

    # Create the product directory
    if isdir("product"):
        while True:
            yn = raw_input("Product directory already exists. " +
                           "Recreate it? (y/n) ")
            if "n" in yn or "N" in yn:
                break
            elif "y" in yn or "Y" in yn:
                # Confirmation
                yn = raw_input("Are you sure? This takes a while. (y/n) ")
                if ("y" in yn or "Y" in yn) and not ("n" in yn or
                                                     "N" in yn):
                    rmtree("product")
                    break

    if not isdir("product"):
        # Acquire the list of filenames from the raw directory
        fnlist = sorted(listdir(rawdir))
        for i in range(len(fnlist)):
            fnlist[i] = join(rawdir, fnlist[i])

        # Run the first two steps of imred on the first image
        iraf.pysalt(_doprint=0)
        iraf.saltred(_doprint=0)
        iraf.saltprepare(fnlist[0], "temp.fits", "", createvar=False,
                         badpixelimage="", clobber=True,
                         logfile="temp.log", verbose=True)
        iraf.saltbias("temp.fits", "temp.fits", "", subover=True,
                      trim=True, subbias=False, masterbias="",
                      median=False, function="polynomial", order=5,
                      rej_lo=3.0, rej_hi=5.0, niter=10, plotover=False,
                      turbo=False, clobber=True, logfile="temp.log",
                      verbose=True)

        # Create the bad pixel mask (NaN != NaN, so this comparison
        # flags exactly the NaN pixels)
        image = fits.open("temp.fits")
        for i in range(1, len(image)):
            mask = image[i].data != image[i].data
            image[i].data = 1 * mask
        image.writeto("badpixmask.fits", clobber=True)
        image.close()

        # Remove temporary files
        remove("temp.fits")
        remove("temp.log")

        # Run the raw images through the first few data reduction
        # pipeline steps
        imred(fnlist, "product", bpmfile="badpixmask.fits")

        # Delete the temporary bad pixel mask
        remove("badpixmask.fits")

        # Move these reduced images into the product directory
        mkdir("product")
        fnlist = sorted(listdir("."))
        for i in range(len(fnlist)):
            if "mfxgbpP" in fnlist[i] and ".fits" in fnlist[i]:
                move(fnlist[i], join("product", fnlist[i]))

    # List of files in the product directory
    fnlist = sorted(listdir("product"))
    for i in range(len(fnlist)):
        fnlist[i] = join("product", fnlist[i])

    # Manual verification of fits images and headers
    firstimage = FPImage(fnlist[0])
    verify = firstimage.verifytog
    firstimage.close()
    if verify is None:
        while True:
            prompt = "Manually verify image contents? (Recommended) (y/n) "
            yn = raw_input(prompt)
            if "n" in yn or "N" in yn:
                print ("Skipping manual verification of image contents " +
                       "(Not recommended)")
                break
            if "y" in yn or "Y" in yn:
                fnlist = verify_images(fnlist)
                break

    # Make separate lists of the different fits files
    (flatlist, list_of_objs, objlists,
     list_of_filts, filtlists) = separate_lists(fnlist)

    # Masking of pixels outside the aperture
    firstimage = FPImage(objlists[0][0])
    axcen = firstimage.axcen
    firstimage.close()
    if axcen is None:
        print "Masking pixels outside the RSS aperture..."
        axcen, aycen, arad = get_aperture(objlists[0][0])
        aperture_mask(fnlist, axcen, aycen, arad)
    else:
        print "Images have already been aperture-masked."
    # Masking bad pixels from external region file
    for objlist in objlists:
        for i in range(len(objlist)):
            if isfile(splitext(split(objlist[i])[1])[0] + ".reg"):
                print ("Adding regions from file " +
                       splitext(split(objlist[i])[1])[0] +
                       ".reg to the bad pixel mask.")
                mask_regions(objlist[i],
                             splitext(split(objlist[i])[1])[0] + ".reg")

    # Measure stellar FWHMs
    firstimage = FPImage(objlists[0][0])
    fwhm = firstimage.fwhm
    firstimage.close()
    if fwhm is None:
        dofwhm = True
    else:
        while True:
            yn = raw_input("Seeing FWHM has already been measured. " +
                           "Redo this? (y/n) ")
            if "n" in yn or "N" in yn:
                dofwhm = False
                break
            elif "y" in yn or "Y" in yn:
                dofwhm = True
                break
    if dofwhm:
        print "Measuring seeing FWHMs..."
        for objlist in objlists:
            measure_fwhm(objlist)

    # Find image centers using ghost pairs
    for i in range(len(objlists)):
        firstimage = FPImage(objlists[i][0])
        xcen = firstimage.xcen
        deghosted = firstimage.ghosttog
        firstimage.close()
        if deghosted is None:
            if xcen is None:
                ghosttog = True
            else:
                while True:
                    yn = raw_input("Optical centers already measured for " +
                                   "object " + list_of_objs[i] +
                                   ". Redo this? (y/n) ")
                    if "n" in yn or "N" in yn:
                        ghosttog = False
                        break
                    elif "y" in yn or "Y" in yn:
                        ghosttog = True
                        break
            if ghosttog:
                print ("Identifying optical centers for object " +
                       list_of_objs[i] +
                       ". This may take a while for crowded fields...")
                find_ghost_centers(objlists[i])

    # Deghost images
    for i in range(len(objlists)):
        firstimage = FPImage(objlists[i][0])
        deghosted = firstimage.ghosttog
        firstimage.close()
        if deghosted is None:
            print "Deghosting images for object " + list_of_objs[i] + "..."
            for j in range(len(objlists[i])):
                deghost(objlists[i][j])
        else:
            print ("Images for object " + list_of_objs[i] +
                   " have already been deghosted.")

    # Image flattening
    firstimage = FPImage(objlists[0][0])
    flattog = firstimage.flattog
    firstimage.close()
    if flattog is None:
        print "Flattening images..."
        if len(flatlist) == 0:
            while True:
                print "Uh oh! No flatfield exposure found!"
                flatpath = raw_input("Enter path to external flat image: " +
                                     "(leave blank to skip flattening) ")
                if flatpath == "" or isfile(flatpath):
                    break
        else:
            combine_flat(flatlist, "flat.fits")
            flatpath = "flat.fits"
        if flatpath != "":
            notflatlist = []
            for objlist in objlists:
                notflatlist += objlist
            flatten(notflatlist, flatpath)
        else:
            print "Skipping image flattening. (Not recommended!)"
    else:
        print "Images have already been flattened."

    # Make separate directories for each object.
    # This is the first step since 'singext' that creates a new directory,
    # because this is the first point where it's really necessary to start
    # treating the images from different tracks very differently.
    for i in range(len(objlists)):
        if isdir(list_of_objs[i].replace(" ", "")):
            while True:
                yn = raw_input("A directory for object " + list_of_objs[i] +
                               " already exists. Recreate? (y/n) ")
                if "n" in yn or "N" in yn:
                    do_copy = False
                    break
                elif "y" in yn or "Y" in yn:
                    do_copy = True
                    rmtree(list_of_objs[i].replace(" ", ""))
                    break
        else:
            do_copy = True
        if do_copy:
            mkdir(list_of_objs[i].replace(" ", ""))
            for j in range(len(objlists[i])):
                copyfile(objlists[i][j],
                         join(list_of_objs[i].replace(" ", ""),
                              split(objlists[i][j])[1]))
        for j in range(len(objlists[i])):
            objlists[i][j] = join(list_of_objs[i].replace(" ", ""),
                                  split(objlists[i][j])[1])

    # Update the filter lists
    for i in range(len(filtlists)):
        for j in range(len(filtlists[i])):
            for k in range(len(objlists)):
                for l in range(len(objlists[k])):
                    if (split(filtlists[i][j])[1] ==
                            split(objlists[k][l])[1]):
                        filtlists[i][j] = objlists[k][l]

    # Image alignment and normalization
    for i in range(len(objlists)):
        firstimage = FPImage(objlists[i][0])
        aligned = firstimage.phottog
        firstimage.close()
        if aligned is None:
            print ("Aligning and normalizing images for object " +
                   list_of_objs[i] + "...")
            align_norm(objlists[i])
        else:
            print ("Images for object " + list_of_objs[i] +
                   " have already been aligned and normalized.")

    # Make a median image for each object
    for i in range(len(objlists)):
        if isfile(join(list_of_objs[i].replace(" ", ""), "median.fits")):
            while True:
                yn = raw_input("Median image for object " +
                               list_of_objs[i] +
                               " already exists. Replace it? (y/n) ")
                if "n" in yn or "N" in yn:
                    break
                elif "y" in yn or "Y" in yn:
                    make_median(objlists[i],
                                join(list_of_objs[i].replace(" ", ""),
                                     "median.fits"))
                    break
        else:
            make_median(objlists[i],
                        join(list_of_objs[i].replace(" ", ""),
                             "median.fits"))

    # Wavelength calibrations
    all_rings_list = []
    for i in range(len(list_of_filts)):
        all_rings_list = all_rings_list + filtlists[i]
    firstimage = FPImage(all_rings_list[0])
    calf = firstimage.calf
    firstimage.close()
    if calf is not None:
        while True:
            yn = raw_input("Wavelength solution already found. " +
                           "Redo it? (y/n) ")
            if "n" in yn or "N" in yn:
                break
            elif "y" in yn or "Y" in yn:
                fit_wave_soln(all_rings_list)
                break
    else:
        fit_wave_soln(all_rings_list)

    # Sky ring removal
    for i in range(len(objlists)):
        for j in range(len(objlists[i])):
            # Check to see if sky rings have already been removed
            image = FPImage(objlists[i][j])
            deringed = image.ringtog
            image.close()
            if deringed is None:
                print "Subtracting sky rings for image " + objlists[i][j]
                sub_sky_rings([objlists[i][j]],
                              [join(list_of_objs[i].replace(" ", ""),
                                    "median.fits")])
            else:
                print ("Sky ring subtraction already done for image " +
                       objlists[i][j])

    # Creation of data cube and convolution to uniform PSF
    for i in range(len(objlists)):
        if isdir(list_of_objs[i].replace(" ", "") + "_cube"):
            while True:
                yn = raw_input("A data cube for object " +
                               list_of_objs[i] +
                               " already exists. Recreate? (y/n) ")
                if "n" in yn or "N" in yn:
                    do_create = False
                    break
                elif "y" in yn or "Y" in yn:
                    # Confirmation
                    yn = raw_input("Are you sure? This takes a while. " +
                                   "(y/n) ")
                    if ("y" in yn or "Y" in yn) and not ("n" in yn or
                                                         "N" in yn):
                        do_create = True
                        rmtree(list_of_objs[i].replace(" ", "") + "_cube")
                        break
        else:
            do_create = True
        if do_create:
            mkdir(list_of_objs[i].replace(" ", "") + "_cube")
            # Track the largest seeing FWHM among this object's images
            for j in range(len(objlists[i])):
                image = FPImage(objlists[i][j])
                fwhm = image.fwhm
                if j == 0:
                    largestfwhm = fwhm
                if fwhm > largestfwhm:
                    largestfwhm = fwhm
                image.close()
            while True:
                prompt = ("Enter desired final fwhm or leave blank to use" +
                          " default (" + str(largestfwhm) + " pix) ")
                user_fwhm = raw_input(prompt)
                if user_fwhm == "":
                    user_fwhm = largestfwhm
                    break
                else:
                    try:
                        user_fwhm = float(user_fwhm)
                    except ValueError:
                        print "That wasn't a valid number..."
                    else:
                        if user_fwhm < largestfwhm:
                            print ("Final fwhm must exceed " +
                                   str(largestfwhm) + " pixels.")
                        else:
                            break
            desired_fwhm = user_fwhm * 1.01
            for j in range(len(objlists[i])):
                make_final_image(objlists[i][j],
                                 join(list_of_objs[i].replace(" ", "") +
                                      "_cube",
                                      split(objlists[i][j])[1]),
                                 desired_fwhm, clobber=True)

    # Get final lists for the velocity map fitting for each object
    final_lists = []
    for i in range(len(list_of_objs)):
        final_lists.append([])
        for j in range(len(objlists[i])):
            final_lists[i].append(join(list_of_objs[i].replace(" ", "") +
                                       "_cube",
                                       split(objlists[i][j])[1]))

    # Shift to solar velocity frame
    for i in range(len(list_of_objs)):
        firstimage = FPImage(final_lists[i][0])
        velshift = firstimage.solarvel
        firstimage.close()
        if velshift is None:
            print ("Performing solar velocity shift for object " +
                   list_of_objs[i] + "...")
            solar_velocity_shift(final_lists[i], rest_wave)
        else:
            print ("Solar velocity shift for object " +
                   list_of_objs[i] + " already done.")

    # do_velmap is assumed to be a module-level flag set when the
    # Voigt-fitting software is successfully imported
    if not do_velmap:
        sys.exit("Velocity map not made - Voigt-fitting software not found.")

    # Velocity map fitting
    for i in range(len(list_of_objs)):
        if isfile(join(list_of_objs[i].replace(" ", "") + "_cube",
                       "velocity.fits")):
            while True:
                yn = raw_input("Velocity map already fitted for object " +
                               list_of_objs[i] + ". Redo this? (y/n) ")
                if "n" in yn or "N" in yn:
                    domap = False
                    break
                elif "y" in yn or "Y" in yn:
                    # Confirmation
                    yn = raw_input("Are you sure? This takes a while. " +
                                   "(y/n) ")
                    if ("y" in yn or "Y" in yn) and not ("n" in yn or
                                                         "N" in yn):
                        domap = True
                        break
        else:
            domap = True
        if domap:
            print ("Fitting velocity map for object " +
                   list_of_objs[i] + "...")
            if mode == "halpha":
                fit_velmap_ha_n2_mode(final_lists[i],
                                      list_of_objs[i].replace(" ", "") +
                                      "_cube",
                                      clobber=True)

    # Clean the velocity maps
    for i in range(len(list_of_objs)):
        make_clean_map(list_of_objs[i].replace(" ", "") + "_cube",
                       clobber=True)
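

# A minimal sketch of a command-line entry point (an illustrative
# addition, not part of the original module): run the full Fabry-Perot
# reduction on the default 'raw' directory. "halpha" is the only mode
# implemented above.
if __name__ == "__main__":
    pipeline(rawdir="raw", mode="halpha")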