def handle_flats(flatlist, maskname, band, options, extension=None,edgeThreshold=450,lampOffList=None,longslit=None): ''' handle_flats is the primary entry point to the Flats module. handle_flats takes a list of individual exposure FITS files and creates: 1. A CRR, dark subtracted, pixel-response flat file. 2. A set of polynomials that mark the edges of a slit Inputs: flatlist: maskname: The name of a mask band: A string indicating the bandceil Outputs: file {maskname}/flat_2d_{band}.fits -- pixel response flat file {maskname}/edges.np ''' tick = time.time() # Check bpos = np.ones(92) * -1 #Retrieve the list of files to use for flat creation. flatlist = IO.list_file_to_strings(flatlist) # Print the filenames to Standard-out for flat in flatlist: info(str(flat)) #Determine if flat files headers are in agreement for fname in flatlist: hdr, dat, bs = IO.readmosfits(fname, options, extension=extension) try: bs0 except: bs0 = bs if np.any(bs0.pos != bs.pos): print "bs0: "+str(bs0.pos)+" bs: "+str(bs.pos) error("Barset do not seem to match") raise Exception("Barsets do not seem to match") if hdr["filter"] != band: error ("Filter name %s does not match header filter name " "%s in file %s" % (band, hdr["filter"], fname)) raise Exception("Filter name %s does not match header filter name " "%s in file %s" % (band, hdr["filter"], fname)) for i in xrange(len(bpos)): b = hdr["B{0:02d}POS".format(i+1)] if bpos[i] == -1: bpos[i] = b else: if bpos[i] != b: error("Bar positions are not all the same in " "this set of flat files") raise Exception("Bar positions are not all the same in " "this set of flat files") bs = bs0 # Imcombine the lamps ON flats info("Attempting to combine previous files") combine(flatlist, maskname, band, options) # Imcombine the lamps OFF flats and subtract the off from the On sets if lampOffList != None: #Retrieve the list of files to use for flat creation. 
lampOffList = IO.list_file_to_strings(lampOffList) # Print the filenames to Standard-out for flat in lampOffList: info(str(flat)) print "Attempting to combine Lamps off data" combine(lampOffList, maskname, band, options, lampsOff=True) combine_off_on( maskname, band, options) debug("Combined '%s' to '%s'" % (flatlist, maskname)) info("Comgined to '%s'" % (maskname)) path = "combflat_2d_%s.fits" % band (header, data) = IO.readfits(path, use_bpm=True) info("Flat written to %s" % path) # Edge Trace if bs.long_slit: info( "Long slit mode recognized") info( "Central row position: "+str(longslit["row_position"])) info( "Upper and lower limits: "+str(longslit["yrange"][0])+" "+str(longslit["yrange"][1])) results = find_longslit_edges(data,header, bs, options, edgeThreshold=edgeThreshold, longslit=longslit) elif bs.long2pos_slit: info( "Long2pos mode recognized") results = find_long2pos_edges(data,header, bs, options, edgeThreshold=edgeThreshold, longslit=longslit) else: results = find_and_fit_edges(data, header, bs, options,edgeThreshold=edgeThreshold) results[-1]["maskname"] = maskname results[-1]["band"] = band np.save("slit-edges_{0}".format(band), results) save_ds9_edges(results, options) # Generate Flat out = "pixelflat_2d_%s.fits" % (band) if lampOffList != None: make_pixel_flat(data, results, options, out, flatlist, lampsOff=True) else: make_pixel_flat(data, results, options, out, flatlist, lampsOff=False) info( "Pixel flat took {0:6.4} s".format(time.time()-tick))
def handle_flats(flatlist, maskname, band, options, extension=None, edgeThreshold=450, lampOffList=None, longslit=None): ''' handle_flats is the primary entry point to the Flats module. handle_flats takes a list of individual exposure FITS files and creates: 1. A CRR, dark subtracted, pixel-response flat file. 2. A set of polynomials that mark the edges of a slit Inputs: flatlist: maskname: The name of a mask band: A string indicating the bandceil Outputs: file {maskname}/flat_2d_{band}.fits -- pixel response flat file {maskname}/edges.np ''' tick = time.time() # Check bpos = np.ones(92) * -1 #Retrieve the list of files to use for flat creation. flatlist = IO.list_file_to_strings(flatlist) # Print the filenames to Standard-out for flat in flatlist: info(str(flat)) #Determine if flat files headers are in agreement for fname in flatlist: hdr, dat, bs = IO.readmosfits(fname, options, extension=extension) try: bs0 except: bs0 = bs if np.any(bs0.pos != bs.pos): print "bs0: " + str(bs0.pos) + " bs: " + str(bs.pos) error("Barset do not seem to match") raise Exception("Barsets do not seem to match") if hdr["filter"] != band: error("Filter name %s does not match header filter name " "%s in file %s" % (band, hdr["filter"], fname)) raise Exception("Filter name %s does not match header filter name " "%s in file %s" % (band, hdr["filter"], fname)) for i in xrange(len(bpos)): b = hdr["B{0:02d}POS".format(i + 1)] if bpos[i] == -1: bpos[i] = b else: if bpos[i] != b: error("Bar positions are not all the same in " "this set of flat files") raise Exception("Bar positions are not all the same in " "this set of flat files") bs = bs0 # Imcombine the lamps ON flats info("Attempting to combine previous files") combine(flatlist, maskname, band, options) # Imcombine the lamps OFF flats and subtract the off from the On sets if lampOffList != None: #Retrieve the list of files to use for flat creation. 
lampOffList = IO.list_file_to_strings(lampOffList) # Print the filenames to Standard-out for flat in lampOffList: info(str(flat)) print "Attempting to combine Lamps off data" combine(lampOffList, maskname, band, options, lampsOff=True) combine_off_on(maskname, band, options) debug("Combined '%s' to '%s'" % (flatlist, maskname)) info("Comgined to '%s'" % (maskname)) path = "combflat_2d_%s.fits" % band (header, data) = IO.readfits(path, use_bpm=True) info("Flat written to %s" % path) # Edge Trace if bs.long_slit: info("Long slit mode recognized") info("Central row position: " + str(longslit["row_position"])) info("Upper and lower limits: " + str(longslit["yrange"][0]) + " " + str(longslit["yrange"][1])) results = find_longslit_edges(data, header, bs, options, edgeThreshold=edgeThreshold, longslit=longslit) elif bs.long2pos_slit: info("Long2pos mode recognized") results = find_long2pos_edges(data, header, bs, options, edgeThreshold=edgeThreshold, longslit=longslit) else: results = find_and_fit_edges(data, header, bs, options, edgeThreshold=edgeThreshold) results[-1]["maskname"] = maskname results[-1]["band"] = band np.save("slit-edges_{0}".format(band), results) save_ds9_edges(results, options) # Generate Flat out = "pixelflat_2d_%s.fits" % (band) if lampOffList != None: make_pixel_flat(data, results, options, out, flatlist, lampsOff=True) else: make_pixel_flat(data, results, options, out, flatlist, lampsOff=False) info("Pixel flat took {0:6.4} s".format(time.time() - tick))
# Driver script: run the wavelength pipeline on a single NGC5053 exposure.
fs = ['m120406_0291.fits']
maskname = 'NGC5053'
options = Options.wavelength

# The output directory must already exist; refuse to run otherwise.
path = os.path.join(options["outdir"], maskname)
if not os.path.exists(path):
    raise Exception("Output directory '%s' does not exist. This "
                    "directory should exist." % path)

# Full wavelength-solution pass, currently disabled via the False flag.
if False:
    for fname in fs:
        fp = os.path.join(path, fname)

        mfits = IO.readmosfits(fp)
        header, data, bs = mfits

        Wavelength.plot_mask_solution_ds9(fname, maskname, options)
        Wavelength.fit_lambda(mfits, fname, maskname, options)
        Wavelength.apply_lambda(mfits, fname, maskname, options)
        Wavelength.plot_data_quality(maskname, fname, options)
        Wavelength.plot_sky_spectra(maskname, fname, options)
        Wavelength.plot_mask_fits(maskname, fname, options)

# Active branch: placeholder loop, no per-file work yet.
if True:
    for fname in fs:
        pass
def imcombine(files, maskname, options, flat, outname=None, shifts=None, extension=None): ''' From a list of files it imcombine returns the imcombine of several values. The imcombine code also estimates the readnoise ad RN/sqrt(numreads) so that the variance per frame is equal to (ADU + RN^2) where RN is computed in ADUs. Arguments: files[]: list of full path to files to combine maskname: Name of mask options: Options dictionary flat[2048x2048]: Flat field (values should all be ~ 1.0) outname: If set, will write (see notes below for details) eps_[outname].fits: electron/sec file itimes_[outname].fits: integration time var_[outname].fits: Variance files shifts[len(files)]: If set, will "roll" each file by the amount in the shifts vector in pixels. This argument is used when telescope tracking is poor. If you need to use this, please notify Keck staff about poor telescope tracking. Returns 6-element tuple: header: The combined header electrons [2048x2048]: e- (in e- units) var [2048x2048]: electrons + RN**2 (in e-^2 units) bs: The MOSFIRE.Barset instance itimes [2048x2048]: itimes (in s units) Nframe: The number of frames that contribute to the summed arrays above. If Nframe > 5 I use the sigma-clipping Cosmic Ray Rejection tool. If Nframe < 5 then I drop the max/min elements. Notes: header -- fits header ADUs -- The mean # of ADUs per frame var -- the Variance [in adu] per frame. bs -- Barset itimes -- The _total_ integration time in second Nframe -- The number of frames in a stack. 
Thus the number of electron per second is derived as: e-/sec = (ADUs * Gain / Flat) * (Nframe/itimes) The total number of electrons is: el = ADUs * Gain * Nframe ''' ADUs = np.zeros((len(files), 2048, 2048)) itimes = np.zeros((len(files), 2048, 2048)) prevssl = None prevmn = None patternid = None maskname = None header = None if shifts is None: shifts = np.zeros(len(files)) warnings.filterwarnings('ignore') for i in xrange(len(files)): fname = files[i] thishdr, data, bs = IO.readmosfits(fname, options, extension=extension) itimes[i,:,:] = thishdr["truitime"] base = os.path.basename(fname).rstrip(".fits") fnum = int(base.split("_")[1]) if shifts[i] == 0: ADUs[i,:,:] = data.filled(0.0) / flat else: ADUs[i,:,:] = np.roll(data.filled(0.0) / flat, np.int(shifts[i]), axis=0) ''' Construct Header''' if header is None: header = thishdr header["imfno%3.3i" % (fnum)] = (fname, "img%3.3i file name" % fnum) map(lambda x: rem_header_key(header, x), ["CTYPE1", "CTYPE2", "WCSDIM", "CD1_1", "CD1_2", "CD2_1", "CD2_2", "LTM1_1", "LTM2_2", "WAT0_001", "WAT1_001", "WAT2_001", "CRVAL1", "CRVAL2", "CRPIX1", "CRPIX2", "RADECSYS"]) for card in header.cards: if card == '': continue key,val,comment = card if key in thishdr: if val != thishdr[key]: newkey = key + ("_img%2.2i" % fnum) try: header[newkey.rstrip()] = (thishdr[key], comment) except: pass ''' Now handle error checking''' if maskname is not None: if thishdr["maskname"] != maskname: raise Exception("File %s uses mask '%s' but the stack is of '%s'" % (fname, thishdr["maskname"], maskname)) maskname = thishdr["maskname"] if thishdr["aborted"]: raise Exception("Img '%s' was aborted and should not be used" % fname) if prevssl is not None: if len(prevssl) != len(bs.ssl): # todo Improve these checks raise Exception("The stack of input files seems to be of " "different masks") prevssl = bs.ssl if patternid is not None: if patternid != thishdr["frameid"]: raise Exception("The stack should be of '%s' frames only, but " "the current image is 
a '%s' frame." % (patternid, thishdr["frameid"])) patternid = thishdr["frameid"] if maskname is not None: if maskname != thishdr["maskname"]: raise Exception("The stack should be of CSU mask '%s' frames " "only but contains a frame of '%s'." % (maskname, thishdr["maskname"])) maskname = thishdr["maskname"] if thishdr["BUNIT"] != "ADU per coadd": raise Exception("The units of '%s' are not in ADU per coadd and " "this violates an assumption of the DRP. Some new code " "is needed in the DRP to handle the new units of " "'%s'." % (fname, thishdr["BUNIT"])) ''' Error checking is complete''' print "%s %s[%s]/%s: %5.1f s, Shift: %i px" % (fname, maskname, patternid, header['filter'], np.mean(itimes[i]), shifts[i]) warnings.filterwarnings('always') # the electrons and el_per_sec arrays are: # [2048, 2048, len(files)] and contain values for # each individual frame that is being combined. # These need to be kept here for CRR reasons. electrons = np.array(ADUs) * Detector.gain el_per_sec = electrons / itimes output = np.zeros((2048, 2048)) exptime = np.zeros((2048, 2048)) numreads = header["READS0"] RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain RN = Detector.RN / np.sqrt(numreads) # Cosmic ray rejection code begins here. This code construction the # electrons and itimes arrays. if len(files) >= 9: print "Sigclip CRR" srt = np.argsort(electrons, axis=0, kind='quicksort') shp = el_per_sec.shape sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]] electrons = electrons[srt, sti[1], sti[2]] el_per_sec = el_per_sec[srt, sti[1], sti[2]] itimes = itimes[srt, sti[1], sti[2]] # Construct the mean and standard deviation by dropping the top and bottom two # electron fluxes. This is temporary. 
mean = np.mean(el_per_sec[2:-2,:,:], axis = 0) std = np.std(el_per_sec[2:-2,:,:], axis = 0) drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) ) print "dropping: ", len(drop[0]) electrons[drop] = 0.0 itimes[drop] = 0.0 electrons = np.sum(electrons, axis=0) itimes = np.sum(itimes, axis=0) Nframe = len(files) elif len(files) > 5: print "WARNING: Drop min/max CRR" srt = np.argsort(el_per_sec,axis=0) shp = el_per_sec.shape sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]] electrons = electrons[srt, sti[1], sti[2]] itimes = itimes[srt, sti[1], sti[2]] electrons = np.sum(electrons[1:-1,:,:], axis=0) itimes = np.sum(itimes[1:-1,:,:], axis=0) Nframe = len(files) - 2 else: print "WARNING: CRR through median filtering" for i in xrange(len(files)): el = electrons[i,:,:] it = itimes[i,:,:] el_mf = scipy.signal.medfilt(el, 5) bad = np.abs(el - el_mf) / np.abs(el) > 10.0 el[bad] = 0.0 it[bad] = 0.0 electrons[i,:,:] = el itimes[i,:,:] = it electrons = np.sum(electrons, axis=0) itimes = np.sum(itimes, axis=0) Nframe = len(files) ''' Now handle variance ''' numreads = header["READS0"] RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain RN = Detector.RN / np.sqrt(numreads) var = (electrons + RN**2) ''' Now mask out bad pixels ''' electrons[data.mask] = np.nan var[data.mask] = np.inf if "RN" in header: raise Exception("RN Already populated in header") header['RN'] = ("%1.3f" , "Read noise in e-") header['NUMFRM'] = (Nframe, 'Typical number of frames in stack') if outname is not None: header['BUNIT'] = 'ELECTRONS/SECOND' IO.writefits(np.float32(electrons/itimes), maskname, "eps_%s" % (outname), options, header=header, overwrite=True) # Update itimes after division in order to not introduce nans itimes[data.mask] = 0.0 header['BUNIT'] = 'ELECTRONS^2' IO.writefits(var, maskname, "var_%s" % (outname), options, header=header, overwrite=True, lossy_compress=True) header['BUNIT'] = 'SECOND' IO.writefits(np.float32(itimes), maskname, "itimes_%s" % (outname), options, 
header=header, overwrite=True, lossy_compress=True) return header, electrons, var, bs, itimes, Nframe
def imcombine(files, maskname, options, flat, outname=None, shifts=None, extension=None): ''' From a list of files it imcombine returns the imcombine of several values. The imcombine code also estimates the readnoise ad RN/sqrt(numreads) so that the variance per frame is equal to (ADU + RN^2) where RN is computed in ADUs. Arguments: files[]: list of full path to files to combine maskname: Name of mask options: Options dictionary flat[2048x2048]: Flat field (values should all be ~ 1.0) outname: If set, will write (see notes below for details) eps_[outname].fits: electron/sec file itimes_[outname].fits: integration time var_[outname].fits: Variance files shifts[len(files)]: If set, will "roll" each file by the amount in the shifts vector in pixels. This argument is used when telescope tracking is poor. If you need to use this, please notify Keck staff about poor telescope tracking. Returns 6-element tuple: header: The combined header electrons [2048x2048]: e- (in e- units) var [2048x2048]: electrons + RN**2 (in e-^2 units) bs: The MOSFIRE.Barset instance itimes [2048x2048]: itimes (in s units) Nframe: The number of frames that contribute to the summed arrays above. If Nframe > 5 I use the sigma-clipping Cosmic Ray Rejection tool. If Nframe < 5 then I drop the max/min elements. Notes: header -- fits header ADUs -- The mean # of ADUs per frame var -- the Variance [in adu] per frame. bs -- Barset itimes -- The _total_ integration time in second Nframe -- The number of frames in a stack. 
Thus the number of electron per second is derived as: e-/sec = (ADUs * Gain / Flat) * (Nframe/itimes) The total number of electrons is: el = ADUs * Gain * Nframe ''' ADUs = np.zeros((len(files), 2048, 2048)) itimes = np.zeros((len(files), 2048, 2048)) prevssl = None prevmn = None patternid = None maskname = None header = None if shifts is None: shifts = np.zeros(len(files)) warnings.filterwarnings('ignore') for i in xrange(len(files)): fname = files[i] thishdr, data, bs = IO.readmosfits(fname, options, extension=extension) itimes[i, :, :] = thishdr["truitime"] base = os.path.basename(fname).rstrip(".fits") fnum = int(base.split("_")[1]) if shifts[i] == 0: ADUs[i, :, :] = data.filled(0.0) / flat else: ADUs[i, :, :] = np.roll(data.filled(0.0) / flat, np.int(shifts[i]), axis=0) ''' Construct Header''' if header is None: header = thishdr header["imfno%3.3i" % (fnum)] = (fname, "img%3.3i file name" % fnum) map(lambda x: rem_header_key(header, x), [ "CTYPE1", "CTYPE2", "WCSDIM", "CD1_1", "CD1_2", "CD2_1", "CD2_2", "LTM1_1", "LTM2_2", "WAT0_001", "WAT1_001", "WAT2_001", "CRVAL1", "CRVAL2", "CRPIX1", "CRPIX2", "RADECSYS" ]) for card in header.cards: if card == '': continue key, val, comment = card if key in thishdr: if val != thishdr[key]: newkey = key + ("_img%2.2i" % fnum) try: header[newkey.rstrip()] = (thishdr[key], comment) except: pass ''' Now handle error checking''' if maskname is not None: if thishdr["maskname"] != maskname: raise Exception( "File %s uses mask '%s' but the stack is of '%s'" % (fname, thishdr["maskname"], maskname)) maskname = thishdr["maskname"] if thishdr["aborted"]: raise Exception("Img '%s' was aborted and should not be used" % fname) if prevssl is not None: if len(prevssl) != len(bs.ssl): # todo Improve these checks raise Exception("The stack of input files seems to be of " "different masks") prevssl = bs.ssl if patternid is not None: if patternid != thishdr["frameid"]: raise Exception("The stack should be of '%s' frames only, but " "the 
current image is a '%s' frame." % (patternid, thishdr["frameid"])) patternid = thishdr["frameid"] if maskname is not None: if maskname != thishdr["maskname"]: raise Exception("The stack should be of CSU mask '%s' frames " "only but contains a frame of '%s'." % (maskname, thishdr["maskname"])) maskname = thishdr["maskname"] if thishdr["BUNIT"] != "ADU per coadd": raise Exception( "The units of '%s' are not in ADU per coadd and " "this violates an assumption of the DRP. Some new code " "is needed in the DRP to handle the new units of " "'%s'." % (fname, thishdr["BUNIT"])) ''' Error checking is complete''' print "%s %s[%s]/%s: %5.1f s, Shift: %i px" % ( fname, maskname, patternid, header['filter'], np.mean( itimes[i]), shifts[i]) warnings.filterwarnings('always') # the electrons and el_per_sec arrays are: # [2048, 2048, len(files)] and contain values for # each individual frame that is being combined. # These need to be kept here for CRR reasons. electrons = np.array(ADUs) * Detector.gain el_per_sec = electrons / itimes output = np.zeros((2048, 2048)) exptime = np.zeros((2048, 2048)) numreads = header["READS0"] RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain RN = Detector.RN / np.sqrt(numreads) # Cosmic ray rejection code begins here. This code construction the # electrons and itimes arrays. if len(files) >= 9: print "Sigclip CRR" srt = np.argsort(electrons, axis=0, kind='quicksort') shp = el_per_sec.shape sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]] electrons = electrons[srt, sti[1], sti[2]] el_per_sec = el_per_sec[srt, sti[1], sti[2]] itimes = itimes[srt, sti[1], sti[2]] # Construct the mean and standard deviation by dropping the top and bottom two # electron fluxes. This is temporary. 
mean = np.mean(el_per_sec[2:-2, :, :], axis=0) std = np.std(el_per_sec[2:-2, :, :], axis=0) drop = np.where((el_per_sec > (mean + std * 4)) | (el_per_sec < (mean - std * 4))) print "dropping: ", len(drop[0]) electrons[drop] = 0.0 itimes[drop] = 0.0 electrons = np.sum(electrons, axis=0) itimes = np.sum(itimes, axis=0) Nframe = len(files) elif len(files) > 5: print "WARNING: Drop min/max CRR" srt = np.argsort(el_per_sec, axis=0) shp = el_per_sec.shape sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]] electrons = electrons[srt, sti[1], sti[2]] itimes = itimes[srt, sti[1], sti[2]] electrons = np.sum(electrons[1:-1, :, :], axis=0) itimes = np.sum(itimes[1:-1, :, :], axis=0) Nframe = len(files) - 2 else: print "WARNING: CRR through median filtering" for i in xrange(len(files)): el = electrons[i, :, :] it = itimes[i, :, :] el_mf = scipy.signal.medfilt(el, 5) bad = np.abs(el - el_mf) / np.abs(el) > 10.0 el[bad] = 0.0 it[bad] = 0.0 electrons[i, :, :] = el itimes[i, :, :] = it electrons = np.sum(electrons, axis=0) itimes = np.sum(itimes, axis=0) Nframe = len(files) ''' Now handle variance ''' numreads = header["READS0"] RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain RN = Detector.RN / np.sqrt(numreads) var = (electrons + RN**2) ''' Now mask out bad pixels ''' electrons[data.mask] = np.nan var[data.mask] = np.inf if "RN" in header: raise Exception("RN Already populated in header") header['RN'] = ("%1.3f", "Read noise in e-") header['NUMFRM'] = (Nframe, 'Typical number of frames in stack') if outname is not None: header['BUNIT'] = 'ELECTRONS/SECOND' IO.writefits(np.float32(electrons / itimes), maskname, "eps_%s" % (outname), options, header=header, overwrite=True) # Update itimes after division in order to not introduce nans itimes[data.mask] = 0.0 header['BUNIT'] = 'ELECTRONS^2' IO.writefits(var, maskname, "var_%s" % (outname), options, header=header, overwrite=True, lossy_compress=True) header['BUNIT'] = 'SECOND' IO.writefits(np.float32(itimes), maskname, "itimes_%s" % 
(outname), options, header=header, overwrite=True, lossy_compress=True) return header, electrons, var, bs, itimes, Nframe
def handle_rectification(maskname, in_files, wavename, band_pass, barset_file, options, commissioning_shift=3.0): '''Handle slit rectification and coaddition. Args: maskname: The mask name string in_files: List of stacked spectra in electron per second. Will look like ['electrons_Offset_1.5.txt.fits', 'electrons_Offset_-1.5.txt.fits'] wavename: path (relative or full) to the wavelength stack file, string band_pass: Band pass name, string barset_file: Path to a mosfire fits file containing the full set of FITS extensions for the barset. It can be any file in the list of science files. Returns: None Writes files: [maskname]_[band]_[object name]_eps.fits -- The rectified, background subtracted, stacked eps spectrum [maskname]_[band]_[object name]_sig.fits -- Rectified, background subtracted, stacked weight spectrum (STD/itime) [maskname]_[band]_[object_name]_itime.fits Rectified, CRR stacked integration time spectrum [maskname]_[band]_[object_name]_snrs.fits Rectified signal to noise spectrum ''' global edges, dats, vars, itimes, shifts, lambdas, band, fidl, all_shifts band = band_pass dlambda = Wavelength.grating_results(band) hpp = Filters.hpp[band] fidl = np.arange(hpp[0], hpp[1], dlambda) lambdas = IO.readfits(wavename, options) if np.any(lambdas[1].data < 0) or np.any(lambdas[1].data > 29000): print "***********WARNING ***********" print "The file {0} may not be a wavelength file.".format(wavename) print "Check before proceeding." 
print "***********WARNING ***********" edges, meta = IO.load_edges(maskname, band, options) shifts = [] posnames = [] postoshift = {} for file in in_files: print ":: ", file II = IO.read_drpfits(maskname, file, options) off = np.array((II[0]["decoff"], II[0]["raoff"]),dtype=np.float64) if "yoffset" in II[0]: off = -II[0]["yoffset"] else: # Deal with data taken during commissioning if II[0]["frameid"] == 'A': off = 0.0 else: off = commissioning_shift try: off0 except: off0 = off shift = off - off0 shifts.append(shift) posnames.append(II[0]["frameid"]) postoshift[II[0]['frameid']] = shift print "Position {0} shift: {1:2.2f} as".format(off, shift) plans = Background.guess_plan_from_positions(set(posnames)) all_shifts = [] for plan in plans: to_append = [] for pos in plan: to_append.append(postoshift[pos]) all_shifts.append(to_append) # Reverse the elements in all_shifts to deal with an inversion all_shifts.reverse() theBPM = IO.badpixelmask() all_solutions = [] cntr = 0 for plan in plans: p0 = plan[0].replace("'", "p") p1 = plan[1].replace("'", "p") suffix = "%s-%s" % (p0,p1) print "Handling plan %s" % suffix fname = "bsub_{0}_{1}_{2}.fits".format(maskname,band,suffix) EPS = IO.read_drpfits(maskname, fname, options) EPS[1] = np.ma.masked_array(EPS[1], theBPM, fill_value=0) fname = "var_{0}_{1}_{2}.fits".format(maskname, band, suffix) VAR = IO.read_drpfits(maskname, fname, options) VAR[1] = np.ma.masked_array(VAR[1], theBPM, fill_value=np.inf) fname = "itime_{0}_{1}_{2}.fits".format(maskname, band, suffix) ITIME = IO.read_drpfits(maskname, fname, options) ITIME[1] = np.ma.masked_array(ITIME[1], theBPM, fill_value=0) dats = EPS vars = VAR itimes = ITIME EPS[0]["ORIGFILE"] = fname tock = time.time() sols = range(len(edges)-1,-1,-1) shifts = all_shifts[cntr] cntr += 1 p = Pool() solutions = p.map(handle_rectification_helper, sols) #solutions = map(handle_rectification_helper, [15]) p.close() all_solutions.append(solutions) tick = time.time() print "-----> Mask took %i. 
Writing to disk." % (tick-tock) output = np.zeros((1, len(fidl))) snrs = np.zeros((1, len(fidl))) sdout= np.zeros((1, len(fidl))) itout= np.zeros((1, len(fidl))) # the barset [bs] is used for determining object position x, x, bs = IO.readmosfits(barset_file, options) for i_slit in xrange(len(solutions)): solution = all_solutions[0][i_slit] header = EPS[0].copy() obj = header['OBJECT'] target_name = bs.ssl[-(i_slit+1)]['Target_Name'] header['OBJECT'] = target_name pixel_dist = np.float(bs.ssl[-(i_slit+1)]['Target_to_center_of_slit_distance'])/0.18 pixel_dist -= solution['offset'] ll = solution["lambda"] header["wat0_001"] = "system=world" header["wat1_001"] = "wtype=linear" header["wat2_001"] = "wtype=linear" header["dispaxis"] = 1 header["dclog1"] = "Transform" header["dc-flag"] = 0 header["ctype1"] = "AWAV" header["cunit1"] = "Angstrom" header["crval1"] = ll[0] header["crval2"] = -solution["eps_img"].shape[0]/2 - pixel_dist header["crpix1"] = 1 header["crpix2"] = 1 header["cdelt1"] = 1 header["cdelt2"] = 1 header["cname1"] = "angstrom" header["cname2"] = "pixel" header["cd1_1"] = ll[1]-ll[0] header["cd1_2"] = 0 header["cd2_1"] = 0 header["cd2_2"] = 1 S = output.shape img = solution["eps_img"] std = solution["sd_img"] tms = solution["itime_img"] for i_solution in xrange(1,len(all_solutions)): print "Combining solution %i" %i_solution solution = all_solutions[i_solution][i_slit] img += solution["eps_img"] std += solution["sd_img"] tms += solution["itime_img"] output = np.append(output, img, 0) output = np.append(output, np.nan*np.zeros((3,S[1])), 0) snrs = np.append(snrs, img*tms/std, 0) snrs = np.append(snrs, np.nan*np.zeros((3,S[1])), 0) sdout = np.append(sdout, std, 0) sdout = np.append(sdout, np.nan*np.zeros((3,S[1])), 0) itout = np.append(itout, tms, 0) itout = np.append(itout, np.nan*np.zeros((3,S[1])), 0) header['bunit'] = ('electron/second', 'electron power') IO.writefits(img, maskname, "{0}_{1}_{2}_eps.fits".format(maskname, band, target_name), options, 
overwrite=True, header=header, lossy_compress=False) header['bunit'] = ('electron/second', 'sigma/itime') IO.writefits(std/tms, maskname, "{0}_{1}_{2}_sig.fits".format(maskname, band, target_name), options, overwrite=True, header=header, lossy_compress=False) header['bunit'] = ('second', 'exposure time') IO.writefits(tms, maskname, "{0}_{1}_{2}_itime.fits".format(maskname, band, target_name), options, overwrite=True, header=header, lossy_compress=False) header['bunit'] = ('', 'SNR') IO.writefits(img*tms/std, maskname, "{0}_{1}_{2}_snrs.fits".format(maskname, band, target_name), options, overwrite=True, header=header, lossy_compress=False) header = EPS[0].copy() header["wat0_001"] = "system=world" header["wat1_001"] = "wtype=linear" header["wat2_001"] = "wtype=linear" header["dispaxis"] = 1 header["dclog1"] = "Transform" header["dc-flag"] = 0 header["ctype1"] = "AWAV" header["cunit1"] = ("Angstrom", 'Start wavelength') header["crval1"] = ll[0] header["crval2"] = 1 header["crpix1"] = 1 header["crpix2"] = 1 header["cdelt1"] = 1 header["cdelt2"] = 1 header["cname1"] = "angstrom" header["cname2"] = "pixel" header["cd1_1"] = (ll[1]-ll[0], 'Angstrom/pixel') header["cd1_2"] = 0 header["cd2_1"] = 0 header["cd2_2"] = 1 header["bunit"] = "ELECTRONS/SECOND" IO.writefits(output, maskname, "{0}_{1}_eps.fits".format(maskname, band), options, overwrite=True, header=header, lossy_compress=False) header["bunit"] = "" IO.writefits(snrs, maskname, "{0}_{1}_snrs.fits".format(maskname, band), options, overwrite=True, header=header, lossy_compress=False) header["bunit"] = "ELECTRONS/SECOND" IO.writefits(sdout/itout, maskname, "{0}_{1}_sig.fits".format(maskname, band), options, overwrite=True, header=header, lossy_compress=False) header["bunit"] = "SECOND" IO.writefits(itout, maskname, "{0}_{1}_itime.fits".format(maskname, band), options, overwrite=True, header=header, lossy_compress=False)
import numpy as np
import time
import pylab as pl
import scipy as sp
from scipy import interpolate as II

import spline
import MOSFIRE.Options as options
from MOSFIRE import IO, Fit, Wavelength

# Calibration data for a single exposure, flat, and wavelength solution.
path = "/scr2/mosfire/c9_npk/npk_calib4_q1700_pa_0/"
mdat = IO.readmosfits(path + "m110323_2737.fits")
fdat = IO.readfits(path + "pixelflat_2d_H.fits")
ldat = IO.readfits(path + "lambda_solution_m110323_2737.fits")

# Flat-field the science frame; lam maps pixel -> wavelength.
dat = mdat[1]/fdat[1]
lam = ldat[1]

# Extract a single slit (rows 707..735) from the frame and wavelength map.
yroi = slice(707, 736)
slit = dat[yroi, :]
lslit = lam[yroi, :]

# Plotting is off by default; flip DRAW to inspect the slit interactively.
DRAW = False
if DRAW:
    pl.figure(1)
    pl.clf()
    pl.subplot(2, 2, 1)
def handle_rectification(maskname, in_files, wavename, band_pass, files, options,
        commissioning_shift=3.0, target='default'):
    '''Handle slit rectification and coaddition.

    Args:
        maskname: The mask name string
        in_files: List of stacked spectra in electron per second. Will look
            like ['electrons_Offset_1.5.txt.fits',
            'electrons_Offset_-1.5.txt.fits']
        wavename: path (relative or full) to the wavelength stack file, string
        band_pass: Band pass name, string
        files: List of science files (any one provides the barset extensions)
        options: Options dictionary passed through to the IO layer
        commissioning_shift: Fallback y-shift (arcsec) for commissioning-era
            data that lacks a "yoffset" keyword
        target: Output-name override; 'default' means use maskname

    Returns:
        None

    Writes files:
        [maskname]_[band]_[object name]_eps.fits --
            The rectified, background subtracted, stacked eps spectrum
        [maskname]_[band]_[object name]_sig.fits --
            Rectified, background subtracted, stacked weight spectrum (STD/itime)
        [maskname]_[band]_[object_name]_itime.fits
            Rectified, CRR stacked integration time spectrum
        [maskname]_[band]_[object_name]_snrs.fits
            Rectified signal to noise spectrum
    '''

    # Module-level state: consumed by handle_rectification_helper in the
    # worker processes spawned via Pool.map below.
    global edges, dats, vars, itimes, shifts, lambdas, band, fidl, all_shifts

    band = band_pass

    dlambda = Wavelength.grating_results(band)

    hpp = Filters.hpp[band]
    # Fiducial wavelength grid all slits are resampled onto.
    fidl = np.arange(hpp[0], hpp[1], dlambda)

    lambdas = IO.readfits(wavename, options)

    # Plausibility check: wavelengths should be positive and < 29000
    # (presumably Angstrom -- TODO confirm unit).
    if np.any(lambdas[1].data < 0) or np.any(lambdas[1].data > 29000):
        info("***********WARNING ***********")
        info("The file {0} may not be a wavelength file.".format(wavename))
        info("Check before proceeding.")
        info("***********WARNING ***********")

    edges, meta = IO.load_edges(maskname, band, options)
    shifts = []

    posnames = []
    postoshift = {}

    # Determine the spatial (y) shift of every frame relative to the first.
    for file in in_files:

        info(":: "+str(file))
        II = IO.read_drpfits(maskname, file, options)

        off = np.array((II[0]["decoff"], II[0]["raoff"]),dtype=np.float64)
        if "yoffset" in II[0]:
            off = -II[0]["yoffset"]
        else:
            # Deal with data taken during commissioning
            if II[0]["frameid"] == 'A': off = 0.0
            else: off = commissioning_shift

        # Lazy first-iteration binding: off0 exists only after the first pass.
        try: off0
        except: off0 = off

        shift = off - off0

        shifts.append(shift)
        posnames.append(II[0]["frameid"])
        postoshift[II[0]['frameid']] = shift

        info("Position {0} shift: {1:2.2f} as".format(off, shift))

    # this is to deal with cases in which we want to rectify one single file
    # NOTE(review): 'is 1' compares identity with an int literal; it works in
    # CPython only via small-int caching -- '== 1' is the correct comparison.
    if len(set(posnames)) is 1:
        plans = [['A']]
    else:
        plans = Background.guess_plan_from_positions(set(posnames))

    all_shifts = []
    for plan in plans:
        to_append = []
        for pos in plan:
            to_append.append(postoshift[pos])

        all_shifts.append(to_append)

    # Reverse the elements in all_shifts to deal with an inversion
    all_shifts.reverse()

    theBPM = IO.badpixelmask()

    all_solutions = []
    cntr = 0

    # NOTE(review): "is 'default'" relies on string interning; '==' intended.
    if target is 'default':
        outname = maskname
    else:
        outname = target

    # Process each background-subtraction plan (e.g. A-B) independently.
    for plan in plans:
        if len(plan) is 1:
            p0 = 'A'
            p1 = 'B'
        else:
            p0 = plan[0].replace("'", "p")
            p1 = plan[1].replace("'", "p")
        suffix = "%s-%s" % (p0,p1)
        info("Handling plan %s" % suffix)
        fname = "bsub_{0}_{1}_{2}.fits".format(outname,band,suffix)
        EPS = IO.read_drpfits(maskname, fname, options)
        # Mask bad pixels; fill values make masked pixels contribute nothing
        # (zero signal/itime, infinite variance).
        EPS[1] = np.ma.masked_array(EPS[1], theBPM, fill_value=0)

        fname = "var_{0}_{1}_{2}.fits".format(outname, band, suffix)
        VAR = IO.read_drpfits(maskname, fname, options)
        VAR[1] = np.ma.masked_array(VAR[1], theBPM, fill_value=np.inf)

        fname = "itime_{0}_{1}_{2}.fits".format(outname, band, suffix)
        ITIME = IO.read_drpfits(maskname, fname, options)
        ITIME[1] = np.ma.masked_array(ITIME[1], theBPM, fill_value=0)

        # Publish this plan's frames to the module globals read by workers.
        dats = EPS
        vars = VAR
        itimes = ITIME
        EPS[0]["ORIGFILE"] = fname

        tock = time.time()
        # Slits are handled highest-index first.
        sols = range(len(edges)-1,-1,-1)

        shifts = all_shifts[cntr]
        cntr += 1
        # Rectify all slits of this plan in parallel.
        p = Pool()
        solutions = p.map(handle_rectification_helper, sols)
        p.close()

        all_solutions.append(solutions)

    tick = time.time()
    info("-----> Mask took %i. Writing to disk." % (tick-tock))

    # Mask-wide stacks; each slit is appended below, separated by NaN rows.
    output = np.zeros((1, len(fidl)))
    snrs = np.zeros((1, len(fidl)))
    sdout= np.zeros((1, len(fidl)))
    itout= np.zeros((1, len(fidl)))

    # the barset [bs] is used for determining object position
    files = IO.list_file_to_strings(files)
    info("Using "+str(files[0])+" for slit configuration.")
    x, x, bs = IO.readmosfits(files[0], options)

    for i_slit in xrange(len(solutions)):
        solution = all_solutions[0][i_slit]
        header = EPS[0].copy()
        obj = header['OBJECT']

        # Slits run bottom-up, hence the reversed index into the slit list.
        target_name = bs.ssl[-(i_slit+1)]['Target_Name']
        header['OBJECT'] = target_name

        # 0.18 is presumably the arcsec/pixel plate scale -- TODO confirm.
        pixel_dist = np.float(bs.ssl[-(i_slit+1)]['Target_to_center_of_slit_distance'])/0.18
        pixel_dist -= solution['offset']

        ll = solution["lambda"]

        # Linear wavelength WCS for this slit.
        header["wat0_001"] = "system=world"
        header["wat1_001"] = "wtype=linear"
        header["wat2_001"] = "wtype=linear"
        header["dispaxis"] = 1
        header["dclog1"] = "Transform"
        header["dc-flag"] = 0
        header["ctype1"] = "AWAV"
        header["cunit1"] = "Angstrom"
        header["crval1"] = ll[0]
        header["crval2"] = -solution["eps_img"].shape[0]/2 - pixel_dist
        header["crpix1"] = 1
        header["crpix2"] = 1
        #remove redundant CDELTi due to wavelength issues with ds9
        #see: https://github.com/Keck-DataReductionPipelines/MosfireDRP/issues/44
        #header["cdelt1"] = 1
        #header["cdelt2"] = 1
        header["cname1"] = "angstrom"
        header["cname2"] = "pixel"
        header["cd1_1"] = ll[1]-ll[0]
        header["cd1_2"] = 0
        header["cd2_1"] = 0
        header["cd2_2"] = 1

        try:
            header["BARYCORR"]= (lambdas[0]['BARYCORR'],lambdas[0].comments['BARYCORR'])
        except KeyError:
            warning( "Barycentric corrections not applied to the wavelength solution")
            pass

        S = output.shape

        img = solution["eps_img"]
        std = solution["sd_img"]
        tms = solution["itime_img"]

        # Co-add this slit's images from every remaining plan.
        for i_solution in xrange(1,len(all_solutions)):
            info("Combining solution %i" %i_solution)
            solution = all_solutions[i_solution][i_slit]
            img += solution["eps_img"]
            std += solution["sd_img"]
            tms += solution["itime_img"]
        #print "adding in quadrature"

        # Append to the mask-wide stacks, padding 3 NaN rows between slits.
        output = np.append(output, img, 0)
        output = np.append(output, np.nan*np.zeros((3,S[1])), 0)
        snrs = np.append(snrs, img*tms/std, 0)
        snrs = np.append(snrs, np.nan*np.zeros((3,S[1])), 0)
        sdout = np.append(sdout, std, 0)
        sdout = np.append(sdout, np.nan*np.zeros((3,S[1])), 0)
        itout = np.append(itout, tms, 0)
        itout = np.append(itout, np.nan*np.zeros((3,S[1])), 0)

        # Per-target products for this slit.
        header['bunit'] = ('electron/second', 'electron power')
        IO.writefits(img, maskname, "{0}_{1}_{2}_eps.fits".format(outname,
            band, target_name), options, overwrite=True, header=header,
            lossy_compress=False)

        header['bunit'] = ('electron/second', 'sigma/itime')
        IO.writefits(std/tms, maskname, "{0}_{1}_{2}_sig.fits".format(outname,
            band, target_name), options, overwrite=True, header=header,
            lossy_compress=False)

        header['bunit'] = ('second', 'exposure time')
        IO.writefits(tms, maskname, "{0}_{1}_{2}_itime.fits".format(outname,
            band, target_name), options, overwrite=True, header=header,
            lossy_compress=False)

        header['bunit'] = ('', 'SNR')
        IO.writefits(img*tms/std, maskname, "{0}_{1}_{2}_snrs.fits".format(outname,
            band, target_name), options, overwrite=True, header=header,
            lossy_compress=False)

    # Mask-wide stacked products; note ll here is from the last slit iterated.
    header = EPS[0].copy()
    header["wat0_001"] = "system=world"
    header["wat1_001"] = "wtype=linear"
    header["wat2_001"] = "wtype=linear"
    header["dispaxis"] = 1
    header["dclog1"] = "Transform"
    header["dc-flag"] = 0
    header["ctype1"] = "AWAV"
    header["cunit1"] = ("Angstrom", 'Start wavelength')
    header["crval1"] = ll[0]
    header["crval2"] = 1
    header["crpix1"] = 1
    header["crpix2"] = 1
    #remove redundant CDELTi due to wavelength issues with ds9
    #see: https://github.com/Keck-DataReductionPipelines/MosfireDRP/issues/44
    #header["cdelt1"] = 1
    #header["cdelt2"] = 1
    header["cname1"] = "angstrom"
    header["cname2"] = "pixel"
    header["cd1_1"] = (ll[1]-ll[0], 'Angstrom/pixel')
    header["cd1_2"] = 0
    header["cd2_1"] = 0
    header["cd2_2"] = 1

    try:
        header["BARYCORR"]= (lambdas[0]['BARYCORR'],lambdas[0].comments['BARYCORR'])
    except KeyError:
        warning( "Barycentric corrections not applied to the wavelength solution")
        pass

    header["bunit"] = "ELECTRONS/SECOND"
    info("############ Final reduced file: {0}_{1}_eps.fits".format(outname,band))
    IO.writefits(output, maskname, "{0}_{1}_eps.fits".format(outname, band),
        options, overwrite=True, header=header, lossy_compress=False)

    header["bunit"] = ""
    IO.writefits(snrs, maskname, "{0}_{1}_snrs.fits".format(outname, band),
        options, overwrite=True, header=header, lossy_compress=False)

    header["bunit"] = "ELECTRONS/SECOND"
    IO.writefits(sdout/itout, maskname, "{0}_{1}_sig.fits".format(outname,
        band), options, overwrite=True, header=header, lossy_compress=False)

    header["bunit"] = "SECOND"
    IO.writefits(itout, maskname, "{0}_{1}_itime.fits".format(outname, band),
        options, overwrite=True, header=header, lossy_compress=False)
nums = [26, 27, 28, 29] roll = 0 elif False: # test_marc H name = "test_marc" band = "H" nums = [19, 20, 21, 22, 23, 24] roll = 0 flatlist = [] if roll == 0: for num in nums: flatlist.append("/users/npk/desktop/5apr/m120406_%4.4i.fits" % num) else: for num in nums: header, ff, bs, targs, ssl, msl, asl = IO.readmosfits( "/users/npk/desktop/5apr/m120406_%4.4i.fits" % num) hdu = pyfits.PrimaryHDU(np.roll(ff, -5, 0), header) hdulist = pyfits.HDUList([hdu]) for tbl in [targs, ssl, msl, asl]: hdu = pyfits.new_table(tbl) hdulist.append(hdu) fn = "/users/npk/desktop/5apr/roll_m120406_%4.4i.fits" % num os.remove(fn) hdulist.writeto(fn) flatlist.append(fn) print flatlist
def handle_flats(flatlist, maskname, band, options, extension=None): ''' handle_flats is the primary entry point to the Flats module. handle_flats takes a list of individual exposure FITS files and creates: 1. A CRR, dark subtracted, pixel-response flat file. 2. A set of polynomials that mark the edges of a slit Inputs: flatlist: Either a string of an input file or a list of file names maskname: The name of a mask band: A string indicating the bandceil Outputs: file {maskname}/flat_2d_{band}.fits -- pixel response flat file {maskname}/edges.np ''' tick = time.time() # Check bpos = np.ones(92) * -1 flatlist = IO.list_file_to_strings(flatlist) print flatlist for fname in flatlist: hdr, dat, bs = IO.readmosfits(fname, options, extension=extension) try: bs0 except: bs0 = bs if np.any(bs0.pos != bs.pos): raise Exception("Barsets do not seem to match") if hdr["filter"] != band: print ("Filter name %s does not match header filter name " "%s in file %s" % (band, hdr["filter"], fname)) for i in xrange(len(bpos)): b = hdr["B{0:02d}POS".format(i+1)] if bpos[i] == -1: bpos[i] = b else: if bpos[i] != b: raise Exception("Bar positions are not all the same in " "this set of flat files") bs = bs0 # Imcombine if True: print "Attempting to combine: ", flatlist combine(flatlist, maskname, band, options) print "Combined '%s' to '%s'" % (flatlist, maskname) path = "combflat_2d_%s.fits" % band (header, data) = IO.readfits(path, use_bpm=True) print "Flat written to %s" % path # Edge Trace results = find_and_fit_edges(data, header, bs, options) results[-1]["maskname"] = maskname results[-1]["band"] = band np.save("slit-edges_{0}".format(band), results) save_ds9_edges(results, options) # Generate Flat out = "pixelflat_2d_%s.fits" % (band) make_pixel_flat(data, results, options, out, flatlist) print "Pixel flat took {0:6.4} s".format(time.time()-tick)
def go(maskname, band, filenames, wavefile, wavoptions, longoptions, use_flat=False):
    '''
    The go command is the main entry point into this module.

    Inputs:
        maskname: String of the mask name
        band: String of 'Y', 'J', 'H', or 'K'
        filenames: List of filenames to reduce
        wavefile: String of path to FITS file with the wavelength solution
        wavoptions: The Wavelength Options dictionary
        longoptions: Dictionary containing:
            {'yrange': The pixel range to extract over
            'row_position': The row to solve the initial wavelength solution on}
        use_flat: Boolean False [default] means to use no flat field
            Boolean True means to divide by the pixelflat
    '''
    wavename = Wavelength.filelist_to_wavename(filenames, band, maskname,
            wavoptions).rstrip(".fits")
    print "Wavefile: {0}".format(wavefile)
    lamhdr, lamdat = IO.readfits(wavefile)

    # Load each nod position; exactly one FITS file per list entry is required.
    positions = []
    objname = None
    for listfile in filenames:
        fnames = IO.list_file_to_strings(listfile)
        if len(fnames) != 1:
            raise Exception("I currently expect only one file per position. Remove multiple entries and try again")

        header, data, bs = IO.readmosfits(fnames[0], wavoptions)

        if objname is None:
            objname = header["object"]

        if objname != header["object"]:
            # Mixed objects are reported but tolerated.
            print ("Trying to combine longslit stack of object {0} "
                    "with object {1}".format(objname, header["object"]))

        # NOTE(review): 'file' here is the Python 2 builtin type, not the
        # current filename -- looks like a bug; presumably fnames[0] was
        # intended. Left unchanged; confirm before fixing.
        print("{0:18s} {1:30s} {2:2s} {3:4.1f}".format(file, header["object"],
            header["frameid"], header["yoffset"]))

        positions.append([fnames[0], header, data, bs])

    print("{0:2g} nod positions found. Producing stacked difference" \
            " image.".format(len(positions)))

    # Difference each consecutive pair of nod positions in both orders
    # (A-B and B-A), optionally flat-field, then rectify both products.
    for i in xrange(len(positions)-1):
        A = positions[i]
        B = positions[i+1]
        # NOTE(review): format string has no placeholders, so A and B are
        # ignored; prints a fixed separator.
        print("----------- -----".format(A,B))

        dname, varname = imdiff(A, B, maskname, band, header, wavoptions)
        if use_flat:
            apply_flat(dname, maskname, band)
            apply_flat(varname, maskname, band)
        rectify(dname, lamdat, A, B, maskname, band, wavoptions,
                longoptions)
        rectify(varname, lamdat, A, B, maskname, band, wavoptions,
                longoptions)
        print dname

        dname, vname = imdiff(B, A, maskname, band, header, wavoptions)
        if use_flat:
            apply_flat(dname, maskname, band)
            apply_flat(vname, maskname, band)
        rectify(dname, lamdat, B, A, maskname, band, wavoptions,
                longoptions)
        rectify(vname, lamdat, B, A, maskname, band, wavoptions,
                longoptions)

    # Dead code (never executes); references 'path'/'dname' that are not
    # defined on this path. Kept for historical reference.
    if False:
        fname = os.path.join(path, wavename + ".fits")
        B = IO.readfits(fname)
        B = [fname, B[0], B[1]]
        for i in xrange(len(positions)):
            A = positions[i]
            imdiff(A, B, maskname, band, wavoptions)
            rectify(path, dname, lamdat, A, B, maskname, band, wavoptions,
                    longoptions)
            imdiff(B, A, maskname, band, wavoptions)
            rectify(path, dname, lamdat, B, A, maskname, band, wavoptions,
                    longoptions)
# Scratch/driver script: loads a first-light exposure with its flat and
# wavelength solution and selects a slit region for inspection. Repeated
# reassignments below mean only the LAST value of path/dat/yroi takes effect.
from scipy import signal
import scipy.ndimage
import matplotlib
import pyfits as pf

import MOSFIRE.Options as options
from MOSFIRE import IO, Fit, Wavelength

pl.ion()

# Only the last path assignment is used.
# NOTE(review): it lacks a trailing slash, so the concatenations below form
# ".../NGC5053m120406_0291.fits" -- likely a typo; confirm against the data tree.
path = "/users/npk/desktop/c9_reduce/npk_calib3_q1700_pa_0/"
path = "/scr2/mosfire/c9_npk/npk_calib3_q1700_pa_0/"
path = "/scr2/mosfire/firstlight/NGC5053"
mdat = IO.readmosfits(path + "m120406_0291.fits")
fdat = IO.readfits(path + "pixelflat_2d_J.fits")
ldat = IO.readfits(path + "lambda_solution_m120406_0291.fits")

# Detector gain in electrons/ADU -- TODO confirm value source.
gain = 2.15
# Second assignment overrides the first: the flat division is effectively
# disabled here.
dat = mdat[1]/fdat[1] * gain
dat = mdat[1] * gain
lam = ldat[1]

# Zero out NaNs/Infs so downstream math doesn't propagate them.
dat[np.logical_not(np.isfinite(dat))] = 0.

# Candidate slit regions; only the last yroi is used.
yroi=slice(86, 160)
yroi=slice(173, 203)
yroi=slice(1015, 1090)
def go(maskname, band, filenames, wavefile, wavoptions, longoptions, use_flat=False): ''' The go command is the main entry point into this module. Inputs: maskname: String of the mask name band: String of 'Y', 'J', 'H', or 'K' filenames: List of filenames to reduce wavefile: String of path to FITS file with the wavelength solution wavoptions: The Wavelength Options dictionary longoptions: Dictionary containing: {'yrange': The pixel range to extract over 'row_position': The row to solve the initial wavelength solution on} use_flat: Boolean False [default] means to use no flat field Boolean True means to divide by the pixelflat ''' wavename = Wavelength.filelist_to_wavename(filenames, band, maskname, wavoptions).rstrip(".fits") print "Wavefile: {0}".format(wavefile) lamhdr, lamdat = IO.readfits(wavefile) positions = [] objname = None for listfile in filenames: fnames = IO.list_file_to_strings(listfile) if len(fnames) != 1: raise Exception( "I currently expect only one file per position. Remove multiple entries and try again" ) header, data, bs = IO.readmosfits(fnames[0], wavoptions) if objname is None: objname = header["object"] if objname != header["object"]: print( "Trying to combine longslit stack of object {0} " "with object {1}".format(objname, header["object"])) print("{0:18s} {1:30s} {2:2s} {3:4.1f}".format(file, header["object"], header["frameid"], header["yoffset"])) positions.append([fnames[0], header, data, bs]) print("{0:2g} nod positions found. 
Producing stacked difference" \ " image.".format(len(positions))) for i in xrange(len(positions) - 1): A = positions[i] B = positions[i + 1] print("----------- -----".format(A, B)) dname, varname = imdiff(A, B, maskname, band, header, wavoptions) if use_flat: apply_flat(dname, maskname, band) apply_flat(varname, maskname, band) rectify(dname, lamdat, A, B, maskname, band, wavoptions, longoptions) rectify(varname, lamdat, A, B, maskname, band, wavoptions, longoptions) print dname dname, vname = imdiff(B, A, maskname, band, header, wavoptions) if use_flat: apply_flat(dname, maskname, band) apply_flat(vname, maskname, band) rectify(dname, lamdat, B, A, maskname, band, wavoptions, longoptions) rectify(vname, lamdat, B, A, maskname, band, wavoptions, longoptions) if False: fname = os.path.join(path, wavename + ".fits") B = IO.readfits(fname) B = [fname, B[0], B[1]] for i in xrange(len(positions)): A = positions[i] imdiff(A, B, maskname, band, wavoptions) rectify(path, dname, lamdat, A, B, maskname, band, wavoptions, longoptions) imdiff(B, A, maskname, band, wavoptions) rectify(path, dname, lamdat, B, A, maskname, band, wavoptions, longoptions)