def readfits(path, use_bpm=False):
    '''Read a FITS file from path and return a tuple of (header, data).

    If a gzipped version of the file exists at path + ".gz", it is read
    instead. If use_bpm is True, the data are returned as a masked array
    with the bad pixel mask applied.'''

    if os.path.exists(path + ".gz"):
        path = path + ".gz"

    if not os.path.exists(path):
        error("The file at path '%s' does not exist." % path)
        raise Exception("The file at path '%s' does not exist." % path)

    hdulist = pf.open(path)
    header = hdulist[0].header
    data = hdulist[0].data

    if "DATASEC" in header:
        debug("%s contains a DATASEC keyword not compatible with "
              "the pipeline" % path)
        debug("The content of the keyword will be erased on the reduced data")
        del header["DATASEC"]

    if use_bpm:
        theBPM = badpixelmask()
        data = np.ma.masked_array(data, theBPM, fill_value=0)

    return (header, data)
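# Usage sketch (not from the original module; the file name is illustrative).
# With use_bpm=True the data come back as a numpy masked array, so masked
# statistics skip the bad pixels.
def _example_readfits():
    header, data = readfits("combflat_2d_H.fits", use_bpm=True)
    info("Median flat level: %f" % np.ma.median(data))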
def science_slit_to_pixel(self, scislit):
    """Convert a science slit number to a spatial pixel"""

    if (scislit < 1) or (scislit > len(self.ssl)):
        error("The requested science slit number %i does not exist" %
              scislit)
        raise Exception("The requested science slit number %i does not "
                        "exist" % scislit)

    slits = self.scislit_to_csuslit(scislit)
    debug(str(slits))
    return self.csu_slit_to_pixel(np.median(slits))
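# Usage sketch (illustrative; assumes this method lives on the Barset
# object `bs` returned by readmosfits): locate the detector row of science
# slit 3. Slit numbering is one-based and bounded by len(bs.ssl).
def _example_science_slit_to_pixel(bs):
    row = bs.science_slit_to_pixel(3)
    info("Science slit 3 falls near detector row %i" % row)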
def list_file_to_strings(fname):
    '''Read the filename in fname and convert to a series of paths.
    This emulates IRAF's @file system. In addition, the first line of the
    file can be an absolute path.

    Example:
        list.txt
        /path/to/files
        file1
        file2
        file3

    returns ['/path/to/files/file1', '/path/to/files/file2',
        '/path/to/files/file3']

    whereas
        list.txt
        file1
        file2
        file3

    returns ['file1', 'file2', 'file3']
    '''

    filelist = fname
    if type(fname) == str:
        filelist = [fname]

    if len(filelist) == 0:
        return []
    # A list of FITS files needs no expansion; pass it through unchanged.
    if filelist[0][-5:] == '.fits':
        return filelist

    output = []
    for fname in filelist:
        debug("Loading: %s" % fname)
        inputs = np.loadtxt(fname, dtype=[("f", "S100")])
        path = ""
        start_index = 0
        if len(inputs):
            if os.path.isabs(inputs[0][0]):
                path = inputs[0][0]
                start_index = 1

        for i in xrange(start_index, len(inputs)):
            output.append(os.path.join(path, inputs[i][0]))

    return output
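# Usage sketch (illustrative file name): expand an IRAF-style @file. If the
# first line of 'flats.txt' is an absolute path, it is prepended to every
# following entry; a list of '.fits' names passes through unchanged.
def _example_list_file_to_strings():
    return list_file_to_strings('flats.txt')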
def handle_flats(flatlist, maskname, band, options, extension=None,
                 edgeThreshold=450, lampOffList=None, longslit=None):
    '''handle_flats is the primary entry point to the Flats module.

    handle_flats takes a list of individual exposure FITS files and creates:
    1. A CRR, dark subtracted, pixel-response flat file.
    2. A set of polynomials that mark the edges of a slit

    Inputs:
        flatlist: The list of flat field exposures (or an @-style list file)
        maskname: The name of a mask
        band: A string indicating the band

    Outputs:
        file {maskname}/pixelflat_2d_{band}.fits -- pixel response flat
        file {maskname}/slit-edges_{band}.npy -- slit edge traces
    '''

    tick = time.time()

    # Check
    bpos = np.ones(92) * -1

    # Retrieve the list of files to use for flat creation.
    flatlist = IO.list_file_to_strings(flatlist)
    # Print the filenames to standard out
    for flat in flatlist:
        info(str(flat))

    # Determine whether the flat file headers are in agreement
    bs0 = None
    for fname in flatlist:
        hdr, dat, bs = IO.readmosfits(fname, options, extension=extension)

        if bs0 is None:
            bs0 = bs

        if np.any(bs0.pos != bs.pos):
            debug("bs0: " + str(bs0.pos) + " bs: " + str(bs.pos))
            error("Barsets do not seem to match")
            raise Exception("Barsets do not seem to match")

        if hdr["filter"] != band:
            error("Filter name %s does not match header filter name "
                  "%s in file %s" % (band, hdr["filter"], fname))
            raise Exception("Filter name %s does not match header filter "
                            "name %s in file %s" %
                            (band, hdr["filter"], fname))

        for i in xrange(len(bpos)):
            b = hdr["B{0:02d}POS".format(i + 1)]
            if bpos[i] == -1:
                bpos[i] = b
            elif bpos[i] != b:
                error("Bar positions are not all the same in "
                      "this set of flat files")
                raise Exception("Bar positions are not all the same in "
                                "this set of flat files")

    bs = bs0

    # Imcombine the lamps ON flats
    info("Attempting to combine flat files")
    combine(flatlist, maskname, band, options)

    # Imcombine the lamps OFF flats and subtract the off from the on sets
    if lampOffList is not None:
        # Retrieve the list of files to use for flat creation.
        lampOffList = IO.list_file_to_strings(lampOffList)
        # Print the filenames to standard out
        for flat in lampOffList:
            info(str(flat))
        info("Attempting to combine lamps off data")
        combine(lampOffList, maskname, band, options, lampsOff=True)
        combine_off_on(maskname, band, options)

    debug("Combined '%s' to '%s'" % (flatlist, maskname))
    info("Combined to '%s'" % (maskname))
    path = "combflat_2d_%s.fits" % band
    (header, data) = IO.readfits(path, use_bpm=True)
    info("Flat written to %s" % path)

    # Edge Trace
    if bs.long_slit:
        info("Long slit mode recognized")
        info("Central row position: " + str(longslit["row_position"]))
        info("Upper and lower limits: " + str(longslit["yrange"][0]) +
             " " + str(longslit["yrange"][1]))
        results = find_longslit_edges(data, header, bs, options,
                                      edgeThreshold=edgeThreshold,
                                      longslit=longslit)
    elif bs.long2pos_slit:
        info("Long2pos mode recognized")
        results = find_long2pos_edges(data, header, bs, options,
                                      edgeThreshold=edgeThreshold,
                                      longslit=longslit)
    else:
        results = find_and_fit_edges(data, header, bs, options,
                                     edgeThreshold=edgeThreshold)

    results[-1]["maskname"] = maskname
    results[-1]["band"] = band
    np.save("slit-edges_{0}".format(band), results)
    save_ds9_edges(results, options)

    # Generate Flat
    out = "pixelflat_2d_%s.fits" % (band)
    if lampOffList is not None:
        make_pixel_flat(data, results, options, out, flatlist, lampsOff=True)
    else:
        make_pixel_flat(data, results, options, out, flatlist, lampsOff=False)

    info("Pixel flat took {0:6.4} s".format(time.time() - tick))
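# Usage sketch (mask name and list file names are illustrative). For masks
# observed with thermal (lamps-off) flats in K band, pass both lists so the
# off frames are subtracted from the on frames before edge tracing.
def _example_handle_flats(options):
    handle_flats('Flat.txt', 'example_mask', 'K', options,
                 lampOffList='FlatThermal.txt')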
def imcombine(filelist, out, options, method="average", reject="none",
              lsigma=3, hsigma=3, mclip=False,
              nlow=None, nhigh=None):
    '''Combines images in input list with optional rejection algorithms.

    Args:
        filelist: The list of files to imcombine
        out: The full path to the output file
        options: Options dictionary
        method: either "average" or "median" combine
        reject: none, minmax, sigclip
        nlow, nhigh: Parameters for minmax rejection, see IRAF docs
        mclip: use median as the function to calculate the baseline
            values for sigclip rejection?
        lsigma, hsigma: low and high sigma rejection thresholds

    Returns:
        None

    Side effects:
        Creates the imcombined file at location `out'
    '''
    assert method in ['average', 'median']
    if os.path.exists(out):
        os.remove(out)

    if reject == 'none':
        info('Combining files using ccdproc.combine task')
        info('  reject=none')
        for file in filelist:
            debug('  Combining: {}'.format(file))
        ccdproc.combine(filelist, out, method=method,
                        minmax_clip=False,
                        iraf_minmax_clip=True,
                        sigma_clip=False,
                        unit="adu")
        info('  Done.')
    elif reject == 'minmax':
        ## The IRAF imcombine minmax rejection behavior is different than
        ## the ccdproc minmax rejection behavior.  We are using the
        ## IRAF-like behavior here.  To support this, a pull request for
        ## the ccdproc package has been made:
        ##   https://github.com/astropy/ccdproc/pull/358
        ##
        ## Note that the ccdproc behavior still differs slightly from the
        ## nominal IRAF behavior in that the rejection does not consider
        ## whether any of the rejected pixels have been rejected for other
        ## reasons, so if nhigh=1 and that pixel was masked for some other
        ## reason, the new ccdproc algorithm will not mask the next highest
        ## pixel; it will still just mask the highest pixel even if it is
        ## already masked.
        ##
        ## From IRAF (help imcombine):
        ##   nlow = 1,  nhigh = 1 (minmax)
        ##       The number of low and high pixels to be rejected by the
        ##       "minmax" algorithm.  These numbers are converted to
        ##       fractions of the total number of input images so that if
        ##       no rejections have taken place the specified number of
        ##       pixels are rejected while if pixels have been rejected by
        ##       masking, thresholding, or non-overlap, then the fraction
        ##       of the remaining pixels, truncated to an integer, is used.

        ## Check that minmax rejection is possible given the number of images
        if nlow is None:
            nlow = 0
        if nhigh is None:
            nhigh = 0
        if nlow + nhigh >= len(filelist):
            warning('nlow + nhigh >= number of input images. '
                    'Combining without rejection')
            nlow = 0
            nhigh = 0

        if ccdproc.version.major >= 1 and ccdproc.version.minor >= 1\
           and ccdproc.version.release:
            info('Combining files using ccdproc.combine task')
            info('  reject=clip_extrema')
            info('  nlow={}'.format(nlow))
            info('  nhigh={}'.format(nhigh))
            for file in filelist:
                info('  {}'.format(file))
            ccdproc.combine(filelist, out, method=method,
                            minmax_clip=False,
                            clip_extrema=True,
                            nlow=nlow, nhigh=nhigh,
                            sigma_clip=False,
                            unit="adu")
            info('  Done.')
        else:
            ## If ccdproc does not have the new rejection algorithm in:
            ##   https://github.com/astropy/ccdproc/pull/358
            ## manually perform rejection using the
            ## ccdproc.combiner.Combiner object.
            info('Combining files using local clip_extrema rejection '
                 'algorithm and the ccdproc.combiner.Combiner object.')
            info('  reject=clip_extrema')
            info('  nlow={}'.format(nlow))
            info('  nhigh={}'.format(nhigh))
            for file in filelist:
                info('  {}'.format(file))
            ccdlist = []
            for file in filelist:
                ccdlist.append(ccdproc.CCDData.read(file, unit='adu', hdu=0))
            c = ccdproc.combiner.Combiner(ccdlist)
            nimages, nx, ny = c.data_arr.mask.shape
            argsorted = np.argsort(c.data_arr.data, axis=0)
            mg = np.mgrid[0:nx, 0:ny]
            # Mask the nhigh highest and nlow lowest values at each pixel:
            # negative indices select the top of the sort order,
            # non-negative indices the bottom.
            for i in range(-1 * nhigh, nlow):
                where = (argsorted[i, :, :].ravel(),
                         mg[0].ravel(),
                         mg[1].ravel())
                c.data_arr.mask[where] = True
            if method == 'average':
                result = c.average_combine()
            elif method == 'median':
                result = c.median_combine()
            for key in ccdlist[0].header.keys():
                header_entry = ccdlist[0].header[key]
                if key != 'COMMENT':
                    result.header[key] = (header_entry,
                                          ccdlist[0].header.comments[key])
            hdul = result.to_hdu()
            hdul[0].writeto(out)
            info('  Done.')
    elif reject == 'sigclip':
        info('Combining files using ccdproc.combine task')
        info('  reject=sigclip')
        info('  mclip={}'.format(mclip))
        info('  lsigma={}'.format(lsigma))
        info('  hsigma={}'.format(hsigma))
        baseline_func = {False: np.mean, True: np.median}
        ccdproc.combine(filelist, out, method=method,
                        minmax_clip=False,
                        clip_extrema=False,
                        sigma_clip=True,
                        sigma_clip_low_thresh=lsigma,
                        sigma_clip_high_thresh=hsigma,
                        sigma_clip_func=baseline_func[mclip],
                        sigma_clip_dev_func=np.std)
        info('  Done.')
    else:
        raise NotImplementedError('{} rejection unrecognized by '
                                  'MOSFIRE DRP'.format(reject))
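# Usage sketch (file names illustrative): median-combine ten frames while
# rejecting the single highest and lowest value at each pixel, mimicking
# IRAF's minmax rejection.
def _example_imcombine_minmax(options):
    files = ['flat_%04i.fits' % i for i in range(1, 11)]
    imcombine(files, 'combflat_2d_K.fits', options,
              method='median', reject='minmax', nlow=1, nhigh=1)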
def readmosfits(fname, options, extension=None):
    '''Read a FITS file written by MOSFIRE from path and return a tuple of
    (header, data, bs), where bs is a CSU.Barset built from the Target
    List, Science Slit List (SSL), Mechanical Slit List (MSL), and
    Alignment Slit List (ASL) extensions.

    Note: the extension parameter is typically not used; it is only needed
    if the detector server does not append the slit extensions.
    '''

    if os.path.isabs(fname):
        path = fname
    else:
        path = os.path.join(fname_to_path(fname, options), fname)

    hdulist = pf.open(path)
    header = hdulist[0].header
    data = hdulist[0].data
    theBPM = badpixelmask()
    data = np.ma.masked_array(data, theBPM)

    if extension is not None:
        hdulist = pf.open(extension)

    try:
        header = hdulist[0].header
        if "DATASEC" in header:
            debug("%s contains a DATASEC keyword not compatible with "
                  "the pipeline" % path)
            debug("The content of the keyword will be erased on the "
                  "reduced data")
            del header["DATASEC"]
        targs = hdulist[1].data
        ssl = hdulist[2].data
        msl = hdulist[3].data
        asl = hdulist[4].data
    except:
        error("Improper MOSFIRE FITS File: %s" % path)
        raise Exception("Improper MOSFIRE FITS File: %s" % path)

    # if np.abs(header["REGTMP1"] - 77) > 0.1:
    #     warning("**************************************")
    #     warning("The temperature of the detector is %3.3f where it "
    #             "should be 77.000 deg. Please notify Keck support staff." %
    #             header["REGTMP1"])

    ssl = ssl[ssl.field("Slit_Number") != ' ']
    msl = msl[msl.field("Slit_Number") != ' ']
    asl = asl[asl.field("Slit_Number") != ' ']

    # Eliminate position B of the long2pos slit
    ssl = ssl[ssl.field("Target_Name") != 'posB']
    msl = msl[msl.field("Target_in_Slit") != 'posB']
    asl = asl[asl.field("Target_in_Slit") != 'posBalign']
    targs = targs[targs.field("Target_Name") != 'posB']
    targs = targs[targs.field("Target_Name") != 'posBalign']

    bs = CSU.Barset()
    bs.set_header(header, ssl=ssl, msl=msl, asl=asl, targs=targs)

    return (header, data, bs)
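# Usage sketch (illustrative frame name): read a raw MOSFIRE frame together
# with its slit tables. The returned Barset carries the SSL/MSL/ASL tables
# with long2pos position-B entries already filtered out.
def _example_readmosfits(options):
    header, data, bs = readmosfits('m130114_0123.fits', options)
    info("Mask %s has %i science slits" % (header['maskname'], len(bs.ssl)))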
def writefits(img, maskname, fname, options, header=None, bs=None,
              overwrite=False, lossy_compress=False):
    '''Convenience wrapper to write MOSFIRE DRP-friendly FITS files

    Args:
        img: Data array to write to disk
        maskname: Name of the science mask
        fname: Full or relative path to output file
        options: {} Unused
        header: Optional, the header to write
        bs: Optional, unused
        overwrite: Force overwrite of file, default False/No
        lossy_compress: Zero out the lowest order bits of the floats in
            order to make FITS files amenable to compression. The loss is
            at least 10x less than 5 e-, which is the lowest reasonable
            read-noise value.

    Results:
        Writes a file to fname with data img and header header.
    '''

    if lossy_compress:
        hdu = pf.PrimaryHDU(floatcompress(img))
    else:
        hdu = pf.PrimaryHDU(img)

    fn = fname

    if header is None:
        header = pf.Header()
        header["DRPVER"] = (MOSFIRE.__version__, "DRP Version Date")
    else:
        header["DRPVER"] = (MOSFIRE.__version__, 'DRP Version Date')

    warnings.filterwarnings('ignore')
    for k, value, comment in header.cards:
        if k in hdu.header:
            continue
        if k == 'COMMENT':
            continue
        if k == '':
            continue
        k = k.rstrip()
        hdu.header[k] = (value, comment)
    warnings.filterwarnings('always')

    if overwrite:
        try:
            os.remove(fn)
            debug("Removed old file '{0}'".format(fn))
        except OSError:
            pass

    info("Wrote to '%s'" % (fn))
    warnings.filterwarnings('ignore', 'Card is too long, comment will be '
                            'truncated.')
    hdu.writeto(fn)
    warnings.filterwarnings('always')

    if lossy_compress:
        os.system("gzip --force {0}".format(fn))
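# Usage sketch (names illustrative): write an electrons-per-second image,
# carrying over the stack header and gzipping via lossy_compress. The
# rounding loss is well below the read noise, per the docstring.
def _example_writefits(eps, maskname, options, header):
    writefits(np.float32(eps), maskname, 'eps_example.fits', options,
              header=header, overwrite=True, lossy_compress=True)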
def imcombine(files, maskname, options, flat, outname=None, shifts=None,
              extension=None):
    '''imcombine stacks a list of MOSFIRE frames into a single image.

    The imcombine code also estimates the read noise as RN/sqrt(numreads)
    so that the variance per frame is equal to (ADU + RN^2), where RN is
    computed in ADUs.

    Arguments:
        files[]: list of full paths to the files to combine
        maskname: Name of mask
        options: Options dictionary
        flat[2048x2048]: Flat field (values should all be ~ 1.0)
        outname: If set, will write (see notes below for details)
            eps_[outname].fits: electron/sec file
            itimes_[outname].fits: integration time
            var_[outname].fits: variance file
        shifts[len(files)]: If set, will "roll" each file by the amount in
            the shifts vector in pixels. This argument is used when
            telescope tracking is poor. If you need to use this, please
            notify Keck staff about poor telescope tracking.

    Returns a 6-element tuple:
        header: The combined header
        electrons[2048x2048]: e- (in e- units)
        var[2048x2048]: electrons + RN**2 (in e-^2 units)
        bs: The MOSFIRE.Barset instance
        itimes[2048x2048]: itimes (in s units)
        Nframe: The number of frames that contribute to the summed arrays
            above. If Nframe >= 9 the sigma-clipping Cosmic Ray Rejection
            tool is used; if 5 < Nframe < 9 the max/min elements are
            dropped; otherwise no rejection is performed.

    Notes:
        header -- FITS header
        ADUs -- The mean # of ADUs per frame
        var -- the variance [in ADU] per frame
        bs -- Barset
        itimes -- The _total_ integration time in seconds
        Nframe -- The number of frames in a stack

        Thus the number of electrons per second is derived as:
            e-/sec = (ADUs * Gain / Flat) * (Nframe / itimes)

        The total number of electrons is:
            el = ADUs * Gain * Nframe
    '''

    ADUs = np.zeros((len(files), 2048, 2048))
    itimes = np.zeros((len(files), 2048, 2048))
    prevssl = None
    patternid = None
    maskname = None
    header = None

    if shifts is None:
        shifts = np.zeros(len(files))

    warnings.filterwarnings('ignore')
    for i in xrange(len(files)):
        fname = files[i]
        thishdr, data, bs = IO.readmosfits(fname, options,
                                           extension=extension)
        itimes[i,:,:] = thishdr["truitime"]

        # Derive the frame number from names of the form prefix_NNNN.fits
        base = os.path.basename(fname)
        if base.endswith(".fits"):
            base = base[:-len(".fits")]
        fnum = int(base.split("_")[1])

        if shifts[i] == 0:
            ADUs[i,:,:] = data.filled(0.0) / flat
        else:
            ADUs[i,:,:] = np.roll(data.filled(0.0) / flat,
                                  np.int(shifts[i]), axis=0)

        ''' Construct Header'''
        if header is None:
            header = thishdr

        header["imfno%3.3i" % (fnum)] = (fname, "img%3.3i file name" % fnum)

        for key in ["CTYPE1", "CTYPE2", "WCSDIM", "CD1_1", "CD1_2",
                    "CD2_1", "CD2_2", "LTM1_1", "LTM2_2", "WAT0_001",
                    "WAT1_001", "WAT2_001", "CRVAL1", "CRVAL2", "CRPIX1",
                    "CRPIX2", "RADECSYS"]:
            rem_header_key(header, key)

        for card in header.cards:
            if card == '':
                continue
            key, val, comment = card

            if key in thishdr:
                if val != thishdr[key]:
                    newkey = key + ("_img%2.2i" % fnum)
                    try:
                        header[newkey.rstrip()] = (thishdr[key], comment)
                    except:
                        pass

        ''' Now handle error checking'''
        if maskname is not None:
            if thishdr["maskname"] != maskname:
                error("File %s uses mask '%s' but the stack is of '%s'" %
                      (fname, thishdr["maskname"], maskname))
                raise Exception("File %s uses mask '%s' but the stack is "
                                "of '%s'" %
                                (fname, thishdr["maskname"], maskname))

        maskname = thishdr["maskname"]

        if thishdr["aborted"]:
            error("Img '%s' was aborted and should not be used" % fname)
            raise Exception("Img '%s' was aborted and should not be used" %
                            fname)

        if prevssl is not None:
            if len(prevssl) != len(bs.ssl):
                # todo Improve these checks
                error("The stack of input files seems to be of "
                      "different masks")
                raise Exception("The stack of input files seems to be of "
                                "different masks")
        prevssl = bs.ssl

        if patternid is not None:
            if patternid != thishdr["frameid"]:
                error("The stack should be of '%s' frames only, but "
                      "the current image is a '%s' frame." %
                      (patternid, thishdr["frameid"]))
                raise Exception("The stack should be of '%s' frames only, "
                                "but the current image is a '%s' frame." %
                                (patternid, thishdr["frameid"]))
        patternid = thishdr["frameid"]

        if maskname is not None:
            if maskname != thishdr["maskname"]:
                error("The stack should be of CSU mask '%s' frames "
                      "only but contains a frame of '%s'." %
                      (maskname, thishdr["maskname"]))
                raise Exception("The stack should be of CSU mask '%s' "
                                "frames only but contains a frame of "
                                "'%s'." % (maskname, thishdr["maskname"]))
        maskname = thishdr["maskname"]

        if thishdr["BUNIT"] != "ADU per coadd":
            error("The units of '%s' are not in ADU per coadd and "
                  "this violates an assumption of the DRP. Some new code "
                  "is needed in the DRP to handle the new units of "
                  "'%s'." % (fname, thishdr["BUNIT"]))
            raise Exception("The units of '%s' are not in ADU per coadd "
                            "and this violates an assumption of the DRP. "
                            "Some new code is needed in the DRP to handle "
                            "the new units of '%s'." %
                            (fname, thishdr["BUNIT"]))

        ''' Error checking is complete'''
        debug("%s %s[%s]/%s: %5.1f s, Shift: %i px" %
              (fname, maskname, patternid, header['filter'],
               np.mean(itimes[i]), shifts[i]))

    warnings.filterwarnings('always')

    # The electrons and el_per_sec arrays are [len(files), 2048, 2048] and
    # contain values for each individual frame that is being combined.
    # These need to be kept here for CRR reasons.
    electrons = np.array(ADUs) * Detector.gain
    el_per_sec = electrons / itimes

    # Cosmic ray rejection code begins here. This code constructs the
    # electrons and itimes arrays.
    standard = True
    new_from_chuck = False
    # Chuck Steidel has provided a modified version of the CRR procedure;
    # to enable it, modify the two variables above.
    if new_from_chuck and not standard:
        if len(files) >= 5:
            info("Sigclip CRR")
            srt = np.argsort(electrons, axis=0, kind='quicksort')
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            el_per_sec = el_per_sec[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            # Construct the mean and standard deviation by dropping the
            # top and bottom electron flux. This is temporary.
            mean = np.mean(el_per_sec[1:-1,:,:], axis=0)
            std = np.std(el_per_sec[1:-1,:,:], axis=0)

            drop = np.where((el_per_sec > (mean + std * 4)) |
                            (el_per_sec < (mean - std * 4)))
            info("dropping: " + str(len(drop[0])))
            electrons[drop] = 0.0
            itimes[drop] = 0.0

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files)
        else:
            warning("With fewer than 5 frames, the pipeline does NOT "
                    "perform Cosmic Ray Rejection.")
            # The "if False" line disables cosmic ray rejection
            if False:
                for i in xrange(len(files)):
                    el = electrons[i,:,:]
                    it = itimes[i,:,:]
                    el_mf = scipy.signal.medfilt(el, 5)
                    bad = np.abs(el - el_mf) / np.abs(el) > 10.0
                    el[bad] = 0.0
                    it[bad] = 0.0
                    electrons[i,:,:] = el
                    itimes[i,:,:] = it

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files)

    if standard and not new_from_chuck:
        if len(files) >= 9:
            info("Sigclip CRR")
            srt = np.argsort(electrons, axis=0, kind='quicksort')
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            el_per_sec = el_per_sec[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            # Construct the mean and standard deviation by dropping the top
            # and bottom two electron fluxes. This is temporary.
            mean = np.mean(el_per_sec[2:-2,:,:], axis=0)
            std = np.std(el_per_sec[2:-2,:,:], axis=0)

            drop = np.where((el_per_sec > (mean + std * 4)) |
                            (el_per_sec < (mean - std * 4)))
            info("dropping: " + str(len(drop[0])))
            electrons[drop] = 0.0
            itimes[drop] = 0.0

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files)
        elif len(files) > 5:
            warning("WARNING: Drop min/max CRR")
            srt = np.argsort(el_per_sec, axis=0)
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            electrons = np.sum(electrons[1:-1,:,:], axis=0)
            itimes = np.sum(itimes[1:-1,:,:], axis=0)
            Nframe = len(files) - 2
        else:
            warning("With 5 or fewer frames, the pipeline does NOT "
                    "perform Cosmic Ray Rejection.")
            # The "if False" line disables cosmic ray rejection
            if False:
                for i in xrange(len(files)):
                    el = electrons[i,:,:]
                    it = itimes[i,:,:]
                    # Calculate the median image
                    el_mf = scipy.signal.medfilt(el, 5)
                    el_mf_large = scipy.signal.medfilt(el_mf, 15)
                    # LR: this is a modified version I was experimenting
                    # with. For the version written by Nick, see the
                    # new_from_chuck part of this code.
                    # Sky subtract
                    el_sky_sub = el_mf - el_mf_large
                    # Add a constant value
                    el_plus_constant = el_sky_sub + 100
                    bad = np.abs(el - el_mf) / np.abs(el_plus_constant) > 50.0
                    el[bad] = 0.0
                    it[bad] = 0.0
                    electrons[i,:,:] = el
                    itimes[i,:,:] = it

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files)

    ''' Now handle variance '''
    numreads = header["READS0"]
    RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
    RN = Detector.RN / np.sqrt(numreads)

    var = (electrons + RN**2)

    ''' Now mask out bad pixels '''
    electrons[data.mask] = np.nan
    var[data.mask] = np.inf

    if "RN" in header:
        error("RN Already populated in header")
        raise Exception("RN Already populated in header")
    header['RN'] = ("%1.3f" % RN, "Read noise in e-")
    header['NUMFRM'] = (Nframe, 'Typical number of frames in stack')

    header['BUNIT'] = 'ELECTRONS/SECOND'
    IO.writefits(np.float32(electrons / itimes), maskname,
                 "eps_%s" % (outname), options, header=header,
                 overwrite=True)

    # Update itimes after division in order to not introduce NaNs
    itimes[data.mask] = 0.0

    header['BUNIT'] = 'ELECTRONS^2'
    IO.writefits(var, maskname, "var_%s" % (outname), options,
                 header=header, overwrite=True, lossy_compress=True)

    header['BUNIT'] = 'SECOND'
    IO.writefits(np.float32(itimes), maskname, "itimes_%s" % (outname),
                 options, header=header, overwrite=True,
                 lossy_compress=True)

    return header, electrons, var, bs, itimes, Nframe
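# Usage sketch (argument names illustrative): stack one dither position and
# write the eps_/var_/itimes_ products. With 9 or more frames the
# sigma-clipping CRR runs; with 6 to 8 frames the per-pixel min/max values
# are dropped instead.
def _example_imcombine_stack(files_A, maskname, options, flat):
    header, el, var, bs, itimes, Nframe = imcombine(
        files_A, maskname, options, flat, outname='A.fits')
    info("Stacked %i frames; wrote eps_A.fits, var_A.fits, itimes_A.fits" %
         Nframe)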