def __init__(self, filenames, stackedbias, stackedbias_err, stackedbias_mask,
             hdu_num):
    self.filenames = filenames
    self.nfiles = len(filenames)
    self.stackedbias = fits.open(stackedbias)
    self.stackedbias_err = fits.open(stackedbias_err)
    self.stackedbias_mask = fits.open(stackedbias_mask)
    self.hdu_num = hdu_num

    # Read each raw flat and remove its overscan before stacking.
    self.images = []
    for filename in filenames:
        img = RawImage(filename, hdu_num)
        img.subtract_overscan()
        self.images.append(img)

    # Build a cube of bias-subtracted, median-normalized flats along with
    # inverse-variance weights for each frame.
    nx, ny = self.images[0].data.shape
    cube = np.zeros((self.nfiles, nx, ny))
    cube_weights = np.zeros_like(cube)
    for j in range(self.nfiles):
        cube[j, :, :] = self.images[j].data - self.stackedbias[1].data.T
        cube_err = np.sqrt(self.images[j].data_err**2 +
                           self.stackedbias_err[1].data.T**2)
        med = np.median(cube[j, :, :])
        cube[j, :, :] /= med
        # If we're normalizing the counts, we should do the same for
        # the errors?
        cube_err /= med
        cube_weights[j, :, :] = cube_err**-2.0

    cubemedian = np.median(cube, axis=0)
    # We took the median, not the weighted mean, for the counts, but
    # we'll approximate the error using the weights.
    # See http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Dealing_with_variance.
    sum_weights = np.sum(cube_weights, axis=0)
    cubestd = np.sqrt(sum_weights**-1.0)

    med = np.median(cubemedian)
    cubemedian /= med
    # Again, normalize the errors whenever we normalize the counts.
    cubestd /= med

    self.data = cubemedian
    self.data_err = cubestd

    # Use the superbias and superflat errors to create a mask, where
    # 0 = good pixel, 1 = bad bias pixel, and 2 = bad flat pixel.
    pixmask = self.stackedbias_mask[1].data.T
    flat_err = cubestd
    # Estimate a robust scatter for the flat errors from the 15.87th and
    # 84.13th percentiles (one sigma either side of the median for a
    # Gaussian distribution).
    cflat_err = flat_err - np.median(flat_err)
    ns = np.percentile(cflat_err, 15.87)
    ps = np.percentile(cflat_err, 84.13)
    sigma_err = max(np.abs(ns), np.abs(ps))
    # Flag pixels with zero error, or errors more than five robust sigma
    # from the median, that aren't already flagged in the superbias mask.
    bad_pix = (((flat_err == 0.0) |
                (np.abs(flat_err - np.median(flat_err)) > 5.0 * sigma_err)) &
               (pixmask == 0))
    pixmask[bad_pix] = 2
    self.data_mask = pixmask
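
# Aside: a minimal, self-contained sketch of the error approximation used
# above. For an inverse-variance weighted mean with weights w_j = 1/sigma_j**2,
# the variance of the mean is 1/sum_j(w_j) (the Wikipedia link above). We
# reuse that form here even though the counts come from a median, so the
# quoted flat error is only approximate. This helper is illustrative and is
# not called anywhere in the pipeline.
def _approx_stack_error(per_frame_err):
    # per_frame_err: (nframes, nx, ny) array of per-pixel sigmas.
    weights = per_frame_err**-2.0                   # w_j = 1 / sigma_j**2
    return np.sqrt(np.sum(weights, axis=0)**-1.0)   # 1 / sqrt(sum_j w_j)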
def __init__(self, filenames, hdu_num):
    self.filenames = filenames
    self.nfiles = len(filenames)
    self.hdu_num = hdu_num

    # Read each raw bias and remove its overscan before stacking.
    self.images = []
    for filename in filenames:
        img = RawImage(filename, hdu_num)
        img.subtract_overscan()
        self.images.append(img)

    nx, ny = self.images[0].data.shape
    cube = np.zeros((self.nfiles, nx, ny))
    cube_weights = np.zeros_like(cube)
    for j in range(self.nfiles):
        cube[j, :, :] = self.images[j].data
        cube_weights[j, :, :] = self.images[j].data_err**-2.0

    # Sigma-clip each pixel's stack at 3 sigma about the mean.
    cubeclipped = stats.sigma_clip(cube, sigma=3.0, cenfunc='mean', axis=0)
    # Compute the weighted mean for each pixel in the stack, using
    # only those weights for counts that weren't clipped.
    cubemean = np.ma.average(cubeclipped, axis=0, weights=cube_weights)
    # In the very unlikely (impossible?) event that all pixels in the
    # stack were masked, assume zero counts.
    cubemean = np.ma.filled(cubemean, 0.0)
    self.data = cubemean

    # Use pairwise differences of overscan-subtracted biases to estimate
    # the error: differencing cancels fixed structure, leaving only
    # random noise.
    rmses = []
    for i in range(0, self.nfiles - 1, 2):
        diff = (self.images[i + 1].data[self.images[i + 1].py_datasec] -
                self.images[i].data[self.images[i].py_datasec])
        mn = np.mean(diff)
        rms = np.sqrt(np.mean((diff - mn)**2.0))
        rmses.append(rms)
    mean_rms = np.mean(rmses)
    self.data_err = np.full_like(self.data, mean_rms)
    print("hdu %d, bias diff rms %f, scatter %f" % (
        hdu_num, mean_rms,
        np.sqrt(np.mean((np.array(rmses) - mean_rms)**2.0))))

    # Mask those pixels whose rms across the stack (computed from the
    # values that survived clipping) is greater than five times the rms
    # measured from pairwise differences.
    rms_arr = np.ma.average((cubeclipped - cubemean)**2.0, axis=0,
                            weights=cube_weights)
    rms_arr = np.sqrt(np.ma.filled(rms_arr, 0.0))
    pixmask = np.zeros_like(cubemean, dtype=int)
    bad_pix = ((rms_arr == 0.0) | (rms_arr > 5.0 * mean_rms)) & (pixmask == 0)
    pixmask[bad_pix] = 1
    self.data_mask = pixmask
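
# Aside: an illustrative check of the pairwise-difference noise estimator
# used above. Differencing two bias frames cancels fixed structure, so the
# rms of the difference measures random noise only; for two frames with
# equal per-pixel sigma, the difference has rms sigma*sqrt(2). The synthetic
# frames, names, and defaults below are assumptions for demonstration, not
# part of the pipeline.
def _demo_pairwise_rms(sigma=5.0, shape=(512, 512), seed=0):
    rng = np.random.RandomState(seed)
    frame_a = 1000.0 + rng.normal(0.0, sigma, shape)  # bias level + noise
    frame_b = 1000.0 + rng.normal(0.0, sigma, shape)
    diff = frame_b - frame_a                          # fixed level cancels
    return np.std(diff)  # expect ~ sigma * sqrt(2) ~ 7.07 for sigma = 5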
    filenames = glob.glob(os.path.join(args.stacked_bias, "d*.fits"))
    # Stack at most 100 biases, chosen at random.
    if len(filenames) > 100:
        np.random.shuffle(filenames)
        filenames = filenames[0:100]
    sb = StackedBias(filenames, hdu_num)
    sb.save()
elif args.stacked_image:
    filenames = glob.glob(os.path.join(args.stacked_image, "d*.fits"))
    np.random.shuffle(filenames)
    si = StackedFlat(
        filenames[0:100],
        "/scratch2/scratchdirs/nugent/ryan_counts/mar_biases/stackedbias.%02d.fits" % hdu_num,
        "/scratch2/scratchdirs/nugent/ryan_counts/mar_biases/stackedbias.err.%02d.fits" % hdu_num,
        # StackedFlat.__init__ also requires the superbias mask; this path
        # follows the naming pattern of the other superbias products.
        "/scratch2/scratchdirs/nugent/ryan_counts/mar_biases/stackedbias.mask.%02d.fits" % hdu_num,
        hdu_num,
    )
    si.save()
# At some point will want to rename the below data products to use %02d.
elif args.processed_image:
    filenames = sorted(glob.glob("/scratch2/scratchdirs/nugent/ryan_counts/20150313/d*.fits"))
    sb = "/scratch2/scratchdirs/nugent/ryan_counts/mar_biases/stackedbias.%02d.fits" % hdu_num
    sb_err = "/scratch2/scratchdirs/nugent/ryan_counts/mar_biases/stackedbias.err.%02d.fits" % hdu_num
    sf = "/scratch2/scratchdirs/nugent/ryan_counts/mar_1000/stackedflat.%02d.fits" % hdu_num
    sf_err = "/scratch2/scratchdirs/nugent/ryan_counts/mar_1000/stackedflat.err.%02d.fits" % hdu_num
    sf_mask = "/scratch2/scratchdirs/nugent/ryan_counts/mar_1000/stackedflat.mask.%02d.fits" % hdu_num
    for filename in filenames:
        # Skip files that have already been processed.
        if ".p." in filename:
            continue
        ri = RawImage(filename, hdu_num)
        ri.subtract_overscan()
        ri.subtract_stackedbias(sb, sb_err)
        ri.divide_stackedflat(sf, sf_err)
        ri.update_mask(sf_mask)
        ri.save()
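
# Aside: the args.* attributes referenced above imply an argument parser
# roughly like the sketch below. This is a hypothetical reconstruction for
# readability only (the real parser is defined elsewhere in the script);
# the option names are inferred from the attribute names, and whether
# --processed-image takes a value or is a flag is a guess.
def _example_arg_parser():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--stacked-bias",
                        help="directory of raw biases to stack")
    parser.add_argument("--stacked-image",
                        help="directory of raw flats to stack")
    parser.add_argument("--processed-image", action="store_true",
                        help="bias-subtract and flat-field the raw frames")
    return parser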