def __init__(self, calib_dir, offline=False, denom=1023):
    """Load per-pixel calibration data from *calib_dir*.

    Each missing calibration file degrades gracefully to a neutral
    default, with a console warning:
      - lens-shading map  -> uniform weights at full scale (`denom`)
      - hotcell list      -> no cells zeroed
      - black level       -> 0
    """
    self.denom = denom

    # Lens-shading map: convert float weights to integers out of `denom`.
    try:
        shading = load_weights(calib_dir)
        self.height, self.width = shading.shape
        self.weights = (shading.astype("f4") * self.denom).astype("i4")
    except IOError:
        # No map on disk: fall back to the sensor resolution and flat weights.
        self.width, self.height = load_res(calib_dir)
        self.weights = np.full((self.height, self.width), self.denom, dtype='i4')
        print('Could not load lens-shading map')

    # Zero the weights of known hotcells so they never contribute.
    # `self.hot` holds flat pixel indices; convert to (row, col).
    try:
        self.hot = load_hot(calib_dir, offline=offline)
        self.weights[self.hot // self.width, self.hot % self.width] = 0
    except IOError:
        print('Could not load hotcells')

    # Black level comes from the electron-gain calibration, if present.
    try:
        _, self.blk_lvl, _ = load_electrons(calib_dir)
    except IOError:
        self.blk_lvl = 0
        print('Could not load black level')
def compute(data):
    """Identify hotcells from accumulated per-pixel statistics.

    data -- npz-like mapping with arrays 'sum', 'num', 'ssq' and
            'second' (second-highest value seen per pixel); it is
            closed after the arrays are read.

    Uses thresholds from the module-global `args`; pixels exceeding any
    of the second-max / mean / variance thresholds are flagged as hot.
    Optionally commits the list to hot_online.npz and/or plots.
    """
    sum_ = data['sum']
    num_ = data['num']
    ssq_ = data['ssq']
    snd = data['second']
    data.close()

    mean = sum_ / num_
    # Unbiased sample variance: (E[x^2] - E[x]^2) * n/(n-1).
    var = (ssq_ / num_ - mean**2) * (num_ / (num_ - 1))

    total_res = mean.size
    indices = np.arange(total_res)

    if not args.raw:
        # Apply lens-shading weights; variance scales with the square.
        try:
            wgt = load_weights(args.calib).flatten()
            mean *= wgt
            var *= wgt**2
            snd *= wgt
        except IOError:
            print('lens.npz not found')
        # Restrict to non-dark pixels, keeping the original flat indices
        # so hotcells can be reported in full-sensor coordinates.
        try:
            cut = np.logical_not(load_dark(args.calib))
            mean = mean[cut]
            var = var[cut]
            snd = snd[cut]
            indices = indices[cut]
        except IOError:
            print('dark.npz not found')

    snd_thresh = args.snd_thresh
    mean_thresh = args.mean_thresh
    var_thresh = args.var_thresh

    # TODO: derive the second-max threshold heuristically (mean + std of
    # the large tail of the second-max distribution) instead of taking it
    # from the command line.  A previous dead `if False:` sketch of this
    # referenced an undefined name (`snd_max`) and was removed.

    # Keep a pixel only if it passes all three cuts; everything else is hot.
    snd_cut = (snd <= snd_thresh)
    mean_cut = (mean <= mean_thresh)
    var_cut = (var <= var_thresh)
    keep = snd_cut & mean_cut & var_cut

    comp_mean = mean[keep]
    comp_var = var[keep]
    comp_snd = snd[keep]
    hotcells = indices[np.logical_not(keep)]

    # calculate new means and variances to plot
    print('done.\ngetting data:')
    print('\n mean/variance array information: \n')
    print('mean size: ', mean.size)
    print('variance size: ', var.size)
    print('cut mean size: ', comp_mean.size)
    print('cut variance size: ', comp_var.size)
    print('\n computation information \n')
    print('total resolution: ', total_res)
    print('threshold: %.3f' % args.snd_thresh)
    print('number of hotcells: ', hotcells.size)
    print('%% of hotcells found: %.3f' % (100.0 * (hotcells.size / total_res)))

    if args.commit:
        export(hotcells, os.path.join(args.calib, 'hot_online.npz'))
    if args.plot:
        plot(mean, var, snd, comp_mean, comp_var, comp_snd, snd_thresh)
args = parser.parse_args() # get range from weights gmin = 0 blk_lvl = 0 try: gmin, blk_lvl, _ = load_electrons(args.calib) blk_lvl = int(blk_lvl) except FileNotFoundError: print('Could not find gain data. Run pixelstats/electrons.py first') try: # find first-order correction for varying saturation level # from weighting, i.e. fraction of the sensor with saturation # below each calibrated value wgt = load_weights(args.calib) print(wgt.shape) npix = (wgt.shape[1] - 2 * args.xborder) * wgt.shape[0] if args.xborder else wgt.size hist, _ = np.histogram(wgt * (1023 - blk_lvl) + 1, bins=np.arange(1025 - blk_lvl)) cumsum = np.cumsum(hist)[::args.bin_sz] sat_frac = cumsum[:-1] / cumsum[-1] except FileNotFoundError: print('Weights not found. Using equal weights.') sat_frac = np.zeros((1023 - blk_lvl) // args.bin_sz) width, height = load_res(args.calib) npix = (width - 2 * args.xborder) * height bins = np.arange(args.bin_sz, 1024 - blk_lvl, args.bin_sz)
def process(filename, args):
    """Plot mean/variance pixel statistics for one input file.

    filename -- raw file straight from the phone (with --raw) or an
                .npz produced by the combine.py utility.
    args     -- parsed command-line options.

    Side effects: may write a hot-pixel list (hot_online.npz), and
    shows and/or saves matplotlib figures.
    """
    # load data, from either raw file directly from phone, or as output
    # from combine.py utility:
    if args.raw:
        # Renamed from sum/ssq/max to avoid shadowing builtins.
        version, header, psum, pssq, pmax, second = unpack_all(filename)
        index = get_pixel_indices(header)
        images = interpret_header(header, "images")
        num = np.full(index.size, images)
        width = interpret_header(header, "width")
        height = interpret_header(header, "height")
    else:
        # load the image geometry from the calibrations:
        width, height = load_res(args.calib)
        try:
            npz = np.load(filename)
        except (IOError, ValueError):
            # np.load raises OSError for unreadable files and ValueError
            # for bad formats; the old bare `except:` hid unrelated bugs.
            print("could not process file ", filename, " as .npz file. Use --raw option?")
            return
        psum = npz['sum']
        pssq = npz['ssq']
        num = npz['num']

    cmean = psum / num
    # Unbiased variance: (E[x^2] - E[x]^2) * n/(n-1).
    cvari = (pssq / num - cmean**2) * num / (num - 1)

    # apply gains if appropriate
    if args.gain:
        try:
            wgt = load_weights(args.calib).flatten()
        except IOError:
            print("weights not found.")
            return
        cmean = cmean * wgt
        cvari = cvari * wgt**2

    # select pixels to plot
    index = np.arange(psum.size)
    xpos = index % width
    ypos = index // width
    rpos = np.sqrt((xpos - xpos.mean())**2 + (ypos - ypos.mean())**2)
    keep = np.ones(width * height, dtype=bool)

    if args.no_dark or args.all_dark:
        try:
            dark = load_dark(args.calib)
        except IOError:
            print("dark pixel file does not exist.")
            return
        if args.no_dark:
            keep &= np.logical_not(dark)
        if args.all_dark:
            keep &= dark

    if args.hot:
        max_mean = args.hot[0]
        max_vari = args.hot[1]
        print("saving hot pixel list from mean > ", max_mean, " or var > ", max_vari)
        hot = (cmean > max_mean) | (cvari > max_vari)
        hot_list = index[hot]
        hotfile = os.path.join(args.calib, "hot_online.npz")
        print("saving ", hot_list.size, "hot pixels to file ", hotfile)
        print("total pixels in device: ", width * height)
        frac = hot_list.size / (width * height)
        print("fraction of hot pixels: ", frac)
        np.savez(hotfile, hot_list=hot_list)
        keep &= np.logical_not(hot)

    # first show spatial distribution
    plt.figure(1, figsize=(6, 8))
    if args.by_radius:
        plt.subplot(211)
        plt.hist2d(rpos, cmean, norm=LogNorm(), bins=[500, 500],
                   range=[[0, rpos.max()], [0, args.max_mean]], cmap='seismic')
        plt.xlabel('radius')
        plt.ylabel('mean')
        plt.subplot(212)
        plt.hist2d(rpos, cvari, norm=LogNorm(), bins=[500, 500],
                   range=[[0, rpos.max()], [0, args.max_var]], cmap='seismic')
        plt.xlabel('radius')
        plt.ylabel('variance')
    else:
        plt.subplot(211)
        plt.imshow(cmean.reshape(height, width), cmap='seismic', vmax=args.max_mean)
        plt.colorbar()
        plt.subplot(212)
        plt.imshow(cvari.reshape(height, width), cmap='seismic', vmax=args.max_var)
        plt.colorbar()

    # now do 2D histogram(s) for mean and variance
    plt.figure(2, figsize=(10, 8))
    if args.by_filter:
        # one subplot per position in the 2x2 color-filter pattern
        for i in range(4):
            ix = i % 2
            iy = i // 2
            pos = keep & ((xpos % 2) == ix) & ((ypos % 2) == iy)
            plt.subplot(2, 2, i + 1)
            plt.hist2d(cmean[pos], cvari[pos], norm=LogNorm(), bins=(500, 500),
                       range=((0, args.max_mean), (0, args.max_var)))
            plt.xlabel("mean")
            plt.ylabel("variance")
    else:
        plt.hist2d(cmean, cvari, norm=LogNorm(), bins=[500, 500],
                   range=[[0, args.max_mean], [0, args.max_var]])
        plt.xlabel("mean")
        plt.ylabel("variance")

    if args.save_plot:
        plot_name = "plots/mean_var_calib.pdf" if args.calib \
            else "plots/mean_var.pdf"
        print("saving plot to file: ", plot_name)
        plt.savefig(plot_name)
    plt.show()
    return
    # NOTE(review): the 1-D mean/variance histogram code that previously
    # followed this `return` was unreachable dead code and has been removed.