def getCommonModeFromHist(im, gainAv=30, searchRadiusFrac=.4, debug=False):
    """Estimate the common-mode offset of a detector frame from its pixel histogram.

    The histogram is searched for the zero-photon peak within
    +/- gainAv*searchRadiusFrac around zero; the returned value is the center of
    mass of that peak region.
    """
    im = im.ravel()
    bins = np.arange(-2 * gainAv, 3 * gainAv)
    hst, _ = np.histogram(im, bins)
    bins = bins[:-1]
    rad = np.round(gainAv * searchRadiusFrac)
    # locate the zero-photon peak close to zero
    idx = filtvec(bins, [-rad, rad])
    pk = bins[idx][hst[idx].argmax()]
    # restrict the histogram to the peak region and take its center of mass
    idx = filtvec(bins, [pk - rad, pk + rad])
    bins = bins[idx]
    hst = hst[idx]
    bg = np.sum(bins * hst) / np.sum(hst)
    if debug:
        nfigure('debug common mode hist correction')
        plt.clf()
        plt.plot(bins, hst)
        plt.waitforbuttonpress()
    return bg
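# Usage sketch (not part of the original module; the synthetic frame below is an
# assumption chosen only to illustrate the call). The common-mode level estimated
# from the histogram is subtracted from a pedestal-corrected frame. The module-level
# numpy import (np) is assumed, as used throughout this file.
def _example_common_mode(frame=None):
    if frame is None:
        frame = np.random.normal(loc=5., scale=3., size=(185, 388))  # synthetic dark frame
    offset = getCommonModeFromHist(frame, gainAv=30)
    return frame - offset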
def getCorrectionFunc(order=5, Imat=None, p=None, pc=None, fraclims_dc=(.9, 1.1), wrapit=True):
    """Get nonlinear correction factors from a calibration dataset consisting of:
    p           array of intensity/parameter values the calibration has been made for.
    Imat        2D array of the corresponding reference patterns; each row is one
                ravelled pattern for the corresponding intensity bin in p.
    pc          working point around which a correction polynomial is developed for
                each pixel (defaults to the mean of p).
    order       polynomial order up to which the correction is developed.
    fraclims_dc relative limits around pc of the p/Imat data used to determine the
                response at the working point.

    Returns corrFunc(D, i), a function that takes a matrix D of flattened patterns
    the correction is to be applied on and a flat array i of the corresponding
    intensity/parameter values (rows in D correspond to the entries of i).
    """
    if pc is None:
        pc = np.mean(p)
    # quadratic fit around the working point to get the pattern at pc (the "dc" term)
    msk = tools.filtvec(p, pc * np.asarray(fraclims_dc))
    p0 = tools.polyFit(p[msk], Imat[msk, ...], 2)
    dc = tools.polyVal(p0, pc)
    # per-pixel polynomial of the deviation from the working-point pattern
    comps = tools.polyFit(p - pc, Imat - dc, order, removeOrders=[0])
    compsder = tools.polyDer(comps)
    c = lambda i: tools.polyVal(comps, i - np.asarray(tools.iterfy(pc))) + dc
    c_prime = lambda i: tools.polyVal(compsder, i - np.asarray(tools.iterfy(pc)))
    # tangent to the response at the working point
    t = lambda i: (c_prime(pc).T * (i - pc)).T + dc
    cprimeic = c_prime(pc)
    dcorr_const = -cprimeic * pc + c(pc) - t(0)

    def corrFunc(D, i):
        i = i.ravel()
        return cprimeic * (i + ((D - c(i)) / c_prime(i)).swapaxes(0, -1)).swapaxes(0, -1)

    if wrapit:
        def corrFuncTransposed(D, i=None, normalize=False, fillValue=np.nan):
            if i is None:
                # default intensity: sum over all but the last (event) axis
                i = np.apply_over_axes(np.nansum, D, tuple(range(np.ndim(D) - 1))).ravel()
            cr = corrFunc(D.swapaxes(0, -1), i).swapaxes(0, -1)
            if normalize:
                cr /= i
            # mark events outside the calibrated intensity range with fillValue
            cr[:, ~np.logical_and(i > np.min(p), i < np.max(p))] *= fillValue
            return cr

        corrFuncWrapped = wrapFunc(corrFuncTransposed, transposeStack=True)

        def corrFuncWrap(D, i=None, normalize=False, fillValue=np.nan):
            if i is not None:
                Df = D * i.filter([np.min(p), np.max(p)]).ones()
            else:
                Df = D
            return corrFuncWrapped(Df, i=i, normalize=normalize, fillValue=fillValue)

        return corrFuncWrap
    else:
        return corrFunc
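# Usage sketch (not part of the original module; the synthetic calibration below is
# an assumption, and the result depends on the tools.polyFit/polyVal helpers used by
# getCorrectionFunc above). Each "pixel" responds mildly nonlinearly to the
# intensity p; corrFunc should map the measured patterns back onto the linear
# response around the working point pc.
def _example_correction_func():
    p = np.linspace(0.5e6, 2e6, 50)                            # calibration intensities
    gain = np.array([1.0, 0.8, 1.2])                           # three example pixels
    Imat = np.outer(p, gain) + 1e-8 * np.outer(p, gain) ** 2   # mildly nonlinear patterns
    corrFunc = getCorrectionFunc(order=3, Imat=Imat, p=p, pc=1e6, wrapit=False)
    D, i = Imat[:5], p[:5]      # patterns to correct and their intensities
    return corrFunc(D, i)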
def getNoiseMap(Istack, lims_perc=None, lims=None):
    """Compute a per-pixel noise map from a stack of frames and derive a bad-pixel
    mask from noise limits given either as percentiles, as explicit limits, or
    selected interactively from the noise histogram.

    Returns (mask, noise), where mask is True for pixels whose noise lies outside
    the limits.
    """
    noise = noiseMap(Istack)
    if lims_perc is not None:
        lims = np.nanpercentile(noise, lims_perc)
        tools.nfigure('Selected noise limits')
        plt.clf()
        tools.histSmart(noise.ravel()[~np.isnan(noise.ravel())], fac=200)
        # highlight the selected noise range on the histogram
        plt.axvspan(*lims)
        plt.draw()
    if lims is None:
        tools.nfigure('Find noise limits')
        plt.clf()
        tools.histSmart(noise.ravel()[~np.isnan(noise.ravel())], fac=200)
        plt.draw()
        print("Select noise limits")
        lims = tools.getSpanCoordinates()
    return ~tools.filtvec(noise, lims), noise
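# Usage sketch (not part of the original module; the synthetic dark stack is an
# assumption, and noiseMap/tools.histSmart come from elsewhere in this package).
# Giving percentile limits avoids the interactive span selection.
def _example_noise_mask():
    dark_stack = np.random.normal(loc=0., scale=2., size=(200, 185, 388))
    mask, noise = getNoiseMap(dark_stack, lims_perc=[1, 99])
    return mask, noise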
def getCorr(order=5, i0=None, Imat=None, i0_wp=1e6, fraclims_dc=(.9, 1.1)):
    """Get nonlinear correction factors from a calibration dataset consisting of:
    i0          array of intensities the calibration has been made for.
    Imat        2D array of the corresponding reference patterns; each row is one
                ravelled pattern for the corresponding intensity bin in i0.
    i0_wp       working point around which a correction polynomial is developed
                for each pixel.
    order       polynomial order up to which the correction is developed.
    fraclims_dc relative limits around i0_wp of the i0/Imat data used to determine
                the response at the working point.

    Returns dcorr(i, D), the additive per-pixel correction function, together with
    the fitted polynomial components and the tangent function t at the working point.
    """
    # quadratic fit around the working point to get the pattern at i0_wp (the "dc" term)
    msk = tools.filtvec(i0, i0_wp * np.asarray(fraclims_dc))
    p0 = tools.polyFit(i0[msk], Imat[msk, :], 2)
    dc = tools.polyVal(p0, i0_wp)
    # per-pixel polynomial of the deviation from the working-point pattern
    comps = tools.polyFit(i0 - i0_wp, Imat - dc, order, removeOrders=[0])
    compsder = tools.polyDer(comps)
    c = lambda i: tools.polyVal(comps, i - np.asarray(tools.iterfy(i0_wp))) + dc
    c_prime = lambda i: tools.polyVal(compsder, i - np.asarray(tools.iterfy(i0_wp)))
    # tangent to the response at the working point
    t = lambda i: (c_prime(i0_wp).T * (i - i0_wp)).T + dc
    cprimeic = c_prime(i0_wp)
    dcorr_const = -cprimeic * i0_wp + c(i0_wp) - t(0)

    def dcorr(i, D):
        return (i * cprimeic.T + dcorr_const.T + ((D - c(i)) * cprimeic / c_prime(i)).T).T

    return dcorr, comps, t

    # --- Unreachable debug/plotting code: everything below the return above is never executed. ---
    tools.nfigure('testplot')
    plt.clf()
    plt.subplot(1, 2, 1)
    Imean = (Imat.T / i0).T
    tools.imagesc(np.asarray([ti / np.mean(Imean[-10:, :], 0) for ti in Imean]))
    tools.clim_std(6)
    cl = plt.gci().get_clim()
    plt.colorbar()
    plt.set_cmap(plt.cm.RdBu_r)
    plt.subplot(1, 2, 2)
    cmps = copy.copy(comps)
    cmps[-2, :] = 0
    cc = lambda i: tools.polyVal(cmps, i - np.asarray(tools.iterfy(i0_wp)))
    Ir = Imat - c(i0) + t(i0) - t(0)
    Ir = dcorr(i0, Imat)
    Ir = (Ir.T / i0).T
    tools.imagesc(np.asarray([ti / np.mean(Ir[-10:, :], 0) for ti in Ir]))
    plt.clim(cl)
    plt.colorbar()
    plt.set_cmap(plt.cm.RdBu_r)
    plt.draw()

    tools.nfigure('testplot_components')
    plt.clf()
    ah = None
    for n, comp in enumerate(comps):
        if ah is None:
            ah = plt.subplot(len(comps), 1, n + 1)
        else:
            plt.subplot(len(comps), 1, n + 1, sharex=ah)
        plt.plot(comp)
        lims = np.percentile(comp, [1, 99])
        plt.ylim(lims)
    return c, c_prime
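# Usage sketch (not part of the original module; the synthetic calibration is an
# assumption, and the result depends on the tools polynomial helpers used by
# getCorr above). The additive correction dcorr(i, D) is applied to the full
# calibration set, mirroring how the debug code above uses it.
def _example_getCorr():
    i0 = np.linspace(0.5e6, 2e6, 50)                             # calibration intensities
    gain = np.array([1.0, 0.8, 1.2])                             # three example pixels
    Imat = np.outer(i0, gain) + 1e-8 * np.outer(i0, gain) ** 2   # mildly nonlinear patterns
    dcorr, comps, t = getCorr(order=3, i0=i0, Imat=Imat, i0_wp=1e6)
    corrected = dcorr(i0, Imat)      # corrected patterns, same shape as Imat
    return corrected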