def fit_traces(x, yy, deg=5, domain=None):
    """
    returns TraceSet object modeling y[i] vs. x

    Args:
        x   : 1D array
        yy  : 2D array[nspec, nx]
        deg : optional Legendre degree
    """
    nspec, nx = yy.shape
    assert len(x) == nx, "yy.shape[1] ({}) != len(x) ({})".format(nx, len(x))
    assert np.all(np.diff(x) > 0), "x not monotonically increasing"

    if domain is None:
        xmin, xmax = x[0], x[-1]
    else:
        xmin, xmax = domain

    c = np.zeros((nspec, deg + 1))
    # implement oleksandr-pavlyk's fix from the memory branch
    xx = x - xmin
    xx *= 2.0 / (xmax - xmin)
    xx -= 1.0
    for i in range(nspec):
        # xx = 2.0 * (x-xmin) / (xmax-xmin) - 1.0
        c[i] = legfit(xx, yy[i], deg)

    return TraceSet(c, [xmin, xmax])
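# --- Illustrative usage (not from the original source) ----------------------
# A minimal, self-contained sketch of the pattern used by fit_traces():
# rescale x onto the Legendre natural domain [-1, 1], fit each row of a 2D
# array with numpy's legfit, then evaluate the fit with legval. The grid,
# trace values and degree below are made-up example values.
import numpy as np
from numpy.polynomial.legendre import legfit, legval

x = np.linspace(3500.0, 9800.0, 50)            # e.g. a wavelength grid
yy = np.vstack([10.0 + 0.001 * (x - x[0]) + 0.1 * i for i in range(4)])

xmin, xmax = x[0], x[-1]
xx = 2.0 * (x - xmin) / (xmax - xmin) - 1.0    # reduced variable in [-1, 1]
coeffs = np.array([legfit(xx, row, deg=3) for row in yy])

# evaluate the fitted traces back on the same grid
yfit = np.array([legval(xx, c) for c in coeffs])
assert np.allclose(yfit, yy, atol=1e-8)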
def invert(self, domain=None, coeff=None, deg=None):
    """
    Utility to return a traceset modeling x vs. y instead of y vs. x
    """
    if domain is None:
        domain = [self.wmin, self.wmax]
    ispec = np.arange(self.nspec)  # Doing for all spectra
    if coeff is None:
        coeff = self.ycoeff  # doing y-wavelength map
    ytmp = list()
    for ii in ispec:
        fit_dict = dufits.mk_fit_dict(coeff[ii, :], coeff.shape[1], 'legendre', domain[0], domain[1])
        xtmp = np.array((domain[0], domain[1]))
        yfit = dufits.func_val(xtmp, fit_dict)
        ytmp.append(yfit)
    ymin = np.min(ytmp)
    ymax = np.max(ytmp)
    x = np.linspace(domain[0], domain[1], 1000)
    if deg is None:
        deg = self.ncoeff + 2

    #- Now get the coefficients for inverse mapping
    c = np.zeros((coeff.shape[0], deg + 1))
    for ii in ispec:
        fit_dict = dufits.mk_fit_dict(coeff[ii, :], coeff.shape[1], 'legendre', domain[0], domain[1])
        y = dufits.func_val(x, fit_dict)
        yy = 2.0 * (y - ymin) / (ymax - ymin) - 1.0
        c[ii] = legfit(yy, x, deg)

    return c, ymin, ymax
def baselineSpectrum(spectrum, order=1, baselineIndex=()):
    x = np.arange(len(spectrum))
    coeffs = legendre.legfit(x[baselineIndex], spectrum[baselineIndex], order)
    spectrum -= legendre.legval(x, coeffs)
    return spectrum
def cheb_fitcurve(x, y, order):
    x = cheb.chebpts2(len(x))
    order = 64

    coef = legend.legfit(x, y, order)
    assert_equal(len(coef), order + 1)
    y1 = legend.legval(x, coef)
    err_1 = np.linalg.norm(y1 - y) / np.linalg.norm(y)

    coef = cheb.chebfit(x, y, order)
    assert_equal(len(coef), order + 1)
    thrsh = abs(coef[0] / 1000)
    for i in range(len(coef)):
        if abs(coef[i]) < thrsh:
            coef = coef[0:i + 1]
            break
    y2 = cheb.chebval(x, coef)
    err_2 = np.linalg.norm(y2 - y) / np.linalg.norm(y)

    plt.plot(x, y2, '.')
    plt.plot(x, y, '-')
    plt.title("nPt={} order={} err_cheby={:.6g} err_legend={:.6g}".format(
        len(x), order, err_2, err_1))
    plt.show()

    assert_almost_equal(cheb.chebval(x, coef), y)
    # return coef
def fit_poly(w, r, type, order):
    if type == "legendre":
        coef = legfit(w, r, order)
        return coef
    elif type == "chebyshev":
        coef = chebfit(w, r, order)
        return coef
def invert_legendre_polynomial(wavemin, wavemax, ycoef, xcoef, sigmacoef,
                               fiber, npix_y, wave_of_y, width=7):
    # Wavelength array used in 'invert_legendre_polynomial'
    wave = np.linspace(wavemin, wavemax, 100)
    # Determines value of Y, so we can know its coefficient and then its position
    y_of_wave = legval(u(wave, wavemin, wavemax), ycoef[fiber])
    coef = legfit(u(y_of_wave, 0, npix_y), wave, deg=ycoef[fiber].size)
    wave_of_y[fiber] = legval(u(np.arange(npix_y).astype(float), 0, npix_y), coef)
    # Determines wavelength intensity (x) based on Y
    x_of_y = legval(u(wave_of_y[fiber], wavemin, wavemax), xcoef[fiber])
    sigma_of_y = legval(u(wave_of_y[fiber], wavemin, wavemax), sigmacoef[fiber])
    # Ascertain X by using low and high uncertainty (integer pixel bounds)
    x1_of_y = np.floor(x_of_y).astype(int) - width // 2
    x2_of_y = np.floor(x_of_y).astype(int) + width // 2 + 2
    return (x1_of_y, x_of_y, x2_of_y, sigma_of_y)
def test_legfit(self):
    def f(x):
        return x * (x - 1) * (x - 2)

    # Test exceptions
    assert_raises(ValueError, leg.legfit, [1], [1], -1)
    assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
    assert_raises(TypeError, leg.legfit, [], [1], 0)
    assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
    assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
    assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
    assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
    assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1])

    # Test fit
    x = np.linspace(0, 2)
    y = f(x)
    #
    coef3 = leg.legfit(x, y, 3)
    assert_equal(len(coef3), 4)
    assert_almost_equal(leg.legval(x, coef3), y)
    #
    coef4 = leg.legfit(x, y, 4)
    assert_equal(len(coef4), 5)
    assert_almost_equal(leg.legval(x, coef4), y)
    #
    coef2d = leg.legfit(x, np.array([y, y]).T, 3)
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)

    # test weighting
    w = np.zeros_like(x)
    yw = y.copy()
    w[1::2] = 1
    y[0::2] = 0
    wcoef3 = leg.legfit(x, yw, 3, w=w)
    assert_almost_equal(wcoef3, coef3)
    #
    wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
def invert_legendre_polynomial(wavemin, wavemax, ycoef, xcoef, fiber, npix_y, wave_of_y):
    # Wavelength array used in 'invert_legendre_polynomial'
    wave = np.linspace(wavemin, wavemax, 100)
    # Determines value of Y, so we can know its coefficient and then its position
    y_of_wave = legval(u(wave, wavemin, wavemax), ycoef[fiber])
    coef = legfit(u(y_of_wave, 0, npix_y), wave, deg=ycoef[fiber].size)
    wave_of_y[fiber] = legval(u(np.arange(npix_y).astype(float), 0, npix_y), coef)
    # Determines wavelength intensity (x) based on Y
    x_of_y = legval(u(wave_of_y[fiber], wavemin, wavemax), xcoef[fiber])
    # Ascertain X by using low and high uncertainty
    x1_of_y = np.floor(x_of_y).astype(int) - 3
    x2_of_y = np.floor(x_of_y).astype(int) + 4
    return (x1_of_y, x2_of_y)
def recompute_legendre_coefficients(xcoef, ycoef, wavemin, wavemax, degxx, degxy, degyx, degyy, dx_coeff, dy_coeff):
    """
    Modifies Legendre coefficients of an input trace set using polynomial coefficients (as defined by the routine monomials)

    Args:
        xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
        ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
        wavemin : float
        wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
                  used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
        degxx : int, degree of polynomial for x shifts as a function of x (x is axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
        degxy : int, degree of polynomial for x shifts as a function of y (y is axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
        degyx : int, degree of polynomial for y shifts as a function of x
        degyy : int, degree of polynomial for y shifts as a function of y
        dx_coeff : 1D np.array of polynomial coefficients of size (degxx*degxy) as defined by the routine monomials.
        dy_coeff : 1D np.array of polynomial coefficients of size (degyx*degyy) as defined by the routine monomials.

    Returns:
        xcoef : 2D np.array of shape (nfibers,ncoef) with modified Legendre coefficients
        ycoef : 2D np.array of shape (nfibers,ncoef) with modified Legendre coefficients
    """
    wave = np.linspace(wavemin, wavemax, 100)
    nfibers = xcoef.shape[0]
    rw = legx(wave, wavemin, wavemax)
    for fiber in range(nfibers):
        x = legval(rw, xcoef[fiber])
        y = legval(rw, ycoef[fiber])

        m = monomials(x, y, degxx, degxy)
        dx = m.T.dot(dx_coeff)
        xcoef[fiber] = legfit(rw, x + dx, deg=xcoef.shape[1] - 1)

        m = monomials(x, y, degyx, degyy)
        dy = m.T.dot(dy_coeff)
        ycoef[fiber] = legfit(rw, y + dy, deg=ycoef.shape[1] - 1)

    return xcoef, ycoef
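# --- Illustrative usage (not from the original source) ----------------------
# A minimal sketch of the "evaluate, shift, refit" pattern used above, with
# only numpy: evaluate a trace on the reduced variable, add a known offset,
# and refit Legendre coefficients at the same degree. The coefficients and
# the 0.5-pixel shift below are made-up example values, and legx is written
# out inline as described in the docstring above.
import numpy as np
from numpy.polynomial.legendre import legfit, legval

def legx(wave, wavemin, wavemax):
    # reduced variable in [-1, 1]
    return 2.0 * (wave - wavemin) / (wavemax - wavemin) - 1.0

wavemin, wavemax = 3600.0, 9800.0
wave = np.linspace(wavemin, wavemax, 100)
rw = legx(wave, wavemin, wavemax)

xcoef_fiber = np.array([1200.0, 5.0, 0.3])   # hypothetical trace coefficients
x = legval(rw, xcoef_fiber)
dx = 0.5                                      # constant shift in pixels
new_coef = legfit(rw, x + dx, deg=xcoef_fiber.size - 1)

# only the constant (P0) term should change for a constant shift
assert np.allclose(new_coef - xcoef_fiber, [dx, 0.0, 0.0], atol=1e-8)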
def legForwardTransform(orders, locations, functionVals):
    if len(locations.shape) == 1:
        return np.array(leg.legfit(locations, functionVals, orders[0]))
    else:
        if locations.shape[1] == 2:
            V = leg.legvander2d(locations[:, 0], locations[:, 1], orders)
        elif locations.shape[1] == 3:
            V = leg.legvander3d(locations[:, 0], locations[:, 1], locations[:, 2], orders)
        elif locations.shape[1] == 4:
            V = legvander4d(locations, orders)
        elif locations.shape[1] == 5:
            V = legvander5d(locations, orders)
        else:
            raise NotImplementedError  # there's a bad startup joke about this being good enough for the paper.
        ret, _, _, _ = npl.lstsq(V, functionVals, rcond=None)
        return np.reshape(ret, (np.array(orders) + 1).flatten())
def Legendre_polynomial_basis(c, potential, domain, N, wave_func):
    x = np.linspace(-domain / 2, domain / 2, N)
    # represent our wave function in the Legendre polynomial basis
    wave_legen = legen.legfit(x, wave_func, N)
    # calculate H |bj>, where H = -c Lap + V
    # calculate -c Lap |bj>
    Hbj_first = -1 * c * legen.legder(wave_legen, 2)
    # calculate V |bj>; here, V is a constant
    Hbj_secod = potential * wave_legen
    Hbj = Hbj_first + Hbj_secod[0:N - 1]
    return Hbj
def invert(self, domain=None, deg=None):
    """
    Return a traceset modeling x vs. y instead of y vs. x
    """
    ytmp = self.eval(None, (self._xmin, self._xmax))
    ymin = np.min(ytmp)
    ymax = np.max(ytmp)
    x = np.linspace(self._xmin, self._xmax, 1000)
    if deg is None:
        deg = self._coeff.shape[1] + 2

    c = np.zeros((self.ntrace, deg + 1))
    for i in range(self.ntrace):
        y = self.eval(i, x)
        yy = 2.0 * (y - ymin) / (ymax - ymin) - 1.0
        c[i] = legfit(yy, x, deg)

    return TraceSet(c, domain=(ymin, ymax))
def solve_task(x_coords, y_coords, pols):
    # x_coords = range(20)
    # y_coords = np.random.normal(10, 3, 20)
    deg = len(x_coords) - 1
    # pols = pols[:deg]
    # pols = get_polynoms(deg)
    #
    # A = calculate_A(x_coords, pols)
    # B = y_coords
    #
    # coefs = np.linalg.solve(A, B)
    # coefs, info = la.cgs(A, B, tol=1e-10)
    # coefs = legndr.legfit(x_coords, y_coords, deg-1)
    # print('coefs = ', len(coefs))
    # print(info)
    coefs = legndr.legfit(x_coords, y_coords, deg, rcond=1e-15)
    vizualize(x_coords, y_coords, pols, coefs)
def on_interpolate(self):
    with open('/home/danila/PycharmProjects/LegendreInterpolation/input.txt', 'r') as f_input:
        points = np.loadtxt(f_input)
    points = points[np.argsort(points[:, 0])]
    points = points.T
    x_left, x_right = points[0, 0], points[0, -1]
    points[0] = (points[0] - (x_right + x_left) / 2) * 2 / (x_right - x_left)
    deg = points.shape[1]
    # print(points)
    coefs = legndr.legfit(points[0], points[1], deg - 1, rcond=1e-15)
    roots, _ = legndr.leggauss(deg)
    self.q_interpol_roots.setText(str(list(roots)))
    self.q_series_coeffs.setText(str(list(enumerate(coefs))))
    self.q_interpol_deg.setText(str(deg))
    self.vizualize(x_left, x_right, points, coefs)
def invert(self, domain=None, deg=None):
    """
    Return a traceset modeling x vs. y instead of y vs. x
    """
    ytmp = self.eval(None, np.array([self._xmin, self._xmax]))
    ymin = np.min(ytmp)
    ymax = np.max(ytmp)
    x = np.linspace(self._xmin, self._xmax, 1000)
    if deg is None:
        deg = self._coeff.shape[1] + 2

    c = np.zeros((self.ntrace, deg + 1))
    for i in range(self.ntrace):
        y = self.eval(i, x)
        # yy = 2.0 * (y-ymin) / (ymax-ymin) - 1.0
        # implement oleksandr-pavlyk's fix from the memory branch
        yy = (y - ymin) * (2.0 / (ymax - ymin)) - 1.0
        c[i] = legfit(yy, x, deg)

    return TraceSet(c, domain=(ymin, ymax))
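# --- Illustrative usage (not from the original source) ----------------------
# A self-contained numpy sketch of the inversion trick used by invert():
# sample the forward Legendre model y(x) on a dense grid, rescale y onto
# [-1, 1], and fit x as a Legendre series in y. The forward coefficients,
# domain and degrees are made-up example values; the mapping must be
# monotonic for the inverse fit to make sense.
import numpy as np
from numpy.polynomial.legendre import legfit, legval

xmin, xmax = 0.0, 4000.0
fwd = np.array([2000.0, 1900.0, 25.0])          # y(x) coefficients, monotonic here
x = np.linspace(xmin, xmax, 1000)
xx = 2.0 * (x - xmin) / (xmax - xmin) - 1.0
y = legval(xx, fwd)

ymin, ymax = y.min(), y.max()
yy = (y - ymin) * (2.0 / (ymax - ymin)) - 1.0   # rescaled y, as in the version above
inv = legfit(yy, x, deg=5)                      # inverse model x(y), slightly higher degree

# round-trip check: x -> y -> x
x_back = legval(yy, inv)
assert np.abs(x_back - x).max() < 1e-3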
def interpolate_np(x_coords, y_coords, n, deg):
    """
    :param y_coords: 1d array of y coordinates
    :param x_coords: 1d array of x coordinates
    :param n: int, the number of points in interpolate segment
    :param deg: int, the degree of the fitting polynomial
        deg < n <= len(points[0])
    """
    assert deg < n <= len(x_coords)
    x_interp, y_interp = list(), list()
    # for i in range(0, len(x_coords) - 1, n - 1 or n):
    #     right_i = i + n if i + n < len(x_coords) else len(x_coords)
    #     coefs = leg.legfit(x_coords[i:right_i], y_coords[i:right_i], deg) if deg != n - 1 else \
    #         leginterpol(x_coords[i:right_i], y_coords[i:right_i])
    #     print(coefs)
    #     if n != 1:
    #         temp_x_interp_points = np.arange(x_coords[i],
    #                                          x_coords[right_i - 1],
    #                                          (x_coords[right_i - 1] - x_coords[i]) / 10000)
    #     else:
    #         temp_x_interp_points = np.arange(x_coords[i] - (x_coords[i + 1] - x_coords[i]) / 2,
    #                                          x_coords[i] + (x_coords[i + 1] - x_coords[i]) / 2,
    #                                          (x_coords[i + 1] - x_coords[i]) / 10)
    #     # print(i, right_i)
    #
    #     x_interp.extend(temp_x_interp_points)
    #     y_interp.extend(leg.legval(temp_x_interp_points, coefs))
    coefs = leg.legfit(x_coords, y_coords, deg)
    # print('\n', max((leg.legval(x_coords, coefs) - y_coords)))
    # coefs = np.polyfit(x_coords, y_coords, deg)
    # print(max((np.polyval(x_coords, coefs) - y_coords)))
    # print(V)
    plt.plot(x_coords, y_coords, 'ro')
    plt.plot(x_coords, leg.legval(x_coords, coefs))
    plt.show()
def fit_leg_poly(x, y, w, flag, POLY_ORDER):
    # Successive approximation
    # p00 = np.zeros(1)  # Set initial guess to zero
    # for j in range(0, POLY_ORDER+1):
    #     pa = fmin(chisq_leg, p00, args=(x, y, w, flag), \
    #               ftol=1.0e-5, xtol=1.0e-3, maxiter=3e3, maxfun=3e3, disp=False)
    #     p00 = np.concatenate((pa, [0.0]))  # Generate initial guess for the next iteration
    # Replaced by direct computation
    if np.sum(flag) == 0:
        yfit = np.zeros(len(flag))
        yres = np.zeros(len(flag))
        return yfit, yres
    ind = np.where(flag == 1)[0]
    pa = leg.legfit(x[ind], y[ind], POLY_ORDER, w=1.0 / w[ind]**2)
    yfit = leg.legval(x, pa)  # Fit is computed on all channels, whether flagged or not
    yres = (y - yfit)
    return yfit, yres
def clipping(xdf1, Diff, x_var):
    # xdf1['Std'] = xdf1[Diff].rolling(rolls).std()
    # xdf1['Median'] = xdf1[Diff].rolling(rolls).median()
    # print(xdf1)
    xdf = xdf1.copy()
    '''
    for i in range(0,iters):
        upper = xdf['Median'] + sigma_upper * xdf['Std']
        lower = xdf['Median'] - sigma_lower * xdf['Std']
        mask = xdf[Diff] < upper
        mask = xdf[Diff] > lower
        xdf = xdf[mask]
        xStd = xdf[Diff].rolling(rolls).std()
        xdf = xdf.assign(Std = xStd)
        xMedian = xdf[Diff].rolling(rolls).agg(regression)
        xdf = xdf.assign(Median = xMedian)
    '''
    mask = sigma_clip(xdf[Diff], sigma=sig, iters=itr)
    xdf = xdf.assign(maskDiff=mask)
    coefs = nppl.legfit(xdf[x_var], xdf['maskDiff'], leg_num)
    fit = nppl.legval(xdf[x_var], coefs)
    return xdf, coefs, fit
def projection(self, a, b, f, component=0):
    r"""Given a function, returns the approximating polynomial in the interval [a,b]

    :math:`f(x) \approx p(x) = \sum_{n=0}^p \frac{<f,\phi^n>}{<\phi^n,\phi^n>} \phi^n`

    where :math:`<f,g> = \int_a^b f(x) g(x) \mathrm{d} x` and :math:`\phi^n` is the local Legendre polynomial

    We are basically projecting the function into the Legendre basis space

    We also know that
    :math:`\int_a^b f(x) g(x) dx \approx \sum_{k=0}^{p-1} w_k dx/2 f(\frac{b-a}{2} x_k + \frac{b+a}{2}) g(\frac{b-a}{2} x_k + \frac{b+a}{2})`

    :math:`\phi^n (\frac{b-a}{2} x_k + \frac{b+a}{2}) = L^n(x_k)` (where L is the Legendre polynomial on [-1,1])

    component is the component of f we want to use (assume scalar function)
    """
    # Evaluate the function at the local Gaussian nodes (one at a time)
    xgs = self.shifted_xgauss(a, b)
    fgauss = np.zeros(xgs.shape)
    for g, xg in enumerate(xgs):
        fgauss[g] = f(xg)[component]

    # There are two ways to do the projection.
    # 1) Manually
    # dx = b-a
    # coef = np.zeros(self.N_s)
    # for n in np.arange(self.N_s):
    #     num = 0.5*dx*sum( self.w * fgauss * self.phi[:,n])
    #     den = 0.5*dx*sum( self.w * self.phi[:,n] * self.phi[:,n])
    #     coef[n] = num/den
    # 2) Using the built-in fit function to fit the data at the
    #    Gaussian quadrature nodes.
    coef = leg.legfit(self.x, fgauss, self.p)
    return coef
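# --- Illustrative comparison (not from the original source) -----------------
# A small numpy-only sketch of the two routes mentioned in the docstring
# above: (1) project f onto Legendre polynomials with Gauss-Legendre
# quadrature, and (2) simply legfit the samples at the quadrature nodes.
# With p+1 nodes and degree p the two agree to rounding error. The test
# function and order are arbitrary example values.
import numpy as np
from numpy.polynomial.legendre import leggauss, legfit, legvander

p = 4                                   # highest Legendre order kept
xk, wk = leggauss(p + 1)                # quadrature nodes/weights on [-1, 1]
f = np.exp(xk)                          # sample a smooth test function

# (1) manual projection: <f, P_n> / <P_n, P_n>, with <P_n, P_n> = 2/(2n+1)
P = legvander(xk, p)                    # P[k, n] = P_n(x_k)
n = np.arange(p + 1)
coef_proj = (wk[:, None] * f[:, None] * P).sum(axis=0) * (2 * n + 1) / 2.0

# (2) least-squares fit at the same nodes
coef_fit = legfit(xk, f, p)

assert np.allclose(coef_proj, coef_fit, atol=1e-10)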
if index == 0:
    init_norm = _dict['params'][0] / _dict['n_trues']
if Scale_to_true:
    scale = init_norm * _dict['n_trues']
else:
    scale = 1

cut = '${0}$'.format(cut)
cut = cut.replace('E', '\overline{E}')

y_err = _dict['y_err'] * scale
y_data = _dict['y_data'] * scale

model = Model(Legendre)
init_params = legfit(np.cos(3.1415 * x_data / 180), y_data, 2, w=1.0 / y_err)
params = Parameters()
params.add('p0', init_params[0], min=0, max=2)
params.add('p1', init_params[1], min=-2, max=2)
params.add('p2', init_params[2], min=-2, max=2)

result = model.fit(y_data, params, x=x_data, weights=1.0 / y_err)
_m = re.findall(r'p[0-9]: .+ \+/- ([0-9]+\.[0-9]+)', result.fit_report())
par_errors = list(map(float, _m))
params = init_params
print('params', params)
print('par errors', par_errors)

x_fit, y_fit = get_fit(x_data, params)
def legfit(x, y, deg, rcond=None, full=False, w=None):
    from numpy.polynomial.legendre import legfit
    return legfit(x, y, deg, rcond, full, w)
def legfit(xs, ys, deg):
    coeffs = leg.legfit(xs, ys, deg)
    p = leg.Legendre(coeffs)
    return mkseries(xs, ys, p)
def fit_legendre(g_t, order=10):
    """ General fit of a noisy imaginary time Green's function
    to a low order Legendre expansion in imaginary time.

    Only Hermiticity is imposed on the fit, so discontinuities has
    to be fixed separately (see the method enforce_discontinuity)

    Author: Hugo U.R. Strand

    Parameters
    ----------
    g_t : TRIQS imaginary time Green's function (matrix valued)
        Imaginary time Green's function to fit (possibly noisy binned data)
    order : int
        Maximal order of the fitted Legendre expansion

    Returns
    -------
    g_l : TRIQS Legendre polynomial Green's function (matrix valued)
        Fitted Legendre Green's function with order `order`
    """
    import numpy.polynomial.legendre as leg

    if isinstance(g_t, BlockGf):
        return map_block(lambda g_bl: fit_legendre(g_bl, order), g_t)

    assert isinstance(g_t, Gf) and isinstance(g_t.mesh, MeshImTime), \
        "fit_legendre expects imaginary-time Green function objects"
    assert len(g_t.target_shape) == 2, \
        "fit_legendre currently only implemented for matrix_valued Green functions"

    # -- flatten the data to 2D N_tau x (N_orb * N_orb)
    shape = g_t.data.shape
    fshape = [shape[0], np.prod(shape[1:])]

    # -- extend data accounting for hermiticity
    mesh = g_t.mesh
    tau = np.array([t.value for t in mesh])
    # Rescale to the interval (-1,1)
    x = 2. * tau / mesh.beta - 1.

    data = g_t.data.reshape(fshape)
    data_herm = np.transpose(g_t.data, axes=(0, 2, 1)).conjugate().reshape(fshape)

    # -- Separated real valued linear system, with twice the number of RHS terms
    data_re = 0.5 * (data + data_herm).real
    data_im = 0.5 * (data + data_herm).imag
    data_ext = np.hstack((data_re, data_im))

    c_l_ext = leg.legfit(x, data_ext, order - 1)
    c_l_re, c_l_im = np.split(c_l_ext, 2, axis=-1)
    c_l = c_l_re + 1.j * c_l_im

    # -- make Legendre Green's function of the fitted coeffs
    lmesh = MeshLegendre(mesh.beta, mesh.statistic, order)

    # Nb! We have to scale the actual Legendre coeffs to the Triqs "scaled" Legendre coeffs
    # see Boehnke et al. PRB (2011)
    l = np.arange(len(lmesh))
    scale = np.sqrt(2.*l + 1) / mesh.beta
    scale = scale.reshape([len(lmesh)] + [1]*len(g_t.target_shape))

    g_l = Gf(mesh=lmesh, target_shape=g_t.target_shape)
    g_l.data[:] = c_l.reshape(g_l.data.shape) / scale

    return g_l
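# --- Illustrative detail (not from the original source) ---------------------
# The fit above avoids a complex-valued least-squares problem by stacking the
# real and imaginary parts as extra right-hand-side columns of one real
# legfit call and recombining afterwards. A minimal numpy demonstration of
# that trick, with made-up data:
import numpy as np
from numpy.polynomial.legendre import legfit

x = np.linspace(-1.0, 1.0, 200)
data = np.exp(-x)[:, None] * (1.0 + 0.5j) + 0.1j * x[:, None]   # complex, shape (200, 1)

data_ext = np.hstack((data.real, data.imag))     # real system, twice the RHS columns
c_ext = legfit(x, data_ext, deg=5)
c_re, c_im = np.split(c_ext, 2, axis=-1)
c_stacked = c_re + 1.0j * c_im

# same result as fitting real and imaginary parts independently
c_direct = legfit(x, data.real, deg=5) + 1.0j * legfit(x, data.imag, deg=5)
assert np.allclose(c_stacked, c_direct)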
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt, cm

psr = argparse.ArgumentParser()
psr.add_argument("-o", dest='opt', help="output")
psr.add_argument('ipt', nargs="+", help="input")
args = psr.parse_args()

from Readlog import coeff3d  # coeff3d

EE_tmp, radius, coeff = coeff3d()
assert EE_tmp[4] == "1.8", "1.8 MeV curve is missing."

from numpy.polynomial import legendre as lg

c = lg.legfit(radius, coeff[:, 0, 4], 9)
c[1::2] = 0  # force even function


def ld(f):
    a = pd.read_hdf(f)
    rho2 = a["x_sph"]**2 + a["y_sph"]**2
    r = np.sqrt(a["z_sph"]**2 + rho2)
    E = np.exp(a["l0_sph"] - (lg.legval(r, c) - np.log(1.8)))
    return pd.DataFrame({"E": E, "r": r, "rho2": rho2, "z": a["z_sph"]})


d = pd.concat(map(ld, args.ipt))
rl = np.arange(0.65, 0.25, -0.05)
b = np.arange(0, 9, 0.05)
def shift_ycoef_using_external_spectrum(psf, xytraceset, image, fibers,
                                        spectrum_filename, degyy=2, width=7):
    """
    Measure y offsets (external wavelength calibration) from a preprocessed image, a PSF + trace set,
    using a cross-correlation of boxcar extracted spectra and an external well-calibrated spectrum.
    The PSF shape is used to convolve the input spectrum. It could also be used to correct for the
    PSF asymmetry (disabled for now).
    A relative flux calibration of the spectra is performed internally.

    Args:
        psf : specter PSF
        xytraceset : XYTraceset object
        image : DESI preprocessed image object
        fibers : 1D np.array of fiber indices
        spectrum_filename : path to input spectral file (read with np.loadtxt, first column is
            wavelength (in vacuum and Angstrom), second column is flux (arb. units))

    Optional:
        width : int, extraction boxcar width, default is 7
        degyy : int, degree of polynomial fit of shifts as a function of y, used to reject outliers.

    Returns:
        ycoef : 2D np.array of same shape as input, with modified Legendre coefficients for each
            fiber to convert wavelength to YCCD
    """
    log = get_logger()

    wavemin = xytraceset.wavemin
    wavemax = xytraceset.wavemax
    xcoef = xytraceset.x_vs_wave_traceset._coeff
    ycoef = xytraceset.y_vs_wave_traceset._coeff

    tmp = np.loadtxt(spectrum_filename).T
    ref_wave = tmp[0]
    ref_spectrum = tmp[1]
    log.info("read reference spectrum in %s with %d entries" % (spectrum_filename, ref_wave.size))

    log.info("rextract spectra with boxcar")

    # boxcar extraction
    qframe = qproc_boxcar_extraction(xytraceset, image, fibers=fibers, width=7)

    # resampling on common finer wavelength grid
    flux, ivar, wave = resample_boxcar_frame(qframe.flux, qframe.ivar, qframe.wave, oversampling=2)

    # median flux used as internal spectral reference
    mflux = np.median(flux, axis=0)
    mivar = np.median(ivar, axis=0) * flux.shape[0] * (2. / np.pi)  # very approximate !

    # trim ref_spectrum
    i = (ref_wave >= wave[0]) & (ref_wave <= wave[-1])
    ref_wave = ref_wave[i]
    ref_spectrum = ref_spectrum[i]

    # check wave is linear or make it linear
    if np.abs((ref_wave[1] - ref_wave[0]) - (ref_wave[-1] - ref_wave[-2])) > 0.0001 * (ref_wave[1] - ref_wave[0]):
        log.info("reference spectrum wavelength is not on a linear grid, resample it")
        dwave = np.min(np.gradient(ref_wave))
        tmp_wave = np.linspace(ref_wave[0], ref_wave[-1], int((ref_wave[-1] - ref_wave[0]) / dwave))
        ref_spectrum = resample_flux(tmp_wave, ref_wave, ref_spectrum)
        ref_wave = tmp_wave

    i = np.argmax(ref_spectrum)
    central_wave_for_psf_evaluation = ref_wave[i]
    fiber_for_psf_evaluation = (flux.shape[0] // 2)
    try:
        # compute psf at most significant line of ref_spectrum
        dwave = ref_wave[i + 1] - ref_wave[i]
        hw = int(3. / dwave) + 1  # 3A half width
        wave_range = ref_wave[i - hw:i + hw + 1]
        x, y = psf.xy(fiber_for_psf_evaluation, wave_range)
        x = np.tile(x[hw] + np.arange(-hw, hw + 1) * (y[-1] - y[0]) / (2 * hw + 1), (y.size, 1))
        y = np.tile(y, (2 * hw + 1, 1)).T
        kernel2d = psf._value(x, y, fiber_for_psf_evaluation, central_wave_for_psf_evaluation)
        kernel1d = np.sum(kernel2d, axis=1)
        log.info("convolve reference spectrum using PSF at fiber %d and wavelength %dA" % (fiber_for_psf_evaluation, central_wave_for_psf_evaluation))
        ref_spectrum = fftconvolve(ref_spectrum, kernel1d, mode='same')
    except:
        log.warning("couldn't convolve reference spectrum: %s %s" % (sys.exc_info()[0], sys.exc_info()[1]))

    # resample input spectrum
    log.info("resample convolved reference spectrum")
    ref_spectrum = resample_flux(wave, ref_wave, ref_spectrum)

    log.info("absorb difference of calibration")
    x = (wave - wave[wave.size // 2]) / 50.
    kernel = np.exp(-x**2 / 2)
    f1 = fftconvolve(mflux, kernel, mode='same')
    f2 = fftconvolve(ref_spectrum, kernel, mode='same')
    if np.all(f2 > 0):
        scale = f1 / f2
        ref_spectrum *= scale

    log.info("fit shifts on wavelength bins")
    # define bins
    n_wavelength_bins = degyy + 4
    y_for_dy = np.array([])
    dy = np.array([])
    ey = np.array([])
    wave_for_dy = np.array([])
    for b in range(n_wavelength_bins):
        wmin = wave[0] + ((wave[-1] - wave[0]) / n_wavelength_bins) * b
        if b < n_wavelength_bins - 1:
            wmax = wave[0] + ((wave[-1] - wave[0]) / n_wavelength_bins) * (b + 1)
        else:
            wmax = wave[-1]
        ok = (wave >= wmin) & (wave <= wmax)
        sw = np.sum(mflux[ok] * (mflux[ok] > 0))
        if sw == 0:
            continue
        dwave, err = compute_dy_from_spectral_cross_correlation(mflux[ok], wave[ok], ref_spectrum[ok], ivar=mivar[ok], hw=10.)
        bin_wave = np.sum(mflux[ok] * (mflux[ok] > 0) * wave[ok]) / sw
        x, y = psf.xy(fiber_for_psf_evaluation, bin_wave)
        eps = 0.1
        x, yp = psf.xy(fiber_for_psf_evaluation, bin_wave + eps)
        dydw = (yp - y) / eps
        if err * dydw < 1:
            dy = np.append(dy, -dwave * dydw)
            ey = np.append(ey, err * dydw)
            wave_for_dy = np.append(wave_for_dy, bin_wave)
            y_for_dy = np.append(y_for_dy, y)
            log.info("wave = %fA , y=%d, measured dwave = %f +- %f A" % (bin_wave, y, dwave, err))

    if False:  # we don't need this for now
        try:
            log.info("correcting bias due to asymmetry of PSF")
            hw = 5
            oversampling = 4
            xx = np.tile(np.arange(2 * hw * oversampling + 1) - hw * oversampling, (2 * hw * oversampling + 1, 1)) / float(oversampling)
            yy = xx.T
            x, y = psf.xy(fiber_for_psf_evaluation, central_wave_for_psf_evaluation)
            prof = psf._value(xx + x, yy + y, fiber_for_psf_evaluation, central_wave_for_psf_evaluation)
            dy_asym_central = np.sum(yy * prof) / np.sum(prof)
            for i in range(dy.size):
                x, y = psf.xy(fiber_for_psf_evaluation, wave_for_dy[i])
                prof = psf._value(xx + x, yy + y, fiber_for_psf_evaluation, wave_for_dy[i])
                dy_asym = np.sum(yy * prof) / np.sum(prof)
                log.info("y=%f, measured dy=%f , bias due to PSF asymetry = %f" % (y, dy[i], dy_asym - dy_asym_central))
                dy[i] -= (dy_asym - dy_asym_central)
        except:
            log.warning("couldn't correct for asymmetry of PSF: %s %s" % (sys.exc_info()[0], sys.exc_info()[1]))

    log.info("polynomial fit of shifts and modification of PSF ycoef")
    # pol fit
    coef = np.polyfit(wave_for_dy, dy, degyy, w=1. / ey**2)
    pol = np.poly1d(coef)

    for i in range(dy.size):
        log.info("wave=%fA y=%f, measured dy=%f+-%f , pol(wave) = %f" % (wave_for_dy[i], y_for_dy[i], dy[i], ey[i], pol(wave_for_dy[i])))

    log.info("apply this to the PSF ycoef")
    wave = np.linspace(wavemin, wavemax, 100)
    dy = pol(wave)
    dycoef = legfit(legx(wave, wavemin, wavemax), dy, deg=ycoef.shape[1] - 1)
    for fiber in range(ycoef.shape[0]):
        ycoef[fiber] += dycoef

    return ycoef
def baselineSpectrum(spectrum, order=1, baselineIndex=()):
    x = np.linspace(-1, 1, len(spectrum))
    coeffs = legendre.legfit(x[baselineIndex], spectrum[baselineIndex], order)
    spectrum -= legendre.legval(x, coeffs)
    return spectrum
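# --- Illustrative usage (not from the original source) ----------------------
# A small example of how a baseline routine like the one above is typically
# called: fit the Legendre baseline only on line-free channels and subtract
# it from the whole spectrum. The synthetic spectrum and channel selection
# below are made-up example values.
import numpy as np
from numpy.polynomial import legendre

nchan = 512
x = np.linspace(-1, 1, nchan)
baseline = 2.0 + 1.5 * x            # slowly varying baseline
line = 10.0 * np.exp(-0.5 * ((np.arange(nchan) - 256) / 5.0) ** 2)
spectrum = baseline + line

# use the outer quarters of the band as "line-free" baseline channels
baselineIndex = np.r_[0:nchan // 4, 3 * nchan // 4:nchan]
coeffs = legendre.legfit(x[baselineIndex], spectrum[baselineIndex], 1)
residual = spectrum - legendre.legval(x, coeffs)

assert abs(residual[:nchan // 4]).max() < 1e-8   # baseline removed away from the line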
for i in range(0, iters):
    upper = c_df['Median'] + a * c_df['Std']
    lower = c_df['Median'] - b * c_df['Std']
    # keep only points within both clipping bounds
    mask = (c_df['Intensity'] < upper) & (c_df['Intensity'] > lower)
    c_df = c_df[mask]
    c_df['Std'] = c_df['Intensity'].rolling(50).std()
    c_df['Median'] = c_df['Intensity'].rolling(50).agg(regression)

# Now it's finally time for the Legendre fit baby
coefs = nppl.legfit(c_df['Wavelength'], c_df['Intensity'], 15)
fit = nppl.legval(c_df['Wavelength'], coefs)

plt.plot(wave, flux, 'bo')
plt.plot(i_df['Wavelength'], i_df['Intensity'])
plt.plot(c_df['Wavelength'], c_df['Intensity'])
plt.plot(c_df['Wavelength'], fit)
plt.title("Vega Spectra data with ${\sigma}$ clipped data")
plt.xlabel('Wavelength (Angstroms)')
plt.ylabel('Flux (W/m**2/m)')
plt.show()

plt.plot(wave, flux)
plt.title('Vega Spectra data')
plt.xlabel('Wavelength (Angstroms)')
plt.ylabel('Flux (W/m**2/m)')
def response_correct(data, normdata1d, dispaxis=0, output='', threshold=0.,
                     low_reject=3., high_reject=3., iters=3,
                     function='legendre', order=3):
    ''' Response correction for a 2-D spectrum

    Parameters
    ----------
    data : numpy.ndarray
        The data to be corrected. Usually a (combined) flat field image.
    normdata1d: numpy.ndarray
        1-D numpy array which contains the suitable normalization image.
    dispaxis : {0, 1}
        The dispersion axis. 0 and 1 mean column and line, respectively.
    threshold : float
        The final 2-D response map pixels smaller than this value will be
        replaced by 1.0.

    Usage
    -----
    nsmooth = 7
    normdata1d = np.sum(mflat[700:900, :], axis=0)
    normdata1d = convolve(normdata1d, Box1DKernel(nsmooth), boundary='extend')
    response = preproc.response_correct(data=mflat.data,
                                        normdata1d=normdata1d,
                                        dispaxis=1,
                                        order=10)
    '''
    nlambda = len(normdata1d)
    nrepeat = data.shape[dispaxis - 1]

    if data.shape[dispaxis] != nlambda:
        wstr = ("data shape ({:d}, {:d}) with dispaxis {:d} "
                "does not match with normdata1d ({:d})")
        wstr = wstr.format(data.shape[0], data.shape[1], dispaxis, normdata1d.shape[0])
        raise Warning(wstr)

    x = np.arange(0, nlambda)

    if function == 'legendre':
        fitted = legval(x, legfit(x, normdata1d, deg=order))
        # TODO: The iteration here should be the iteration over the
        # fitting, not the sigma clip itself.
        residual = normdata1d - fitted
        clip = sigma_clip(residual, iters=iters, sigma_lower=low_reject, sigma_upper=high_reject)
    else:
        raise Warning("{:s} is not implemented yet".format(function))

    mask = clip.mask
    weight = (~mask).astype(float)  # masked pixel has weight = 0.
    coeff = legfit(x, normdata1d, deg=order, w=weight)

    if function == 'legendre':
        response = legval(x, coeff)

    response /= np.average(response)
    response[response < threshold] = 1.

    response_map = np.repeat(response, nrepeat)
    if dispaxis == 0:
        response_map = response_map.reshape(nlambda, nrepeat)
    elif dispaxis == 1:
        response_map = response_map.reshape(nrepeat, nlambda)

    response2d = data / response_map

    return response2d
def getSMICA(theta_i=0.0, theta_f=180.0, nSteps=1800, lmax=100, lmin=2,
             newSMICA=False, useSPICE=True, newDeg=False, R1=False):
    """
    Purpose:
        load CMB and mask maps from files, return correlation function for
        unmasked and masked CMB
        Mostly follows Copi et. al. 2013 for cut sky C(theta)
    Uses:
        get_crosspower.py (for plotting)
        C(theta) save file getSMICAfile.npy
    Inputs:
        theta_i,theta_f: starting and ending points for C(theta) in degrees
        nSteps: number of intervals between i,f points
        lmax: the maximum l value to include in legendre series for C(theta)
        lmin: the lowest l to use in C(theta,Cl) and S_{1/2} = CIC calculation
        newSMICA: set to True to reload data from files and recompute
            if False, will load C(theta) curves from file
        useSPICE: if True, will use SPICE to find power spectra
            if False, will use anafast, following Copi et. al. 2013
            Default: True
        newDeg: set to True to recalculate map and mask degredations
            Note: the saved files are dependent on the value of lmax that was used
            Default: False
        R1: set to True to use R1 versions of SMICA and mask. Otherwise, R2 is used
            Only affects which Planck files are used; irrelevant if newDeg=False.
            Default: False
    Outputs:
        theta: nSteps+1 angles that C(theta) arrays are for (degrees)
        unmasked: C(theta) unmasked (microK^2)
        masked: C(theta) masked (microK^2)
    """
    saveFile = 'getSMICAfile.npy'    # for anafast
    saveFile2 = 'getSMICAfile2.npy'  # for spice

    if newSMICA:
        # start with map degredations
        mapDegFile = 'smicaMapDeg.fits'
        maskDegFile = 'maskMapDeg.fits'
        if newDeg:
            # load maps; default files have 2048,NESTED,GALACTIC
            dataDir = '/Data/'
            if R1:
                smicaFile = 'COM_CompMap_CMB-smica-field-I_2048_R1.20.fits'
                maskFile = 'COM_Mask_CMB-union_2048_R1.10.fits'
            else:
                smicaFile = 'COM_CMB_IQU-smica-field-int_2048_R2.01_full.fits'
                maskFile = 'COM_CMB_IQU-common-field-MaskInt_2048_R2.01.fits'
            print 'opening file ', smicaFile, '... '
            smicaMap, smicaHead = hp.read_map(dataDir + smicaFile, nest=True, h=True)
            print 'opening file ', maskFile, '... '
            maskMap, maskHead = hp.read_map(dataDir + maskFile, nest=True, h=True)
            if R1:
                smicaMap *= 1e-6  # microK to K

            # degrade map and mask resolutions from 2048 to 128; convert NESTED to RING
            useAlm = True  # set to True to do harmonic space scaling, False for ud_grade
            NSIDE_big = 2048
            NSIDE_deg = 128
            while 4 * NSIDE_deg < lmax:
                NSIDE_deg *= 2
            print 'resampling maps at NSIDE = ', NSIDE_deg, '... '
            order_out = 'RING'
            if useAlm:
                # transform to harmonic space
                smicaMapRing = hp.reorder(smicaMap, n2r=True)
                maskMapRing = hp.reorder(maskMap, n2r=True)
                smicaCl, smicaAlm = hp.anafast(smicaMapRing, alm=True, lmax=lmax)
                maskCl, maskAlm = hp.anafast(maskMapRing, alm=True, lmax=lmax)
                # this gives 101 Cl values and 5151 Alm values. Why not all 10201 Alm.s?

                # scale by pixel window functions
                bigWin = hp.pixwin(NSIDE_big)
                degWin = hp.pixwin(NSIDE_deg)
                winRatio = degWin / bigWin[:degWin.size]
                degSmicaAlm = hp.almxfl(smicaAlm, winRatio)
                degMaskAlm = hp.almxfl(maskAlm, winRatio)

                # re-transform back to real space
                smicaMapDeg = hp.alm2map(degSmicaAlm, NSIDE_deg)
                maskMapDeg = hp.alm2map(degMaskAlm, NSIDE_deg)
            else:
                smicaMapDeg = hp.ud_grade(smicaMap, nside_out=NSIDE_deg, order_in='NESTED', order_out=order_out)
                maskMapDeg = hp.ud_grade(maskMap, nside_out=NSIDE_deg, order_in='NESTED', order_out=order_out)
                # note: degraded resolution mask will no longer be only 0s and 1s.
                #   Should it be?  Yes.

            # turn smoothed mask back to 0s,1s mask
            threshold = 0.9
            maskMapDeg[np.where(maskMapDeg > threshold)] = 1
            maskMapDeg[np.where(maskMapDeg <= threshold)] = 0

            # testing
            # hp.mollview(smicaMapDeg)
            # plt.show()
            # hp.mollview(maskMapDeg)
            # plt.show()
            # return 0

            hp.write_map(mapDegFile, smicaMapDeg, nest=False)  # use False if order_out='RING' above
            hp.write_map(maskDegFile, maskMapDeg, nest=False)
        else:
            # just load previous degradations (dependent on previous lmax)
            print 'loading previously degraded map and mask...'
            smicaMapDeg = hp.read_map(mapDegFile, nest=False)
            maskMapDeg = hp.read_map(maskDegFile, nest=False)

        # find power spectra
        print 'find power spectra... '
        if useSPICE:
            ClFile1 = 'spiceCl_unmasked.fits'
            ClFile2 = 'spiceCl_masked.fits'

            # note: lmax for spice is 3*NSIDE-1 or less
            ispice(mapDegFile, ClFile1, subav="YES", subdipole="YES")
            Cl_unmasked = hp.read_cl(ClFile1)
            ispice(mapDegFile, ClFile2, maskfile1=maskDegFile, subav="YES", subdipole="YES")
            Cl_masked = hp.read_cl(ClFile2)
            Cl_mask = np.zeros(Cl_unmasked.shape[0])  # just a placeholder
            ell = np.arange(Cl_unmasked.shape[0])
        else:
            # use anafast
            Cl_unmasked = hp.anafast(smicaMapDeg, lmax=lmax)
            Cl_masked = hp.anafast(smicaMapDeg * maskMapDeg, lmax=lmax)
            Cl_mask = hp.anafast(maskMapDeg, lmax=lmax)
            ell = np.arange(lmax + 1)  # anafast output seems to start at l=0

        # plot them
        doPlot = False  # True
        if doPlot:
            gcp.showCl(ell, np.array([Cl_masked, Cl_unmasked]),
                       title='power spectra of unmasked, masked SMICA map')

        # Legendre transform to real space
        print 'Legendre transform to real space... '
        # note: getCovar uses linspace in x for thetaArray
        thetaDomain, CofTheta = getCovar(ell[:lmax + 1], Cl_unmasked[:lmax + 1],
                                         theta_i=theta_i, theta_f=theta_f,
                                         nSteps=nSteps, lmin=lmin)
        thetaDomain, CCutofThetaTA = getCovar(ell[:lmax + 1], Cl_masked[:lmax + 1],
                                              theta_i=theta_i, theta_f=theta_f,
                                              nSteps=nSteps, lmin=lmin)
        CofTheta *= 1e12       # K^2 to microK^2
        CCutofThetaTA *= 1e12  # K^2 to microK^2

        if useSPICE:
            CCutofTheta = CCutofThetaTA  # /(4*np.pi)
        else:
            thetaDomain, AofThetaInverse = getCovar(ell[:lmax + 1], Cl_mask[:lmax + 1],
                                                    theta_i=theta_i, theta_f=theta_f,
                                                    nSteps=nSteps, lmin=0)  # don't zilch the mask
            # note: zilching the mask's low power drastically changed C(theta) for masked anafast
            #   Not sure why.
            CCutofTheta = CCutofThetaTA / AofThetaInverse

        xArray = np.cos(thetaDomain * np.pi / 180.)

        # back to frequency space for S_{1/2} = CIC calculation
        if useSPICE:
            CCutofL = Cl_masked[:lmax + 1] * 1e12  # K^2 to microK^2
        else:
            legCoefs = legfit(xArray, CCutofTheta, lmax)
            CCutofL = legCoefs * (4 * np.pi) / (2 * ell[:lmax + 1] + 1)

        # S_{1/2}
        myJmn = getJmn(lmax=lmax)
        SMasked = np.dot(CCutofL[lmin:], np.dot(myJmn[lmin:, lmin:], CCutofL[lmin:]))
        SNoMask = np.dot(Cl_unmasked[lmin:lmax + 1],
                         np.dot(myJmn[lmin:, lmin:], Cl_unmasked[lmin:lmax + 1])) * 1e24  # two factors of K^2 to muK^2

        # save results
        if useSPICE:
            np.save(saveFile2, np.array([thetaDomain, CofTheta, CCutofTheta, SNoMask, SMasked]))
        else:
            np.save(saveFile, np.array([thetaDomain, CofTheta, CCutofTheta, SNoMask, SMasked]))

    else:
        # load from file
        if useSPICE:
            fileData = np.load(saveFile2)
        else:
            fileData = np.load(saveFile)
        thetaDomain = fileData[0]
        CofTheta = fileData[1]
        CCutofTheta = fileData[2]
        SNoMask = fileData[3]
        SMasked = fileData[4]

    return thetaDomain, CofTheta, CCutofTheta, SNoMask, SMasked
def test_legfit(self):
    def f(x):
        return x*(x - 1)*(x - 2)

    def f2(x):
        return x**4 + x**2 + 1

    # Test exceptions
    assert_raises(ValueError, leg.legfit, [1], [1], -1)
    assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
    assert_raises(TypeError, leg.legfit, [], [1], 0)
    assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
    assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
    assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
    assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
    assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1])
    assert_raises(ValueError, leg.legfit, [1], [1], [-1,])
    assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6])
    assert_raises(TypeError, leg.legfit, [1], [1], [])

    # Test fit
    x = np.linspace(0, 2)
    y = f(x)
    #
    coef3 = leg.legfit(x, y, 3)
    assert_equal(len(coef3), 4)
    assert_almost_equal(leg.legval(x, coef3), y)
    coef3 = leg.legfit(x, y, [0, 1, 2, 3])
    assert_equal(len(coef3), 4)
    assert_almost_equal(leg.legval(x, coef3), y)
    #
    coef4 = leg.legfit(x, y, 4)
    assert_equal(len(coef4), 5)
    assert_almost_equal(leg.legval(x, coef4), y)
    coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4])
    assert_equal(len(coef4), 5)
    assert_almost_equal(leg.legval(x, coef4), y)
    # check things still work if deg is not in strict increasing
    coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0])
    assert_equal(len(coef4), 5)
    assert_almost_equal(leg.legval(x, coef4), y)
    #
    coef2d = leg.legfit(x, np.array([y, y]).T, 3)
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
    coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3])
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)

    # test weighting
    w = np.zeros_like(x)
    yw = y.copy()
    w[1::2] = 1
    y[0::2] = 0
    wcoef3 = leg.legfit(x, yw, 3, w=w)
    assert_almost_equal(wcoef3, coef3)
    wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w)
    assert_almost_equal(wcoef3, coef3)
    #
    wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
    wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)

    # test scaling with complex values x points whose square
    # is zero when summed.
    x = [1, 1j, -1, -1j]
    assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
    assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1])

    # test fitting only even Legendre polynomials
    x = np.linspace(-1, 1)
    y = f2(x)
    coef1 = leg.legfit(x, y, 4)
    assert_almost_equal(leg.legval(x, coef1), y)
    coef2 = leg.legfit(x, y, [0, 2, 4])
    assert_almost_equal(leg.legval(x, coef2), y)
    assert_almost_equal(coef1, coef2)
def legendre_fit(fit_me, xi, xf, weights, order=7, *args, **kwargs):
    legfunc = legendre.legfit(xi, fit_me, deg=order, w=weights)
    fitted = legendre.legval(xf, legfunc)
    residual = legendre.legval(xi, legfunc) - fit_me
    return (fitted, residual)
histos.append(histSP)

hist_x_data = np.array([histSP.GetBinCenter(i) for i in range(1, len(histSP.binvalues) + 1)])
hist_y_data = np.array([histSP.GetBinContent(i) for i in range(1, len(histSP.binvalues) + 1)])
hist_y_derr = np.array([histSP.GetBinError(i) for i in range(1, len(histSP.binvalues) + 1)])

fit_x_data = np.cos(hist_x_data * 3.1415 / 180)
best_params, diagnostic = legfit(fit_x_data, hist_y_data, 3, w=1.0 / hist_y_derr, full=True)
print(diagnostic)
print('Dipole-monopole ratio: {0:.2f}\n'.format(best_params[2] / best_params[0]))

data_dict['x_data'] = hist_x_data
data_dict['cuts'][title] = {'params': best_params}
data_dict['cuts'][title]['param_errors'] = diagnostic
data_dict['cuts'][title]['y_data'] = hist_y_data
data_dict['cuts'][title]['y_err'] = hist_y_derr
data_dict['cuts'][title]['cut_range'] = [e0, e1]
data_dict['cuts'][title]['n_trues'] = n_events
data_dict['cuts'][title]['n_accidentals'] = n_accidentals
data_dict['cuts'][title]['n_coinc'] = n_coinc
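# --- Clarifying note on legfit's full=True output (not from the original) ---
# With full=True, numpy's legfit returns the coefficients plus a diagnostic
# list [residuals, rank, singular_values, rcond] from the least-squares
# solve; these are goodness-of-fit diagnostics rather than per-parameter
# uncertainties, so a key name like 'param_errors' (as above) should be read
# with that in mind. A tiny numpy-only sketch with made-up data:
import numpy as np
from numpy.polynomial.legendre import legfit

x = np.cos(np.deg2rad(np.linspace(0.0, 180.0, 19)))
y = 1.0 + 0.3 * x + np.random.default_rng(0).normal(0.0, 0.01, x.size)
coef, (resid, rank, sv, rcond) = legfit(x, y, 3, full=True)
print(resid, rank, sv, rcond)   # sum of squared residuals, matrix rank, singular values, cutoff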
def x2l(self):
    for sle in self.sles:
        self.ul[sle] = lgd.legfit(self.xs, self.ux[sle], self.p_degree)