def cheb1Dfit(x, y, order=5, weight=None, niteration=0,
              high_nsig=0.0, low_nsig=0.0):
    stat = True
    n = len(x)
    if weight is None:
        weight1 = np.ones(n)
    else:
        weight1 = copy.deepcopy(weight)
    c = chebfit(x, y, order, w=weight1)
    for k in range(niteration):
        residualdata = y - chebval(x, c)
        # Calculating weighted standard deviation
        mean, sig = weighted_average_std(residualdata, weight1)
        # Clipping the data
        if mean != 0:
            if sig / mean > 1.0e-10 or sig / mean < -1.0e-10:
                highlimit = high_nsig * sig
                lowlimit = -low_nsig * sig
                for j in range(n):
                    if residualdata[j] > highlimit or \
                            residualdata[j] < lowlimit:
                        weight1[j] = 0.0
        # Fitting again
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            try:
                c = chebfit(x, y, order, w=weight1)
            except np.polynomial.polyutils.RankWarning:
                print('RankWarning')
                stat = False
    return c, weight1, stat
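# --- Illustrative usage sketch (added; not from the original source) ---
# A minimal, self-contained demo of the iterative sigma-clipping idea that
# cheb1Dfit implements: fit, zero the weights of outlying residuals, refit.
# All names below are local to this example.
import numpy as np
from numpy.polynomial.chebyshev import chebfit, chebval

rng = np.random.default_rng(0)
x_demo = np.linspace(-1, 1, 200)
y_demo = np.sin(3 * x_demo) + 0.05 * rng.standard_normal(x_demo.size)
y_demo[::25] += 2.0                      # inject outliers
w_demo = np.ones_like(x_demo)
for _ in range(3):                       # clipping iterations
    c_demo = chebfit(x_demo, y_demo, 5, w=w_demo)
    resid = y_demo - chebval(x_demo, c_demo)
    sig = np.std(resid[w_demo > 0])
    w_demo[np.abs(resid) > 3 * sig] = 0  # clip at 3 sigma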
def normalize_spectra(self):
    """ Normalize the data and model spectra """
    self.spectra['m_flux_norm'] = deepcopy(self.spectra['m_flux'])
    self.spectra['d_flux_norm'] = deepcopy(self.spectra['d_flux'])
    self.spectra['unc_norm'] = deepcopy(self.spectra['unc'])
    chunks = 1000
    min_ = min(self.spectra['wave'])
    max_ = max(self.spectra['wave'])
    num = int((max_ - min_) / chunks) + 1
    for i in range(num):
        k = ((self.spectra['wave'] >= min_ + chunks * i) &
             (self.spectra['wave'] <= min_ + chunks * (i + 1)))
        if len(self.spectra['d_flux_norm'][k]) < 10:
            continue
        coeffs = chebfit(self.spectra['wave'][k],
                         self.spectra['d_flux_norm'][k], 2)
        poly = chebval(self.spectra['wave'][k], coeffs)
        self.spectra['d_flux_norm'][k] = self.spectra['d_flux_norm'][k] / poly
        self.spectra['unc_norm'][k] = self.spectra['unc_norm'][k] / poly
        coeffs = chebfit(self.spectra['wave'][k],
                         self.spectra['m_flux_norm'][k], 2)
        poly = chebval(self.spectra['wave'][k], coeffs)
        self.spectra['m_flux_norm'][k] = self.spectra['m_flux_norm'][k] / poly
def polyLine(startPt, endPt, polyIndex, t, sgf, data):
    x = []
    y = []
    vt = []
    for j in range(startPt, endPt):  # range() excludes endPt, so add 1 if endPt itself is needed
        x.append(t[j])
        y.append(float(sgf[j]))
        vt.append(float(data[j]))
    # ================= Polyfit regression polynomial =========
    # =========================================================================
    # tp = np.polyfit(x, y, polyIndex)
    # ys_line = np.polyval(tp, x)
    # =========================================================================
    # ================= Chebyshev regression polynomial =========
    coeffRaw = chy.chebfit(x, vt, polyIndex)
    coeffSGF = chy.chebfit(x, y, polyIndex)
    ys_lineRaw = chy.chebval(x, coeffRaw)
    ys_lineSGF = chy.chebval(x, coeffSGF)
    # ================= rsq: coefficient of determination =========
    rsqSGF = round(
        coeff_of_determination(np.array(y), ys_lineRaw, startPt, endPt), 2)
    rsqRW = round(
        coeff_of_determination(np.array(vt), ys_lineRaw, startPt, endPt), 2)
    return coeffRaw, ys_lineRaw, rsqSGF, rsqRW
def define_background(self, q, I, k, plot=False, az_idx=0):
    """ Background profile - q, I points with Chebyshev poly fit.

    Can supply 2d array for q and I, which define the background
    intensity as a function of azimuthal position. This is predominantly
    for use with the pyxe package!

    Args:
        q (ndarray): 1d or 2d array with q positions
        I (ndarray): 1d or 2d array with intensity values
        k (int): Order for Chebyshev polynomial
        plot (bool): True/False
        az_idx (int): Azimuthal slice to plot
    """
    # Calculate fit and apply to all slices
    if np.array(q).ndim == 1:
        f = chebfit(q, I, k)
    # Calculate fit for each slice
    else:
        assert self.phi.size == q.shape[0]
        f = np.zeros((self.q.shape[0], k + 1))
        for az in range(q.shape[0]):
            finite = np.isfinite(q[az])
            f[az] = chebfit(q[az][finite], I[az][finite], k)
    if plot:
        plt.plot(self.q, chebval(self.q, f[az_idx]), 'k-')
        x = q[az_idx] if q.ndim == 2 else q
        y = I[az_idx] if q.ndim == 2 else I
        plt.plot(x, y, 'r+')
        plt.show()
    self._back = f
def handle_summary(outname=None, filelist=[]):
    """Extract all std-correction vectors and create ensemble (obsolete)"""
    if outname is None:
        outname = 'correction.npy'

    # Get a list of good correction vectors
    keepers = []
    for ifile in filelist:
        print ifile
        f = np.load(ifile)[0]

        # Correction values should be small
        # if "std-correction" not in data.keys():
        if np.nanmin(f['std-correction']) < 9:
            keepers.append(f)
            print "Keeping %s" % ifile

    # Set fiducial wavelengths from first correction vector
    corl = keepers[0]['nm'].copy()

    # Insert first vector
    cor = np.zeros((len(corl), len(keepers)))
    cor[:, 0] = keepers[0]['std-correction'].copy()

    # Insert the rest of the vectors (start at column 1; column 0 already
    # holds the first vector)
    for ix, keeper in enumerate(keepers[1:], 1):
        f = interp1d(keeper['nm'], keeper['std-correction'],
                     bounds_error=False, fill_value=np.nan)
        cor[:, ix] = f(corl)

    # Create mean correction
    cs = np.nanmean(cor, 1)

    # Fit wl coefficients
    ccs = chebfit(corl, cs, 6)

    # Create output
    cor = [{"nm": corl, "cor": cs, "coeff": ccs}]
    np.save(outname, cor)
def main(plot=False):
    warnings.filterwarnings('error', category=RuntimeWarning)
    days, fuel_mass, uglovi, snaga = pop_init(gen=1)
    print(days.shape)
    print(fuel_mass.shape)
    print(uglovi.shape)
    print(snaga.shape)
    pop_fit = np.empty(broj_jedinki)
    koeficijenti = np.empty((broj_jedinki, podaci.chebdeg + 1))
    fitness = np.loadtxt('gen_99.txt', dtype=float, usecols=364)
    ind = np.argmin(fitness)
    pop_elita = None
    print('Fitness: ', fitness[ind])
    for i in range(broj_gen):
        for j in range(30, 40):
            print(j)
            print('File fitness: ', fitness[j])
            print('Launch offset:', days[j])
            print('Fuel mass:', fuel_mass[j] / 255 * podaci.max_fuel_mass)
            # print('Uglovi:', (uglovi[j] % (2 * pi)) * 360 / (2 * pi))
            _r, _, _, min_dist_dest = simulacija_pogon.simulacija(
                days[j], fuel_mass[j], uglovi[j], snaga[j], y_max)
            # if pop_fit[j] == -1:
            pop_fit[j] = fitnes(min_dist_dest)
            koeficijenti[j] = chebfit(x_osa(broj_segmenata), uglovi[j],
                                      podaci.chebdeg)
            if plot:
                x, y = crtanje_planeta(plot=False)
                plt.plot(np.array(x[2]) / au, np.array(y[2]) / au, 'b--',
                         np.array(x[3]) / au, np.array(y[3]) / au, 'r-.',
                         _r[:, 0] / au, _r[:, 1] / au, 'g:', linewidth=0.9)
                plt.plot(0.0, 0.0, 'k*', markersize=7)
                plt.axis('scaled')
                plt.xlabel('x-osa [astronomska jedinica]')
                plt.ylabel('y-osa [astronomska jedinica]')
                plt.title('Flyby Marsa')
                plt.show()
        # print(podaci.trajanje)
        # pop_bit = np.array([[0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 15.1],
        #                     [1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 5.6],
        #                     [0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 10.8],
        #                     [1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 2.6],
        #                     [0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 4.9]])
        # pop_elita = np.array([[0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1.5]])
        pop_bit = float_to_bit(days[:, np.newaxis], fuel_mass, koeficijenti,
                               snaga)
        # print(pop_bit)
        pop_bit_new, pop_elita = genetski_algoritam.genetski_algoritam(
            pop_bit, pop_elita, podaci.p_elit, podaci.p_mut)
        # print(pop_bit_new)
        # print(pop_elita)
        # date_time, fuel_mass, uglovi, snaga = bit_to_float(pop_bit_new)
    print(np.min(pop_fit))
def fit_poly(w, r, type, order):
    if type == "legendre":
        coef = legfit(w, r, order)
        return coef
    elif type == "chebyshev":
        coef = chebfit(w, r, order)
        return coef
def handle_summary(outname=None, filelist=[]):
    if outname is None:
        outname = 'correction.npy'

    keepers = []
    for file in filelist:
        print file
        f = np.load(file)[0]
        if np.nanmin(f['std-correction']) < 9:
            keepers.append(f)
            print "Keeping %s" % file

    corl = keepers[0]['nm'].copy()
    cor = np.zeros((len(corl), len(keepers)))
    cor[:, 0] = keepers[0]['std-correction'].copy()

    # start at column 1; column 0 already holds the first vector
    for ix, keeper in enumerate(keepers[1:], 1):
        f = interp1d(keeper['nm'], keeper['std-correction'],
                     bounds_error=False, fill_value=np.nan)
        cor[:, ix] = f(corl)

    cs = np.nanmean(cor, 1)
    ccs = chebfit(corl, cs, 6)

    cor = [{"nm": corl, "cor": cs, "coeff": ccs}]
    np.save(outname, cor)
def cheb_fitcurve(x, y, order):
    # NOTE: the incoming x and order are overridden below; the fit is done
    # on Chebyshev points of the second kind with a fixed order of 64.
    x = cheb.chebpts2(len(x))
    order = 64

    coef = legend.legfit(x, y, order)
    assert_equal(len(coef), order + 1)
    y1 = legend.legval(x, coef)
    err_1 = np.linalg.norm(y1 - y) / np.linalg.norm(y)

    coef = cheb.chebfit(x, y, order)
    assert_equal(len(coef), order + 1)
    # Truncate trailing coefficients below a relative threshold
    thrsh = abs(coef[0] / 1000)
    for i in range(len(coef)):
        if abs(coef[i]) < thrsh:
            coef = coef[0:i + 1]
            break
    y2 = cheb.chebval(x, coef)
    err_2 = np.linalg.norm(y2 - y) / np.linalg.norm(y)

    plt.plot(x, y2, '.')
    plt.plot(x, y, '-')
    plt.title("nPt={} order={} err_cheby={:.6g} err_legend={:.6g}".format(
        len(x), order, err_2, err_1))
    plt.show()
    assert_almost_equal(cheb.chebval(x, coef), y)
    # return coef
def polyLine(startPt, endPt, polyIndex, t, sgf, data):
    x = []
    y = []
    vt = []
    for j in range(startPt, endPt):  # range() excludes endPt, so add 1 if endPt itself is needed
        x.append(t[j])
        y.append(float(sgf[j]))
        vt.append(float(data[j]))
    # ================= Polyfit regression polynomial =========
    # =========================================================================
    # tp = np.polyfit(x, y, polyIndex)
    # ys_line = np.polyval(tp, x)
    # =========================================================================
    # ================= Chebyshev regression polynomial =========
    coeff = chy.chebfit(x, y, polyIndex)
    # =========================================================================
    # print(chy.Chebyshev.fit(x, y, polyIndex))
    # print(coeff)
    # print(np.poly1d(coeff))
    # =========================================================================
    ys_line = chy.chebval(x, coeff)
    # =========================================================================
    # approximated_values = np.poly1d(coeff)(x)
    # print(ys_line, approximated_values)
    # =========================================================================
    # ================= rsq: coefficient of determination =========
    rsqSGF = round(
        coeff_of_determination(np.array(y), ys_line, startPt, endPt), 3)
    rsqTT = round(
        coeff_of_determination(np.array(vt), ys_line, startPt, endPt), 3)
    return coeff, ys_line, rsqSGF, rsqTT
def che_fit(txt_path, deg, fit_save_dir):
    '''
    input : a txt of label, imgw, imgh, x, y, w, h, centerx, centery, 360deg
    return  label, imgw, imgh, x, y, w, h, centerx, centery, coef(16,24)
    '''
    with open(txt_path, 'r') as f:
        img_info = np.loadtxt(txt_path)
        img_info = img_info.reshape(-1, 369)
    new_path = os.path.join(fit_save_dir, txt_path.split('/')[-1])
    results = []
    for objects_info in img_info:  # 1,360
        objects_new = np.zeros(9 + deg + 1)
        objects_new[0:9] = objects_info[0:9]
        bboxw = objects_info[5]
        bboxh = objects_info[6]
        bbox_len = np.sqrt(bboxw * bboxw + bboxh * bboxh)
        r = objects_info[9:] / float(bbox_len)
        theta = np.linspace(-1, 1, 360)
        coefficient, Res = chebyshev.chebfit(theta, r, deg, full=True)
        objects_new[9:] = np.array(coefficient)
        results.append(objects_new)
    results = np.array(results)
    np.savetxt(new_path, results)
def test_recursivechebyshevfunction(self):
    '''Test routines to compute using Chebyshev polynomials recursively.'''
    from scipy.linalg import funm
    from numpy.polynomial.chebyshev import chebfit, chebval
    from numpy import exp, linspace

    # Starting Matrix
    matrix1 = self.create_matrix(scaled=True)
    self.write_matrix(matrix1, self.input_file)

    # Function
    x = linspace(-1.0, 1.0, 200)
    y = [exp(i) for i in x]
    coef = chebfit(x, y, 16 - 1)

    # Check Matrix
    dense_check = funm(matrix1.todense(), lambda x: chebval(x, coef))
    self.CheckMat = csr_matrix(dense_check)

    # Result Matrix
    input_matrix = nt.Matrix_ps(self.input_file, False)
    poly_matrix = nt.Matrix_ps(self.mat_dim)

    polynomial = nt.ChebyshevPolynomial(len(coef))
    for j in range(0, len(coef)):
        polynomial.SetCoefficient(j, coef[j])

    permutation = nt.Permutation(input_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.fsp.SetLoadBalance(permutation)
    polynomial.ComputeFactorized(input_matrix, poly_matrix, self.fsp)
    poly_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def Create(x, y, itm_mask, degree):
    # filter out-of-money paths
    x_filtered = list(itertools.compress(x, itm_mask))
    y_filtered = list(itertools.compress(y, itm_mask))
    interpolator = ChebyshevInterpolator(
        chebyshev.chebfit(x_filtered, y_filtered, degree))
    return interpolator
def get_coff(hull, degs):
    dist_degs = np.array(compute_dist(degs, hull))
    dist_len = np.sqrt(dist_degs[:, 0]**2 + dist_degs[:, 1]**2)
    coefficient, Res = chebyshev.chebfit(degs, dist_len, 8, full=True)
    return coefficient
def polynomial_max_like(self):
    order = int(self.param["order"])
    ratio = self.y_model / self.y
    errs = np.abs(self.y_err * self.y_model / self.y**2)
    coefs = chebfit(self.x, ratio, order, w=1. / errs)
    self.poly_coefs = np.array(coefs)
    self.model = chebval(self.x, coefs)
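# --- Weighting note (added; illustrative) ---
# numpy's chebfit applies w to the *unsquared* residuals, so inverse-variance
# (maximum-likelihood) weighting corresponds to w = 1/sigma, as in
# polynomial_max_like above. A self-contained sketch:
import numpy as np
from numpy.polynomial.chebyshev import chebfit

rng = np.random.default_rng(2)
x_demo = np.linspace(-1, 1, 100)
sigma = 0.1 + 0.4 * (x_demo + 1)         # heteroscedastic errors
y_demo = 2 * x_demo + sigma * rng.standard_normal(x_demo.size)
coefs_demo = chebfit(x_demo, y_demo, 1, w=1.0 / sigma)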
def main1():
    days, fuel_mass, uglovi, snaga = pop_init()
    print(days)
    koeficijenti = chebfit(x_osa(broj_segmenata), uglovi.T, podaci.chebdeg).T
    # print(uglovi, snaga)
    pop_bit = float_to_bit(days, fuel_mass, koeficijenti, snaga)
    days2, fuel_mass2, uglovi2, snaga2 = bit_to_float(pop_bit)
    print(days2 - days)
def getCont(wave, flam, ss=21.):
    """Fits and returns continuum"""
    cont = maximum_filter(savgol_filter(flam, 5, 2, mode='interp'), ss)
    # Rescale wavelengths to the Chebyshev domain [-1, 1]
    chebx = wave - wave.min()
    chebx *= 2. / chebx.max()
    chebx -= 1.
    contfit = chebfit(chebx, cont, 6)
    cont = chebval(chebx, contfit)
    return cont
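# --- Domain-scaling sketch (added; illustrative) ---
# getCont maps the wavelength axis onto [-1, 1] before fitting; Chebyshev
# polynomials are orthogonal on that interval, so the least-squares system
# is much better conditioned there. The same mapping, stand-alone:
import numpy as np
from numpy.polynomial.chebyshev import chebfit, chebval

wave_demo = np.linspace(4000.0, 9000.0, 500)    # hypothetical grid
flam_demo = 1.0 + ((wave_demo - 6500.0) / 5000.0) ** 2
chebx = 2.0 * (wave_demo - wave_demo.min()) / np.ptp(wave_demo) - 1.0
cont_demo = chebval(chebx, chebfit(chebx, flam_demo, 6))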
def spline_interpolation(pointx, pointy, wave, wave_temp, flux, flux_temp,
                         chebfitval, linewidth=2.0, endpoints='y',
                         endpoint_order=4):
    """Sort spline points and interpolate between marked continuum points"""
    from numpy.polynomial import chebyshev
    from scipy.interpolate import splrep, splev

    # Insert endpoints
    if endpoints == "y" or endpoints == "t":
        sort_array = np.argsort(pointx)
        x = np.array(pointx)[sort_array]
        y = np.array(pointy)[sort_array]
        chebfit = chebyshev.chebfit(x, y, deg=endpoint_order)
        chebfitval = chebyshev.chebval(wave, chebfit)
        i = wave[150], wave[-150]
        window1 = ((i[0] - 70) <= wave) & (wave <= (i[0] + 70))
        window2 = ((i[1] - 70) <= wave) & (wave <= (i[1] + 70))
        y_val = (np.median(chebfitval[window1]).astype(np.float64),
                 np.median(chebfitval[window2]).astype(np.float64))
        pointx = np.concatenate([pointx, i])
        pointy = np.concatenate([pointy, y_val])
        ind_uni = f8(pointx)
        pointx = np.array(pointx)[ind_uni]
        pointy = np.array(pointy)[ind_uni]

    # Sort numerically
    # print(pointx)
    sort_array = np.argsort(pointx)
    # print(sort_array, pointx)
    # x, y = pointx[sort_array], pointy[sort_array]
    x = np.array(pointx)[sort_array]
    y = np.array(pointy)[sort_array]

    # Interpolate
    spline = splrep(x, y, k=3)

    def tap(s, x_temp):
        # offset wavelength array to ensure that wavelength does not
        # become negative in the mirror
        diff = max(x_temp) - min(x_temp)
        k = np.tanh((x_temp - min(x_temp)) / (diff / 10.0))
        return k

    if endpoints == "t":
        continuum = (splev(wave, spline) * tap(10, wave) *
                     tap(10, 2 * np.mean(wave) - wave) +
                     chebfitval * (1 - tap(10, wave)) +
                     chebfitval * (1 - tap(10, 2 * np.mean(wave) - wave)))
    else:
        continuum = splev(wave, spline)

    return continuum
def cheby_newton_root(z, f, z0=None, degree=512):
    import numpy.polynomial.chebyshev as npcheb
    import scipy.optimize as scpop

    Lz = np.max(z) - np.min(z)
    if z0 is None:
        z0 = Lz / 2

    def to_x(z, Lz):
        # convert to [-1, 1]
        return (2 / Lz) * z - 1

    def to_z(x, Lz):
        # convert back from [-1, 1]
        return (x + 1) * Lz / 2

    logger.info("searching for roots starting from z={}".format(z0))
    x = to_x(z, Lz)
    x0 = to_x(z0, Lz)
    cheb_coeffs = npcheb.chebfit(x, f, degree)
    cheb_interp = npcheb.Chebyshev(cheb_coeffs)
    cheb_der = npcheb.chebder(cheb_coeffs)

    def newton_func(x_newton):
        return npcheb.chebval(x_newton, cheb_coeffs)

    def newton_derivative_func(x_newton):
        return npcheb.chebval(x_newton, cheb_der)

    try:
        x_root = scpop.newton(newton_func, x0,
                              fprime=newton_derivative_func, tol=1e-10)
        z_root = to_z(x_root, Lz)
    except:
        logger.info("error in root find")
        x_root = np.nan
        z_root = np.nan
    logger.info("newton: found root z={} (x0:{} -> {})".format(
        z_root, x0, x_root))

    for x0 in x:
        print(x0, newton_func(x0))

    a = Lz / 4
    b = Lz * 3 / 4
    logger.info("bisecting between z=[{},{}] (x=[{},{}])".format(
        a, b, to_x(a, Lz), to_x(b, Lz)))
    logger.info("f(a) = {} and f(b) = {}".format(newton_func(to_x(a, Lz)),
                                                 newton_func(to_x(b, Lz))))
    x_root_2 = scpop.bisect(newton_func, to_x(a, Lz), to_x(b, Lz))
    z_root_2 = to_z(x_root_2, Lz)
    logger.info("bisect: found root z={} (x={})".format(z_root_2, x_root_2))
    return z_root_2
def to_coefficients(self, f):
    from numpy.polynomial.chebyshev import chebfit
    import numpy as np

    # Convert infinite grid to xg = [-1, 1]
    xg = self.zg / np.sqrt(self.C ** 2 + self.zg ** 2)

    # Get coefficients for standard Chebyshev polynomials
    c, res = chebfit(xg, f, deg=self.N, full=True)
    return c
def spline_interpolation(pointx, pointy, wave, wave_temp, flux, flux_temp,
                         axis, leg_instance, con_instance, linewidth=2.0,
                         endpoints='y', endpoint_order=4):
    """Sort spline points and interpolate between marked continuum points"""
    from numpy.polynomial import chebyshev
    from scipy.interpolate import splrep, splev

    # Insert endpoints
    if endpoints == 'y':
        sort_array = np.argsort(pointx)
        x = np.array(pointx)[sort_array]
        y = np.array(pointy)[sort_array]
        chebfit = chebyshev.chebfit(x, y, deg=endpoint_order)
        chebfitval = chebyshev.chebval(wave, chebfit)
        i = wave[150], wave[-150]
        window1 = ((i[0] - 70) <= wave) & (wave <= (i[0] + 70))
        window2 = ((i[1] - 70) <= wave) & (wave <= (i[1] + 70))
        y_val = (np.median(chebfitval[window1]).astype(np.float64),
                 np.median(chebfitval[window2]).astype(np.float64))
        pointx = np.concatenate([pointx, i])
        pointy = np.concatenate([pointy, y_val])
        ind_uni = f8(pointx)
        pointx = np.array(pointx)[ind_uni]
        pointy = np.array(pointy)[ind_uni]

        try:
            leg_instance.remove()
        except AttributeError:
            pass
        finally:
            leg_instance, = axis.plot(wave, chebfitval, color='black',
                                      lw=linewidth, label='legendre fit',
                                      zorder=10)

    # print(pointx, pointy)
    # Sort numerically
    sort_array = np.argsort(pointx)
    x = np.array(pointx)[sort_array]
    y = np.array(pointy)[sort_array]

    from gen_methods import smooth

    # Interpolate
    spline = splrep(x, y, k=3)
    continuum = splev(wave, spline)
    continuum = smooth(continuum, window_len=1, window='hanning')

    # Plot
    try:
        con_instance.remove()
    except AttributeError:
        pass
    finally:
        con_instance, = axis.plot(wave, continuum, color='red',
                                  lw=linewidth, label='continuum', zorder=10)

    return continuum, leg_instance, con_instance
def to_coefficients(self, f):
    from numpy.polynomial.chebyshev import chebfit
    import numpy as np

    # Convert grid to standard xg = [-1, 1]
    xg = (self.zg - self.zmin) / self.L * 2. - 1.

    # Get coefficients for standard Chebyshev polynomials
    c, res = chebfit(xg, f, deg=self.N, full=True)
    return c
def to_coefficients(self, f):
    """Convert from grid values to coefficients"""
    from numpy.polynomial.chebyshev import chebfit

    # Convert semi-infinite grid to xg = [-1, 1]
    xg = (self.zg - self.C) / (self.zg + self.C)

    # Get coefficients for standard Chebyshev polynomials
    c, res = chebfit(xg, f, deg=self.N, full=True)
    return c
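# --- Grid-mapping note (added; illustrative) ---
# The to_coefficients variants above differ only in how the physical grid is
# mapped onto [-1, 1]: algebraic for an infinite grid, linear for a finite
# box, and the rational map used here for a semi-infinite grid, whose
# inverse is z = C * (1 + x) / (1 - x).
import numpy as np

C_demo = 1.0
zg_demo = np.linspace(0.0, 50.0, 11)     # hypothetical semi-infinite grid
xg_demo = (zg_demo - C_demo) / (zg_demo + C_demo)
assert np.allclose(C_demo * (1 + xg_demo) / (1 - xg_demo), zg_demo)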
def test_chebfit(self):
    def f(x):
        return x * (x - 1) * (x - 2)

    # Test exceptions
    assert_raises(ValueError, ch.chebfit, [1], [1], -1)
    assert_raises(TypeError, ch.chebfit, [[1]], [1], 0)
    assert_raises(TypeError, ch.chebfit, [], [1], 0)
    assert_raises(TypeError, ch.chebfit, [1], [[[1]]], 0)
    assert_raises(TypeError, ch.chebfit, [1, 2], [1], 0)
    assert_raises(TypeError, ch.chebfit, [1], [1, 2], 0)

    # Test fit
    x = np.linspace(0, 2)
    y = f(x)
    coef = ch.chebfit(x, y, 3)
    assert_equal(len(coef), 4)
    assert_almost_equal(ch.chebval(x, coef), y)
    coef = ch.chebfit(x, y, 4)
    assert_equal(len(coef), 5)
    assert_almost_equal(ch.chebval(x, coef), y)
    coef2d = ch.chebfit(x, np.array([y, y]).T, 4)
    assert_almost_equal(coef2d, np.array([coef, coef]).T)
def fit_range(R, T, calrange, redo_lims=True):
    # try binning in T to estimate noise for each point
    inds = np.where((T < calrange.upper) & (T > calrange.lower))
    Z = np.log10(R[inds])
    if redo_lims:
        ZL = np.min(Z)
        ZU = np.max(Z)
    else:
        ZL = calrange.ZL
        ZU = calrange.ZU
    x = ((Z - ZL) - (ZU - Z)) / (ZU - ZL)
    w = get_weights(x, T[inds])
    return x, cheby.chebfit(x, T[inds], calrange.order, w=w)
def test_chebfit(self):
    def f(x):
        return x * (x - 1) * (x - 2)

    # Test exceptions
    assert_raises(ValueError, ch.chebfit, [1], [1], -1)
    assert_raises(TypeError, ch.chebfit, [[1]], [1], 0)
    assert_raises(TypeError, ch.chebfit, [], [1], 0)
    assert_raises(TypeError, ch.chebfit, [1], [[[1]]], 0)
    assert_raises(TypeError, ch.chebfit, [1, 2], [1], 0)
    assert_raises(TypeError, ch.chebfit, [1], [1, 2], 0)
    assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[[1]])
    assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[1, 1])

    # Test fit
    x = np.linspace(0, 2)
    y = f(x)
    #
    coef3 = ch.chebfit(x, y, 3)
    assert_equal(len(coef3), 4)
    assert_almost_equal(ch.chebval(x, coef3), y)
    #
    coef4 = ch.chebfit(x, y, 4)
    assert_equal(len(coef4), 5)
    assert_almost_equal(ch.chebval(x, coef4), y)
    #
    coef2d = ch.chebfit(x, np.array([y, y]).T, 3)
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
    # test weighting
    w = np.zeros_like(x)
    yw = y.copy()
    w[1::2] = 1
    y[0::2] = 0
    wcoef3 = ch.chebfit(x, yw, 3, w=w)
    assert_almost_equal(wcoef3, coef3)
    #
    wcoef2d = ch.chebfit(x, np.array([yw, yw]).T, 3, w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
def test_chebfit(self):
    def f(x):
        return x * (x - 1) * (x - 2)

    # Test exceptions
    assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
    assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
    assert_raises(TypeError, cheb.chebfit, [], [1], 0)
    assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0)
    assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0)
    assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
    assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
    assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])

    # Test fit
    x = np.linspace(0, 2)
    y = f(x)
    #
    coef3 = cheb.chebfit(x, y, 3)
    assert_equal(len(coef3), 4)
    assert_almost_equal(cheb.chebval(x, coef3), y)
    #
    coef4 = cheb.chebfit(x, y, 4)
    assert_equal(len(coef4), 5)
    assert_almost_equal(cheb.chebval(x, coef4), y)
    #
    coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
    # test weighting
    w = np.zeros_like(x)
    yw = y.copy()
    w[1::2] = 1
    y[0::2] = 0
    wcoef3 = cheb.chebfit(x, yw, 3, w=w)
    assert_almost_equal(wcoef3, coef3)
    #
    wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
def find_func(x, yreal):
    import numpy as np
    from numpy.polynomial.chebyshev import chebfit, chebval

    # Degree len(yreal) - 1 gives an exact interpolant through every point;
    # using len(yreal) would make the fit rank-deficient.
    deg = len(yreal) - 1
    f = np.polyfit(x, yreal, deg)
    c = chebfit(x, yreal, deg)
    ytest_poly = [round(np.polyval(f, xi), 3) for xi in x]
    ytest_cheb = [round(chebval(xi, c), 3) for xi in x]
    ytest = ytest_poly
    function_coeff = f
    return ytest, function_coeff
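# --- Degree note (added; illustrative) ---
# With n samples, degree n - 1 already reproduces the data exactly (the
# Vandermonde system is square), which is why find_func uses
# len(yreal) - 1 above; a larger degree only adds rank-deficiency.
import numpy as np
from numpy.polynomial.chebyshev import chebfit, chebval

x_demo = np.array([0.0, 0.5, 1.0, 1.5])
y_demo = np.array([1.0, 2.0, 0.5, 3.0])
c_demo = chebfit(x_demo, y_demo, len(y_demo) - 1)
assert np.allclose(chebval(x_demo, c_demo), y_demo)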
def main():
    warnings.filterwarnings("error", category=RuntimeWarning)
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    n = comm.Get_size()
    pop_elita = None
    if rank == 0:
        days, fuel_mass, uglovi, snaga = pop_init()
        pop_fit = np.empty(broj_jedinki)
        koeficijenti = np.empty((broj_jedinki, podaci.chebdeg + 1))
        for i in range(broj_gen):
            for j in range(broj_jedinki):
                proc_id = comm.recv(source=MPI.ANY_SOURCE, tag=1)
                comm.send(
                    (j, days[j], fuel_mass[j], uglovi[j], snaga[j], y_max),
                    dest=proc_id)
            waiting = n
            while waiting > 0:
                data = comm.recv(source=MPI.ANY_SOURCE, tag=2)
                (j, r_, _, _, min_dist_dest) = data
                pop_fit[j] = fitnes(min_dist_dest)
                koeficijenti[j] = chebfit(x_osa(broj_segmenata), uglovi[j],
                                          podaci.chebdeg)
                waiting = waiting - 1
            pop_bit = float_to_bit(days[:, np.newaxis], fuel_mass,
                                   koeficijenti, snaga, pop_fit)
            # print(pop_bit)
            pop_bit_new, pop_elita = genetski_algoritam.genetski_algoritam(
                pop_bit, pop_elita, podaci.p_elit, podaci.p_mut)
            # print(pop_bit_new)
            # print(pop_elita)
            days, fuel_mass, uglovi, snaga = bit_to_float(pop_bit_new)
            for it in range(broj_jedinki):
                days[it] = days[it] % max_time_span
            if i % 10 == 0:
                np.savetxt('gen_' + str(i) + '.txt', pop_bit_new, fmt='%d')
        for i in range(1, n):
            proc_id = comm.recv(source=MPI.ANY_SOURCE)
            comm.send(-1, dest=proc_id)
    else:
        while True:
            comm.send(comm.Get_rank(), dest=0, tag=1)
            data = comm.recv(source=0)
            if data != -1:
                _r, _v, _step, min_dist_dest = simulacija_pogon.simulacija(
                    data[1], data[2], data[3], data[4], data[5])
                comm.send((data[0], _r, _v, _step, min_dist_dest), dest=0,
                          tag=2)
            else:
                print('Proc ' + str(comm.Get_rank()) + ' logs out')
                break
def interpCby(self, x_in, t_in, deg=20):
    # print("t_in", t_in)
    # print("x_in", x_in)
    self.timeb = [t_in.min(), t_in.max()]
    self.chebdeg = deg
    x_in = np.transpose(x_in)
    t_fit = t_in - self.timeb[0]
    cheb_coef = cheby.chebfit(x=t_fit, y=x_in, deg=deg, full=False)
    # print(cheby.chebfit(x=t_in, y=x_in, deg=deg, full=True))
    # print("cheb_coef", cheb_coef)
    # cheb_coef = cheb_coef[0]
    self.cheb = cheb_coef
def cheby_fit(contours, num_coefs):
    coefs, r_maxs = [], []
    for contour in contours:
        contour = np.array(contour)
        theta = np.linspace(-1, 1, 360, endpoint=False)
        r = contour[:, 1]
        r_max = np.max(r)
        r = r / r_max
        coef, res = chebyshev.chebfit(theta, r, num_coefs, full=True)
        coefs.append(coef)
        r_maxs.append(r_max)
    coefs = np.array(coefs)
    r_maxs = np.array(r_maxs)
    cheby_coef = np.hstack((coefs, r_maxs[:, np.newaxis]))
    return cheby_coef
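# --- Reconstruction sketch (added; illustrative, not from the source) ---
# cheby_fit stores normalised radii plus r_max per contour; a contour can be
# rebuilt by evaluating the coefficients on the same theta grid and
# rescaling. cheby_reconstruct below is a hypothetical helper.
import numpy as np
from numpy.polynomial import chebyshev

def cheby_reconstruct(cheby_coef_row):
    coef, r_max = cheby_coef_row[:-1], cheby_coef_row[-1]
    theta = np.linspace(-1, 1, 360, endpoint=False)
    return chebyshev.chebval(theta, coef) * r_max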
def continuum_fit(wl, flux, fluxerr, edge_mask_len=50):
    """
    Small function to estimate continuum. Takes a spectrum and uses only
    the edges, as specified by edge_mask_len, fits a polynomial and returns
    the fit.
    :param wl: Wavelength array in which to estimate continuum
    :param flux: Corresponding flux array
    :param fluxerr: Corresponding array containing flux errors
    :param edge_mask_len: Size of edges to use to interpolate continuum
    :return: Interpolated continuum
    """
    from numpy.polynomial import chebyshev

    wl_chebfit = np.hstack((wl[:edge_mask_len], wl[-edge_mask_len:]))
    mask_cont = np.array([i for i, n in enumerate(wl) if n in wl_chebfit])
    chebfit = chebyshev.chebfit(wl_chebfit, flux[mask_cont], deg=1,
                                w=1 / fluxerr[mask_cont]**2)
    chebfitval = chebyshev.chebval(wl, chebfit)
    return chebfitval, chebfit
def th_plot(data, tt):
    fields = 'concurence', 'iops_mediana', 'lat_mediana'
    conc_4k = filter_data('concurrence_test_' + tt, fields, blocksize='4k')
    filtered_data = sorted(list(conc_4k(data)))

    x, iops, lat = zip(*filtered_data)

    _, ax1 = plt.subplots()

    xnew = np.linspace(min(x), max(x), 50)
    # plt.plot(xnew, power_smooth, 'b-', label='iops')
    ax1.plot(x, iops, 'b*')

    for degree in (3,):
        c = chebfit(x, iops, degree)
        vals = chebval(xnew, c)
        ax1.plot(xnew, vals, 'g--')
def chebForwardTransform(orders, locations, functionVals):
    if len(locations.shape) == 1:
        return np.array(cheb.chebfit(locations, functionVals, orders[0]))
    else:
        if locations.shape[1] == 2:
            V = cheb.chebvander2d(locations[:, 0], locations[:, 1], orders)
        elif locations.shape[1] == 3:
            V = cheb.chebvander3d(locations[:, 0], locations[:, 1],
                                  locations[:, 2], orders)
        elif locations.shape[1] == 4:
            V = chebVander4d(locations, orders)
        elif locations.shape[1] == 5:
            V = chebVander5d(locations, orders)
        else:
            raise NotImplementedError
            # there's a bad startup joke about this being good enough for
            # the paper.
        ret, _, _, _ = npl.lstsq(V, functionVals, rcond=None)
        return np.reshape(ret, (np.array(orders) + 1).flatten())
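# --- 2-D Vandermonde fit sketch (added; illustrative) ---
# chebForwardTransform's multi-dimensional branch is ordinary least squares
# on a Chebyshev-Vandermonde matrix; the same computation spelled out for a
# small 2-D sample:
import numpy as np
import numpy.polynomial.chebyshev as cheb

rng = np.random.default_rng(1)
pts = rng.uniform(-1, 1, size=(100, 2))
vals = pts[:, 0] ** 2 + pts[:, 0] * pts[:, 1]
V_demo = cheb.chebvander2d(pts[:, 0], pts[:, 1], [2, 2])
c_flat, _, _, _ = np.linalg.lstsq(V_demo, vals, rcond=None)
c_grid = c_flat.reshape(3, 3)            # (degx + 1, degy + 1) grid
assert np.allclose(cheb.chebval2d(pts[:, 0], pts[:, 1], c_grid), vals)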
def handle_A(A, fine, outname=None, standard=None, corrfile=None,
             Aoffset=None, radius=2, flat_corrections=None, nosky=False,
             lmin=650, lmax=700):
    '''Loads 2k x 2k IFU frame "A" and extracts spectra from the locations
    in "fine".

    Args:
        A (string): filename of ifu FITS file to extract from.
        fine (string): filename of NumPy file with locations + wavelength
            soln
        outname (string): filename to write results to
        Aoffset (2tuple): X (nm)/Y (pix) shift to apply for flexure
            correction
        radius (float): Extraction radius in arcsecond
        flat_corrections (list): A list of FlatCorrection objects for
            correcting the extraction
        nosky (Boolean): if True don't subtract sky, merely sum in aperture

    Returns:
        The extracted spectrum, a dictionary:
            {'ph_10m_nm': Flux in photon / 10 m / nanometer integrated
            'nm': Wavelength solution in nm
            'N_spax': Total number of spaxels that created ph_10m_nm
            'skyph': Sky flux in photon / 10 m / nanometer / spaxel
            'radius_as': Extraction radius in arcsec
            'pos': X/Y extraction location of spectrum in arcsec}

    Raises:
        None
    '''

    fine = np.load(fine)
    if outname is None:
        outname = "%s" % (A)

    if Aoffset is not None:
        ff = np.load(Aoffset)
        flexure_x_corr_nm = ff[0]['dXnm']
        flexure_y_corr_pix = ff[0]['dYpix']
        print "Dx %2.1f nm | Dy %2.1f px" % (ff[0]['dXnm'], ff[0]['dYpix'])
    else:
        flexure_x_corr_nm = 0
        flexure_y_corr_pix = 0

    if os.path.isfile(outname + ".npy"):
        print "USING extractions in %s.npy!" % outname
        print "rm %s.npy # if you want to recreate extractions" % outname
        E, meta = np.load(outname + ".npy")
        E_var, meta_var = np.load("var_" + outname + ".npy")
    else:
        print "\nCREATING extractions ..."
        spec = pf.open(A)
        adcspeed = spec[0].header["ADCSPEED"]
        if adcspeed == 2:
            read_var = 22 * 22
        else:
            read_var = 5 * 5
        var = addcon(A, str(read_var), "var_" + outname + ".fits")

        print "\nExtracting object spectra"
        E, meta = Wavelength.wavelength_extract(
            spec, fine, filename=outname,
            flexure_x_corr_nm=flexure_x_corr_nm,
            flexure_y_corr_pix=flexure_y_corr_pix,
            flat_corrections=flat_corrections)
        meta['airmass'] = spec[0].header['airmass']
        header = {}
        for k, v in spec[0].header.iteritems():
            try:
                header[k] = v
            except:
                pass
        meta['HA'] = spec[0].header['HA']
        meta['Dec'] = spec[0].header['Dec']
        meta['RA'] = spec[0].header['RA']
        meta['PRLLTC'] = spec[0].header['PRLLTC']
        meta['equinox'] = spec[0].header['Equinox']
        meta['utc'] = spec[0].header['utc']
        meta['header'] = header
        meta['exptime'] = spec[0].header['exptime']
        np.save(outname, [E, meta])

        print "\nExtracting variance spectra"
        E_var, meta_var = Wavelength.wavelength_extract(
            var, fine, filename=outname,
            flexure_x_corr_nm=flexure_x_corr_nm,
            flexure_y_corr_pix=flexure_y_corr_pix,
            flat_corrections=flat_corrections)
        np.save("var_" + outname, [E_var, meta_var])

    object = meta['header']['OBJECT'].split()[0]
    sixA, posA, adcpos, radius_used = identify_spectra_gui(
        E, radius=radius, PRLLTC=Angle(meta['PRLLTC'], unit='deg'),
        lmin=lmin, lmax=lmax, object=object, airmass=meta['airmass'])

    to_image(E, meta, outname, posA=posA, adcpos=adcpos)
    if standard is None:
        kixA = identify_bgd_spectra(E, posA, inner=radius_used * 1.1)
    else:
        kixA = identify_sky_spectra(E, posA, inner=radius_used * 1.1)

    # get the mean spectrum over the selected spaxels
    resA = interp_spectra(E, sixA, outname=outname + ".pdf",
                          corrfile=corrfile)
    skyA = interp_spectra(E, kixA, outname=outname + "_sky.pdf",
                          corrfile=corrfile)
    varA = interp_spectra(E_var, sixA, outname=outname + "_var.pdf",
                          corrfile=corrfile)

    ## Plot out the X/Y positions of the selected spaxels
    XSA = []
    YSA = []
    XSK = []
    YSK = []
    for ix in sixA:
        XSA.append(E[ix].X_as)
        YSA.append(E[ix].Y_as)
    for ix in kixA:
        XSK.append(E[ix].X_as)
        YSK.append(E[ix].Y_as)

    pl.figure()
    pl.clf()
    pl.ylim(-30, 30)
    pl.xlim(-30, 30)
    pl.scatter(XSA, YSA, color='red', marker='H', linewidth=.1)
    pl.scatter(XSK, YSK, color='green', marker='H', linewidth=.1)
    pl.savefig("XYs_%s.pdf" % outname)
    pl.close()
    # / End Plot

    # Define our standard wavelength grid
    ll = Wavelength.fiducial_spectrum()

    # Resample sky onto standard wavelength grid
    sky_A = interp1d(skyA[0]['nm'], skyA[0]['ph_10m_nm'], bounds_error=False)
    sky = sky_A(ll)

    # Resample variance onto standard wavelength grid
    var_A = interp1d(varA[0]['nm'], varA[0]['ph_10m_nm'], bounds_error=False)
    varspec = var_A(ll)

    # Copy and resample object spectrum onto standard wavelength grid
    res = np.copy(resA)
    res = [{"doc": resA[0]["doc"],
            "ph_10m_nm": np.copy(resA[0]["ph_10m_nm"]),
            "spectra": np.copy(resA[0]["spectra"]),
            "coefficients": np.copy(resA[0]["coefficients"]),
            "nm": np.copy(resA[0]["ph_10m_nm"])}]
    res[0]['nm'] = np.copy(ll)
    f1 = interp1d(resA[0]['nm'], resA[0]['ph_10m_nm'], bounds_error=False)

    # Calculate airmass correction
    airmass = meta['airmass']
    extCorr = 10**(Atm.ext(ll * 10) * airmass / 2.5)
    print "Median airmass corr: %.4f" % np.median(extCorr)

    # Calculate output corrected spectrum
    if nosky:
        # Account for airmass and aperture
        res[0]['ph_10m_nm'] = f1(ll) * extCorr * len(sixA)
    else:
        # Account for sky, airmass and aperture
        res[0]['ph_10m_nm'] = (f1(ll) - sky_A(ll)) * extCorr * len(sixA)

    # Process standard star objects
    if standard is not None:
        print "STANDARD"

        # Extract reference data
        wav = standard[:, 0] / 10.0
        flux = standard[:, 1]

        # Calculate/Interpolate correction onto object wavelengths
        fun = interp1d(wav, flux, bounds_error=False, fill_value=np.nan)
        correction0 = fun(res[0]['nm']) / res[0]['ph_10m_nm']

        # Filter for resolution
        flxf = filters.gaussian_filter(flux, 19.)

        # Calculate/Interpolate filtered correction
        fun = interp1d(wav, flxf, bounds_error=False, fill_value=np.nan)
        correction = fun(res[0]['nm']) / res[0]['ph_10m_nm']

        # Use unfiltered for H-beta region
        ROI = (res[0]['nm'] > 470.) & (res[0]['nm'] < 600.)
        correction[ROI] = correction0[ROI]

        res[0]['std-correction'] = correction
        res[0]['std-maxnm'] = np.max(wav)

    res[0]['exptime'] = meta['exptime']
    res[0]['Extinction Correction'] = 'Applied using Hayes & Latham'
    res[0]['extinction_corr'] = extCorr
    res[0]['skyph'] = sky * len(sixA)
    res[0]['skynm'] = ll
    res[0]['var'] = varspec
    res[0]['radius_as'] = radius_used
    res[0]['position'] = posA
    res[0]['N_spax'] = len(sixA)
    res[0]['meta'] = meta
    res[0]['object_spaxel_ids'] = sixA
    res[0]['sky_spaxel_ids'] = kixA
    res[0]['sky_spectra'] = skyA[0]['spectra']

    coef = chebfit(np.arange(len(ll)), ll, 4)
    xs = np.arange(len(ll) + 1)
    newll = chebval(xs, coef)
    res[0]['dlam'] = np.diff(newll)

    np.save("sp_" + outname, res)
    print "Wrote sp_" + outname + ".npy"
start = 0
for i, index in enumerate(index_ranges):
    # Where the telluric features are
    t_start = int(index[0][0])
    t_end = int(index[1][0])

    # Get the indices away from the telluric features
    end = t_start

    # Rescale lamda array to be between -1 and 1
    l = np.array([2 * (w - lamdas[start:end].min()) /
                  (lamdas[start:end].max() - lamdas[start:end].min()) - 1
                  for w in lamdas[start:end]])

    polyfit = chebfit(l, spec[start:end], order)
    polynomial = chebval(l, polyfit)

    print("Fitting Polynomials from lamda={}A to lamda={}A".format(
        lamdas[start], lamdas[end]))

    fit_spec[start:end] = polynomial
    fit_spec_no_smooth[start:end] = polynomial

    fit_spec[start:start + pixels] = start_blend(start, pixels, fit_spec,
                                                 spec)
    fit_spec[end - pixels:end] = end_blend(end, pixels, fit_spec, spec)

    # Update the start value for the next section of the spectrum to fit
    start = t_end

# Do the last section of the fit
s = index_ranges[-1][1][0]
# Rescale the wavelength array to be between -1 and 1.
def wavelegth_fitter(wavelength_fits, cube, var, aperture_mask, lamdas,
                     order, aperture_indices, plot=False, verbose=False):
    """
    Fit polynomials in the wavelength direction of each pixel in an
    aperture. This makes the P(x, lamda) values used in Horne 1986.
    Returns a cube of model values, called mcube.

    wavelength_fits is a cube of un-normalised values, initially all 0s
    cube is the datacube in question
    var is the variance cube
    aperture_mask is a 3D mask of True and False. Good values=True,
        bad ones=False
    lamdas is an array of lamda values along the wavelength axis, formed
        from CRPIX, CRVAL, CDELT in the fits header
    order is the order of fitting
    aperture_indices are indices of the cube corresponding to our circular
        aperture, in an Nx2 array. This is looped over.
    plot=True shows a plot of the polynomials (both before and after
        normalising spatially)
    verbose=True prints more to the terminal
    """
    # Lamdas must be scaled to between -1 and 1 for Chebyshev fitting
    min_val = lamdas.min()
    max_val = lamdas.max()
    scaled_l = np.array([2 * (lamda - min_val) / (max_val - min_val) - 1
                         for lamda in lamdas])

    for n, (i, j) in enumerate(zip(aperture_indices[0],
                                   aperture_indices[1])):
        # Take each spaxel and fit a polynomial along the wavelength
        # direction, each pixel weighted by one over its variance
        if verbose == True:
            print("Fitting Pixel {}".format(n))
        weights = 1.0 / var[:, i, j]
        # Fit the Chebyshev coefficients
        coefficients = chebfit(scaled_l, cube[:, i, j], order, w=weights)
        # Make a polynomial from these
        polynomial = chebval(scaled_l, coefficients)
        # Ensure all values are positive
        polynomial[polynomial < 0] = 0
        wavelength_fits[:, i, j] = polynomial

    if plot == True:
        for i, j in zip(aperture_indices[0], aperture_indices[1]):
            plt.plot(lamdas, wavelength_fits[:, i, j])
        plt.show()

    if verbose == True:
        print("Normalising Spatially")

    # Normalise spatially
    spatial_norm = np.sum(np.sum(wavelength_fits, axis=2), axis=1)
    mcube = wavelength_fits / spatial_norm[:, np.newaxis, np.newaxis]

    if plot == True:
        for i, j in zip(aperture_indices[0], aperture_indices[1]):
            plt.plot(lamdas, mcube[:, i, j])
        plt.show()

    return mcube
def handle_AB(A, B, fine, outname=None, corrfile=None, Aoffset=None,
              Boffset=None, radius=2, flat_corrections=None, nosky=False,
              lmin=650, lmax=700):
    '''Loads 2k x 2k IFU frame "A" and "B" and extracts A-B and A+B spectra
    from the "fine" location.

    Args:
        A (string): filename of ifu FITS file to extract from.
        B (string): filename of ifu FITS file to extract from.
        fine (string): filename of NumPy file with locations + wavelength
            soln
        outname (string): filename to write results to
        Aoffset (2tuple): X (nm)/Y (pix) shift to apply for flexure
            correction
        Boffset (2tuple): X (nm)/Y (pix) shift to apply for flexure
            correction
        radius (float): Extraction radius in arcsecond
        flat_corrections (list): A list of FlatCorrection objects for
            correcting the extraction
        nosky (Boolean): if True don't subtract sky, merely sum in aperture

    Returns:
        The extracted spectrum, a dictionary:
            {'ph_10m_nm': Flux in photon / 10 m / nanometer integrated
            'var'
            'nm': Wavelength solution in nm
            'N_spaxA': Total number of "A" spaxels
            'N_spaxB': Total number of "B" spaxels
            'skyph': Sky flux in photon / 10 m / nanometer / spaxel
            'radius_as': Extraction radius in arcsec
            'pos': X/Y extraction location of spectrum in arcsec}

    Raises:
        None
    '''

    fine = np.load(fine)
    if outname is None:
        outname = "%sm%s" % (A, B)

    if Aoffset is not None:
        ff = np.load(Aoffset)
        flexure_x_corr_nm = ff[0]['dXnm']
        flexure_y_corr_pix = -ff[0]['dYpix']
        print "Dx %2.1f | Dy %2.1f" % (ff[0]['dXnm'], ff[0]['dYpix'])
    else:
        flexure_x_corr_nm = 0
        flexure_y_corr_pix = 0

    read_var = 5 * 5
    if os.path.isfile(outname + ".fits.npy"):
        print "USING extractions in %s!" % outname
        E, meta = np.load(outname + ".fits.npy")
        E_var, meta_var = np.load("var_" + outname + ".fits.npy")
    else:
        if not outname.endswith(".fits"):
            outname = outname + ".fits"
        diff = subtract(A, B, outname)
        add(A, B, "tmpvar_" + outname)

        adcspeed = diff[0].header["ADCSPEED"]
        if adcspeed == 2:
            read_var = 22 * 22
        else:
            read_var = 5 * 5

        var = add("tmpvar_" + outname, str(read_var), "var_" + outname)
        os.remove("tmpvar_" + outname + ".gz")

        E, meta = Wavelength.wavelength_extract(
            diff, fine, filename=outname,
            flexure_x_corr_nm=flexure_x_corr_nm,
            flexure_y_corr_pix=flexure_y_corr_pix,
            flat_corrections=flat_corrections)
        meta['airmass1'] = diff[0].header['airmass1']
        meta['airmass2'] = diff[0].header['airmass2']
        meta['airmass'] = diff[0].header['airmass']
        header = {}
        for k, v in diff[0].header.iteritems():
            try:
                header[k] = v
            except:
                pass
        meta['HA'] = diff[0].header['HA']
        meta['Dec'] = diff[0].header['Dec']
        meta['RA'] = diff[0].header['RA']
        meta['PRLLTC'] = diff[0].header['PRLLTC']
        meta['equinox'] = diff[0].header['Equinox']
        meta['utc'] = diff[0].header['utc']
        meta['header'] = header
        meta['exptime'] = diff[0].header['exptime']
        np.save(outname, [E, meta])

        exfile = "extracted_var_%s.npy" % outname
        E_var, meta_var = Wavelength.wavelength_extract(
            var, fine, filename=outname,
            flexure_x_corr_nm=flexure_x_corr_nm,
            flexure_y_corr_pix=flexure_y_corr_pix,
            flat_corrections=flat_corrections)
        np.save("var_" + outname, [E_var, meta_var])

    sixA, posA, all_A = identify_spectra_gui(
        E, radius=radius, PRLLTC=Angle(meta['PRLLTC'], unit='deg'),
        lmin=lmin, lmax=lmax, airmass=meta['airmass'])
    sixB, posB, all_B = identify_spectra_gui(
        E, radius=radius, PRLLTC=Angle(meta['PRLLTC'], unit='deg'),
        lmin=lmin, lmax=lmax, airmass=meta['airmass'])

    to_image(E, meta, outname, posA=posA, posB=posB, adcpos=all_A)

    skyA = identify_bgd_spectra(E, posA)
    skyB = identify_bgd_spectra(E, posB)
    allix = np.concatenate([sixA, sixB])

    resA = interp_spectra(E, sixA, sign=1, outname=outname + "_A.pdf",
                          corrfile=corrfile)
    resB = interp_spectra(E, sixB, sign=-1, outname=outname + "_B.pdf",
                          corrfile=corrfile)
    skyA = interp_spectra(E, skyA, sign=1, outname=outname + "_skyA.pdf",
                          corrfile=corrfile)
    skyB = interp_spectra(E, skyB, sign=-1, outname=outname + "_skYB.pdf",
                          corrfile=corrfile)
    varA = interp_spectra(E_var, sixA, sign=1,
                          outname=outname + "_A_var.pdf", corrfile=corrfile)
    varB = interp_spectra(E_var, sixB, sign=1,
                          outname=outname + "_B_var.pdf", corrfile=corrfile)

    ## Plot out the X/Y selected spectra
    XSA = []
    YSA = []
    XSB = []
    YSB = []
    for ix in sixA:
        XSA.append(E[ix].X_as)
        YSA.append(E[ix].Y_as)
    for ix in sixB:
        XSB.append(E[ix].X_as)
        YSB.append(E[ix].Y_as)

    pl.figure()
    pl.clf()
    pl.ylim(-30, 30)
    pl.xlim(-30, 30)
    pl.scatter(XSA, YSA, color='blue', marker='H', linewidth=.1)
    pl.scatter(XSB, YSB, color='red', marker='H', linewidth=.1)
    pl.savefig("XYs_%s.pdf" % outname)
    pl.close()
    # / End Plot

    np.save("sp_A_" + outname, resA)
    np.save("sp_B_" + outname, resB)
    np.save("var_A_" + outname, varA)
    np.save("var_B_" + outname, varB)

    ll = Wavelength.fiducial_spectrum()
    sky_A = interp1d(skyA[0]['nm'], skyA[0]['ph_10m_nm'], bounds_error=False)
    sky_B = interp1d(skyB[0]['nm'], skyB[0]['ph_10m_nm'], bounds_error=False)
    sky = np.nanmean([sky_A(ll), sky_B(ll)], axis=0)

    var_A = interp1d(varA[0]['nm'], varA[0]['ph_10m_nm'], bounds_error=False)
    var_B = interp1d(varB[0]['nm'], varB[0]['ph_10m_nm'], bounds_error=False)
    varspec = np.nanmean([var_A(ll), var_B(ll)], axis=0) * \
        (len(sixA) + len(sixB))

    res = np.copy(resA)
    res = [{"doc": resA[0]["doc"],
            "ph_10m_nm": np.copy(resA[0]["ph_10m_nm"]),
            "nm": np.copy(resA[0]["ph_10m_nm"])}]
    res[0]['nm'] = np.copy(ll)
    f1 = interp1d(resA[0]['nm'], resA[0]['ph_10m_nm'], bounds_error=False)
    f2 = interp1d(resB[0]['nm'], resB[0]['ph_10m_nm'], bounds_error=False)

    airmassA = meta['airmass1']
    airmassB = meta['airmass2']
    extCorrA = 10**(Atm.ext(ll * 10) * airmassA / 2.5)
    extCorrB = 10**(Atm.ext(ll * 10) * airmassB / 2.5)
    print "Median airmass corr: ", np.median(extCorrA), np.median(extCorrB)

    # If requested merely sum in aperture, otherwise subtract sky
    if nosky:
        res[0]['ph_10m_nm'] = \
            np.nansum([f1(ll) * extCorrA, f2(ll) * extCorrB], axis=0) * \
            (len(sixA) + len(sixB))
    else:
        res[0]['ph_10m_nm'] = \
            np.nansum([(f1(ll) - sky_A(ll)) * extCorrA,
                       (f2(ll) - sky_B(ll)) * extCorrB], axis=0) * \
            (len(sixA) + len(sixB))

    res[0]['exptime'] = meta['exptime']
    res[0]['Extinction Correction'] = 'Applied using Hayes & Latham'
    res[0]['extinction_corr_A'] = extCorrA
    res[0]['extinction_corr_B'] = extCorrB
    res[0]['skyph'] = sky
    res[0]['var'] = varspec
    res[0]['radius_as'] = radius
    res[0]['positionA'] = posA
    res[0]['positionB'] = posB  # was posA, a copy-paste slip
    res[0]['N_spaxA'] = len(sixA)
    res[0]['N_spaxB'] = len(sixB)
    res[0]['meta'] = meta
    res[0]['object_spaxel_ids_A'] = sixA
    res[0]['sky_spaxel_ids_A'] = skyA
    res[0]['object_spaxel_ids_B'] = sixB
    res[0]['sky_spaxel_ids_B'] = skyB

    coef = chebfit(np.arange(len(ll)), ll, 4)
    xs = np.arange(len(ll) + 1)
    newll = chebval(xs, coef)
    res[0]['dlam'] = np.diff(newll)

    np.save("sp_" + outname, res)
def main():
    M = 3 * pow(2, 5)
    N = 15  # good values
    a1 = np.pi / 8.
    a2 = np.pi / 4.
    alphas = np.array([i / float(M) for i in range(1, M + 3)])
    r = np.zeros((len(alphas), 2))
    err = np.zeros((len(alphas), 2))
    A1 = np.linspace(0, a1, 50)
    T1 = getTheta(A1)
    Phitrue1 = phi(T1)
    A2 = np.linspace(a1, a2, 50)
    T2 = getTheta(A2)
    Phitrue2 = phi(T2)
    x = getChebNodes(N)
    for k in range(len(alphas)):
        p = alphas[k]
        print x
        # ax1 = [np.pi/8.*pow((xi+1.)/2, 1./p) for xi in x]
        ax1 = fA(x, p, 0, a1)
        ax2 = fA(x, p, a2, a1)
        # ax2 = [np.pi/8.+np.pi/8.*pow((xi+1.)/2, 1./p) for xi in x]
        theta1 = getTheta(ax1)
        theta2 = getTheta(ax2)
        # print "ax2"
        # print (ax2-1./8.*(theta2-np.sin(theta2)))
        # print "Theta"
        # print theta
        # print (1./8*(theta-np.sin(theta))-ax)
        phi1 = phi(theta1)
        phi2 = phi(theta2)
        fp1 = cheb.chebfit(x, phi1, N)
        fp2 = cheb.chebfit(x, phi2, N)
        print "Chebyshev Coeffs"
        print fp2
        r[k, 0] = abs(fp1[-1])
        r[k, 1] = abs(fp2[-1])
        err[k, 0] = np.linalg.norm(
            Phitrue1 - cheb.chebval(fX(A1, p, 0, a1), fp1))
        err[k, 1] = np.linalg.norm(
            Phitrue2 - cheb.chebval(fX(A2, p, a2, a1), fp2))
    print "Nth coefficient"
    print r
    print "Error"
    print err
    # print alphas

    p1 = 1. / 3.
    p2 = 5. / 12.
    ax1 = fA(x, p1, 0., a1)
    ax2 = fA(x, p2, a2, a1)
    theta1 = getTheta(ax1)
    theta2 = getTheta(ax2)
    phi1 = phi(theta1)
    phi2 = phi(theta2)
    fp1 = cheb.chebfit(x, phi1, N)
    fp2 = cheb.chebfit(x, phi2, N)
    print fp1

    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    plt.subplots_adjust(hspace=0.4)
    plt.subplot(211)
    plt.semilogy(alphas, r)
    plt.legend([r"$\phi_1(A)$", r"$\phi_2(A)$"])
    xlab = ['1/6', '1/4', '1/3', '5/12', '1/2', '2/3', '3/4', '5/6', '1']
    ticks = [1. / 6., 1. / 4., 1. / 3., 5. / 12., 1. / 2., 2. / 3., 3. / 4.,
             5. / 6., 1.]
    plt.xticks(ticks, xlab)
    plt.grid(True)
    plt.title(
        r'$\phi(x) \approx \tilde{\phi}(x) =\sum_k a_k T_k(cx^{\alpha}-1)$')
    plt.ylabel('coefficient decay')
    # plt.ylabel(r'a_N')
    plt.xlabel(r'$\alpha$')
    plt.subplot(212)
    plt.xlabel(r'$\alpha$')
    # plt.ylabel(r'$\phi_i(x)-\phi_i^N(x)$')
    plt.semilogy(alphas, err)
    plt.xticks(ticks, xlab)
    plt.grid(True)
    # plt.title('Error')
    plt.ylabel('Error')
    plt.legend([r"$\phi_1(A)$", r"$\phi_2(A)$"])

    f = open("blah.txt", "w")
    MM = 200
    t = np.linspace(0, 2 * np.pi, MM)
    aa = 1. / 8. * (t - np.sin(t))
    phit = phi(t)
    phit1 = cheb.chebval(fX(aa[0:MM / 2], p1, 0., a1), fp1)
    phit2 = cheb.chebval(fX(aa[MM / 2:], p2, a2, a1), fp2)
    for k in range(len(t)):
        if k < MM / 2:
            ha = phit1[k]
        else:
            ha = phit2[k - MM / 2]
        f.write("%s %s %s\n" % (aa[k], phit[k] - ha, ha))
    f.close()
    plt.show()
    print phi([2 * np.pi])
def chebfit(x, y, deg, rcond=None, full=False, w=None):
    from numpy.polynomial.chebyshev import chebfit
    return chebfit(x, y, deg, rcond, full, w)
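# --- Usage sketch (added; illustrative) ---
# The wrapper above keeps numpy's signature, so full=True still returns the
# (coefficients, diagnostics) pair from the underlying least-squares solve:
import numpy as np

x_demo = np.linspace(-1, 1, 50)
c_demo, (resid, rank, sv, rcond) = chebfit(x_demo, x_demo ** 3, 3, full=True)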
# Get sorted element indices for the x-axis variable. The spline-fitting
# function requires X to be in increasing order, but the rotation curve was
# extracted by stepping away from the centre to the outside, then back to
# the centre and out the other side; hence the need for sorting.
sortind = np.argsort(rotcurve["Row"])

fig1 = plt.figure(1)
xs = np.linspace(np.min(rotcurve["Row"]), np.max(rotcurve["Row"]), 1000)
residuals = []
change = 0.0
for i in range(15):
    interp_coeff = chebfit(rotcurve["Row"], rotcurve["lambda"],
                           w=1 / rotcurve["lambda_err"], deg=i)
    interp_coeff_z = chebfit(rotcurve["Row"], rotcurve["z"],
                             w=1 / rotcurve["z_err"], deg=i)

    # Below, we are just generating a sample curve for the user to
    # visualize the fit.
    ys = chebval(xs, interp_coeff)

    # Residual calculation in redshift space.
    predicted = chebval(rotcurve["Row"], interp_coeff_z)
    rms = np.sqrt(np.sum((predicted - rotcurve["z"])**2) / len(predicted))
    print rms
    residuals.append(rms)

# Now, present the plot to the user.
plt.errorbar(rotcurve["Row"], rotcurve["lambda"], rotcurve["lambda_err"],
             fmt="o")
plt.plot(xs, ys)
plt.xlabel("Row Number", fontsize=18)
def main():
    pi = np.pi
    N = 20
    a1 = pi / 8.
    a2 = pi / 4.
    p1 = 1. / 3.
    p2 = 5. / 12.
    x = getChebNodes(N)
    ax1 = fA(x, p1, 0., a1)
    ax2 = fA(x, p2, a2, a1)
    theta1 = getTheta(ax1)
    theta2 = getTheta(ax2)
    phi1 = phi(theta1)
    phi2 = phi(theta2)
    fp1 = cheb.chebfit(x, phi1, N)
    fp2 = cheb.chebfit(x, phi2, N)
    # print fp1
    # print fp2
    print "Disagreement at pi/8 is %.15f" % (cheb.chebval(1, fp1) -
                                             cheb.chebval(1, fp2))
    phim1 = cheb.chebval(1, fp2)
    phim2 = cheb.chebval(-1, fp2)
    # print phim1
    # print phim2
    K = 4
    M = 3 * pow(2, K)
    M = 60
    K = 5
    alphas = np.array([i / float(M) for i in range(2 * K, M + 3)])
    Atrue1 = np.linspace(0, pi / 8., 200)
    Atrue2 = np.linspace(pi / 8., pi / 4., 200)
    phis1 = cheb.chebval(fX(Atrue1, p1, 0, a1), fp1)
    phis2 = cheb.chebval(fX(Atrue2, p2, a2, a1), fp2)
    # alphas = [1./3.]
    # print cheb.chebval(fX(0,p1,0,a1),fp1)
    # print cheb.chebval(fX(pi/8,p1,0,a1),fp1)-phim1
    r = np.zeros((len(alphas), 2))
    err = np.zeros((len(alphas), 2))
    for k in range(len(alphas)):
        p = alphas[k]
        sx1 = fA(x, p, 0, phim1)
        sx2 = fA(x, p, phim2, phim1)
        A1 = np.zeros(len(sx1))
        A2 = np.zeros(len(sx1))
        # print "p = %f" % p
        # print sx1
        # print sx2
        for i in range(1, len(sx1) - 1):
            A1[i] = optimize.ridder(
                lambda x: -cheb.chebval(fX(x, p1, 0, a1), fp1) + sx1[i],
                0, pi / 8. + .1)
        A1[-1] = pi / 8.
        A2 = [optimize.ridder(
            lambda x: cheb.chebval(fX(x, p2, a2, a1), fp2) - sxi,
            pi / 8., pi / 4.) for sxi in sx2]
        # print A1
        # print A2
        fa1 = cheb.chebfit(x, A1, N)
        fa2 = cheb.chebfit(x, A2, N)
        # print fa1
        # print fa2
        r[k, 0] = abs(fa1[-1])
        r[k, 1] = abs(fa2[-1])
        err[k, 0] = np.linalg.norm(
            Atrue1 - cheb.chebval(fX(phis1, p, 0, phim1), fa1))
        err[k, 1] = np.linalg.norm(
            Atrue2 - cheb.chebval(fX(phis2, p, phim2, phim1), fa2))
    print "alpha r1 r2 err1 err2"
    for j in range(len(r)):
        print " %f %e %e %e %e" % (alphas[j], r[j, 0], r[j, 1], err[j, 0],
                                   err[j, 1])

    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    plt.subplots_adjust(hspace=0.4)
    plt.subplot(211)
    plt.semilogy(alphas, r)
    plt.legend([r"$\phi^{-1}_1$", r"$\phi^{-1}_2$"])
    xlab = ['1/6', '1/4', '1/3', '2/5', '1/2', '3/5', '2/3', '3/4', '5/6',
            '1']
    ticks = [1. / 6., 1. / 4., 1. / 3., 2. / 5., 1. / 2., 3. / 5., 2. / 3.,
             3. / 4., 5. / 6., 1.]
    plt.xticks(ticks, xlab)
    plt.grid(True)
    plt.title(r'$\phi^{-1}(x) \approx \sum_k a_k T_k(cx^{\alpha}-1)$')
    # plt.ylabel(r'a_N')
    plt.ylabel('coefficient decay')
    plt.xlabel(r'$\alpha$')
    plt.subplot(212)
    plt.xlabel(r'$\alpha$')
    plt.ylabel('Error')
    # plt.ylabel(r'$\phi^{-1}_i(x)-\tilde{\phi}_i^{-1}(x)$')
    plt.semilogy(alphas, err)
    plt.xticks(ticks, xlab)
    plt.grid(True)
    plt.title('Error')
    plt.legend([r"$\phi^{-1}_1$", r"$\phi^{-1}_2$"])
    # plt.show()

    p3 = 1.
    p4 = 3. / 5
    sx1 = fA(x, p3, 0, phim1)
    sx2 = fA(x, p4, phim2, phim1)
    A1 = np.zeros(len(sx1))
    A2 = np.zeros(len(sx1))
    # print sx1
    # print sx2
    for i in range(1, len(sx1) - 1):
        A1[i] = optimize.ridder(
            lambda x: -cheb.chebval(fX(x, p1, 0, a1), fp1) + sx1[i],
            0, pi / 8. + .1)
    A1[-1] = pi / 8.
    A2 = [optimize.ridder(
        lambda x: cheb.chebval(fX(x, p2, a2, a1), fp2) - sxi,
        pi / 8., pi / 4.) for sxi in sx2]
    # print A1
    # print A2
    fa1 = cheb.chebfit(x, A1, N)
    fa2 = cheb.chebfit(x, A2, N)
    # print fa1
    # print fa2
    print "Power for phi1 is %f" % p1
    print "Power for phi2 is %f" % p2
    print "Power for phi1^(-1) is %f" % p3
    print "Power for phi2^(-1) is %f" % p4
    fnames = ["phiofA1.txt", "phiofA2.txt", "Aofphi1.txt", "Aofphi2.txt"]
    coeffs = np.zeros((N + 1, 4))
    coeffs[:, 0] = fp1
    coeffs[:, 1] = fp2
    coeffs[:, 2] = fa1
    coeffs[:, 3] = fa2
    for i in range(4):
        f = open(fnames[i], "w")
        for j in range(len(fa1)):
            f.write("%.16f, " % (coeffs[j, i]))
        f.close()
    for i in range(N + 1):
        print "%.16f, " % x[i]
# For spline measurement
s_bot, s_cnt, s_fwhm, s_as12, s_fw13, s_as13, s_fw23, s_as23, s_err, s_ew, \
    s_cont = np.arange(0, 11)

reg = __import__("6405")

flat1 = list(range(169, 184))
flat2 = list(range(913, 932))
flat3 = list(range(979, 995))
flat4 = list(range(1147, 1239))

flt1 = reg.qu1m[flat1] / reg.qu1m[flat1].mean()
flt2 = reg.qu1m[flat2] / reg.qu1m[flat2].mean()
flt3 = reg.qu1m[flat3] / reg.qu1m[flat3].mean()
flt4 = reg.qu1m[flat4] / reg.qu1m[flat4].mean()
flt = np.hstack((flt1, flt2, flt3, flt4))
pos = np.arange(0, len(flt))
chb = ch.chebfit(pos, flt, 5)  # Use for normalization later

bak1 = reg.qu1[:, flat1] / reg.qu1[:, flat1].mean(axis=1).reshape(-1, 1)
bak2 = reg.qu1[:, flat2] / reg.qu1[:, flat2].mean(axis=1).reshape(-1, 1)
bak3 = reg.qu1[:, flat3] / reg.qu1[:, flat3].mean(axis=1).reshape(-1, 1)
bak4 = reg.qu1[:, flat4] / reg.qu1[:, flat4].mean(axis=1).reshape(-1, 1)
bak = np.hstack((bak1, bak2, bak3, bak4)) / ch.chebval(pos, chb).T

width = 1420  # Magic number found by trial
bak = bak.reshape(-1, width)
blmbd = reg.qu1.lmbd[:width]
bmet = reg.qu1.meta
bmet.lmbd = blmbd
bmet.ref = None
bmet.cont = (0, np.ones(bak.shape[0]))
def chebyshev_fit(fit_me, xi, xf, weights, order=7, *args, **kwargs):
    chebfunc = chebyshev.chebfit(xi, fit_me, deg=order, w=weights)
    fitted = chebyshev.chebval(xf, chebfunc)
    residual = chebyshev.chebval(xi, chebfunc) - fit_me
    return (fitted, residual)
def _fit_builtin(self, func, N):
    """ Return the chebyshev coefficients using the builtin chebfit """
    pts = cheb.chebpts2(N)
    y = func(pts)
    coeffs = cheb.chebfit(pts, y, N)
    return coeffs
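# --- Degrees-of-freedom note (added; illustrative) ---
# chebpts2(N) yields N nodes, so a degree-N fit (N + 1 coefficients) through
# them is underdetermined and numpy emits a RankWarning; degree N - 1 is the
# unique interpolant at those nodes:
import numpy as np
import numpy.polynomial.chebyshev as cheb

N_demo = 8
pts_demo = cheb.chebpts2(N_demo)
coeffs_demo = cheb.chebfit(pts_demo, np.exp(pts_demo), N_demo - 1)
assert np.allclose(cheb.chebval(pts_demo, coeffs_demo), np.exp(pts_demo))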
def test_chebfit(self):
    def f(x):
        return x*(x - 1)*(x - 2)

    def f2(x):
        return x**4 + x**2 + 1

    # Test exceptions
    assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
    assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
    assert_raises(TypeError, cheb.chebfit, [], [1], 0)
    assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0)
    assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0)
    assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
    assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
    assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])
    assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,])
    assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6])
    assert_raises(TypeError, cheb.chebfit, [1], [1], [])

    # Test fit
    x = np.linspace(0, 2)
    y = f(x)
    #
    coef3 = cheb.chebfit(x, y, 3)
    assert_equal(len(coef3), 4)
    assert_almost_equal(cheb.chebval(x, coef3), y)
    coef3 = cheb.chebfit(x, y, [0, 1, 2, 3])
    assert_equal(len(coef3), 4)
    assert_almost_equal(cheb.chebval(x, coef3), y)
    #
    coef4 = cheb.chebfit(x, y, 4)
    assert_equal(len(coef4), 5)
    assert_almost_equal(cheb.chebval(x, coef4), y)
    coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4])
    assert_equal(len(coef4), 5)
    assert_almost_equal(cheb.chebval(x, coef4), y)
    # check things still work if deg is not in strict increasing
    coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0])
    assert_equal(len(coef4), 5)
    assert_almost_equal(cheb.chebval(x, coef4), y)
    #
    coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
    coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3])
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
    # test weighting
    w = np.zeros_like(x)
    yw = y.copy()
    w[1::2] = 1
    y[0::2] = 0
    wcoef3 = cheb.chebfit(x, yw, 3, w=w)
    assert_almost_equal(wcoef3, coef3)
    wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w)
    assert_almost_equal(wcoef3, coef3)
    #
    wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
    wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
    # test scaling with complex values x points whose square
    # is zero when summed.
    x = [1, 1j, -1, -1j]
    assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1])
    assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1])
    # test fitting only even polynomials
    x = np.linspace(-1, 1)
    y = f2(x)
    coef1 = cheb.chebfit(x, y, 4)
    assert_almost_equal(cheb.chebval(x, coef1), y)
    coef2 = cheb.chebfit(x, y, [0, 2, 4])
    assert_almost_equal(cheb.chebval(x, coef2), y)
    assert_almost_equal(coef1, coef2)
def test_chebfit(self):
    def f(x):
        return x*(x - 1)*(x - 2)

    # Test exceptions
    assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
    assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
    assert_raises(TypeError, cheb.chebfit, [], [1], 0)
    assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0)
    assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0)
    assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
    assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
    assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])

    # Test fit
    x = np.linspace(0, 2)
    y = f(x)
    #
    coef3 = cheb.chebfit(x, y, 3)
    assert_equal(len(coef3), 4)
    assert_almost_equal(cheb.chebval(x, coef3), y)
    #
    coef4 = cheb.chebfit(x, y, 4)
    assert_equal(len(coef4), 5)
    assert_almost_equal(cheb.chebval(x, coef4), y)
    #
    coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
    # test weighting
    w = np.zeros_like(x)
    yw = y.copy()
    w[1::2] = 1
    y[0::2] = 0
    wcoef3 = cheb.chebfit(x, yw, 3, w=w)
    assert_almost_equal(wcoef3, coef3)
    #
    wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
    # test NA
    y = f(x)
    y[10] = 100

    xm = x.view(maskna=1)
    xm[10] = np.NA
    res = cheb.chebfit(xm, y, 3)
    assert_almost_equal(res, coef3)

    ym = y.view(maskna=1)
    ym[10] = np.NA
    res = cheb.chebfit(x, ym, 3)
    assert_almost_equal(res, coef3)

    y2 = np.vstack((y, y)).T
    y2[10, 0] = 100
    y2[15, 1] = 100
    y2m = y2.view(maskna=1)
    y2m[10, 0] = np.NA
    y2m[15, 1] = np.NA
    res = cheb.chebfit(x, y2m, 3).T
    assert_almost_equal(res[0], coef3)
    assert_almost_equal(res[1], coef3)

    wm = np.ones_like(x, maskna=1)
    wm[10] = np.NA
    res = cheb.chebfit(x, y, 3, w=wm)
    assert_almost_equal(res, coef3)
def fit_continuum(self, deg):
    inds = np.where(self.mask != 0)
    x_to_fit = self.xdata[inds]
    y_to_fit = self.ydata[inds]
    self.cont_fit = chebyshev.chebval(
        self.xdata, chebyshev.chebfit(x_to_fit, y_to_fit, deg))
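# --- Usage sketch (added; illustrative, not from the source) ---
# fit_continuum fits only the unmasked pixels but evaluates the polynomial
# over the full grid; the same pattern free-standing:
import numpy as np
from numpy.polynomial import chebyshev

x_demo = np.linspace(0, 10, 300)
y_demo = 5.0 + 0.1 * x_demo + np.exp(-(x_demo - 5) ** 2)  # continuum + line
mask_demo = np.abs(x_demo - 5) > 1.5                      # exclude the line
cont_demo = chebyshev.chebval(
    x_demo, chebyshev.chebfit(x_demo[mask_demo], y_demo[mask_demo], 2))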
def approximate_curve(x, y, xnew, curved_coef):
    """returns ynew - y values of some curve approximation"""
    if no_numpy:
        return None
    return chebval(xnew, chebfit(x, y, curved_coef))
def approximate_curve(x, y, xnew, curved_coef):
    """returns ynew - y values of some curve approximation"""
    if no_numpy:
        raise ValueError("No numpy found")
    return chebval(xnew, chebfit(x, y, curved_coef))
def GetChebyshevCoeffs(data, pvals, order=5):
    # integer division so the midpoint index stays an int under Python 3
    pmid = pvals[pvals.size // 2]
    prange = pvals[-1] - pvals[0]
    nvals = (pvals - pmid) / (prange / 2.0)
    return chebyshev.chebfit(nvals, data.x, order)
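# --- Normalisation sketch (added; illustrative) ---
# GetChebyshevCoeffs centres pvals on its middle element and scales by the
# half-range, so the fit variable spans roughly [-1, 1]:
import numpy as np

pvals_demo = np.linspace(0.0, 100.0, 101)
pmid_demo = pvals_demo[pvals_demo.size // 2]
nvals_demo = (pvals_demo - pmid_demo) / ((pvals_demo[-1] - pvals_demo[0]) / 2.0)
assert nvals_demo.min() == -1.0 and nvals_demo.max() == 1.0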
raise RuntimeError("%d rows is not a multiple of 64" % nrows) num_spectra = nrows/64 use_spectra = user_input("Select spectra",range(num_spectra)).split(',') avg = zeros(64) for spectrum in use_spectra: base_index = int(spectrum)*64 for index in range(64): avg[index] += data[base_index+index,1] averages = avg/num_spectra # normalize the spectrum mean = averages.mean() averages /= mean # fit the normalized spectrum freqs = data[:64,0] - data[:64,0].mean() coefs = chebfit(freqs,averages,11) model = chebval(freqs,coefs) freqstep = freqs[1]-freqs[0] bandwidth = freqs[-1]-freqs[0]+freqstep coeffile = open("baseline_coefs.pkl","rb") try: coef_dict = load(coeffile) print "Loaded:",coef_dict.keys() except EOFError: print "Empty coefficients file" coeffile.close() try: coef_dict[bandwidth] = coefs except NameError: coef_dict = {bandwidth: coefs}