def match(self):
    r"""
    Return a matched filter.

    Notes
    -----
    If

    $$H(z)=\frac{1+b_1z^{-1}+...+b_Nz^{-N}}{1+a_1z^{-1}+...+a_Mz^{-M}}$$

    it returns

    $$H_m(z)=\frac{b_N+b_{N-1}z^{-1}+...+z^{-N}}{1+a_1z^{-1}+...+a_Mz^{-M}}$$
    """
    if self.fir:
        MF = DF()
        MF.b = np.conj(self.b[::-1])
        MF.a = self.a
        MF.z = np.poly1d(MF.b).r
        MF.p = np.poly1d(MF.a).r
        return MF
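# A numpy-only sketch of the matched-filter property implemented above (the
# DF class itself is defined elsewhere): filtering with the conjugate-reversed
# taps peaks when the filter fully overlaps the pulse. The taps here are
# hypothetical.
import numpy as np

b = np.array([1.0, 0.5, 0.25])              # FIR pulse
mf = np.conj(b[::-1])                       # matched-filter taps, as in match()
y = np.convolve(b, mf)
assert np.argmax(np.abs(y)) == len(b) - 1   # peak at full overlap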
def __mul__(self, df):
    """
    extract and stack poles and zeros

    TODO : handling simplification
    """
    b1 = self.b
    a1 = self.a
    b2 = df.b
    a2 = df.a
    pb1 = np.poly1d(b1)
    pa1 = np.poly1d(a1)
    pb2 = np.poly1d(b2)
    pa2 = np.poly1d(a2)
    rpb1 = pb1.r
    rpb2 = pb2.r
    rpa1 = pa1.r
    rpa2 = pa2.r
    F = DF()
    F.p = np.hstack((rpa1, rpa2))
    F.z = np.hstack((rpb1, rpb2))
    F.simplify()
    return F
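# Quick check of the idea behind __mul__ above: the roots of a product of
# polynomials are the stacked roots of the factors, which is why cascading
# two filters simply concatenates their poles and zeros.
import numpy as np

pb1 = np.poly1d([1, 2])   # zero at -2
pb2 = np.poly1d([1, 3])   # zero at -3
cascade = pb1 * pb2
assert np.allclose(sorted(cascade.r), sorted(np.hstack((pb1.r, pb2.r))))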
def showExamplePolyFit(xs, ys, fitDegree1=1, fitDegree2=2):
    pylab.figure()
    pylab.plot(xs, ys, 'r.', ms=2.0, label="measured")
    # poly fit to noise
    coeff = numpy.polyfit(xs, ys, fitDegree1)
    # predict the curve
    pys = numpy.polyval(numpy.poly1d(coeff), xs)
    se = mse(ys, pys)
    r2 = rSquared(ys, pys)
    pylab.plot(xs, pys, 'g--', lw=5,
               label="%d degree fit, SE = %0.10f, R2 = %0.10f" % (fitDegree1, se, r2))
    # poly fit to noise
    coeffs = numpy.polyfit(xs, ys, fitDegree2)
    # predict the curve
    pys = numpy.polyval(numpy.poly1d(coeffs), xs)
    se = mse(ys, pys)
    r2 = rSquared(ys, pys)
    pylab.plot(xs, pys, 'b--', lw=5,
               label="%d degree fit, SE = %0.10f, R2 = %0.10f" % (fitDegree2, se, r2))
    pylab.legend()
def calFinished(self, items):
    ADC, DAC, correct = items
    CHAN = self.I.DAC.CHANS[DAC]
    X = np.linspace(CHAN.range[0], CHAN.range[1], 4096)
    fitvals = np.polyfit(X, correct, 3)
    fitfn = np.poly1d(fitvals)
    DIFF = (fitfn(X) - correct)
    intercept = DIFF.min()
    slope = (DIFF.max() - DIFF.min()) / 255.
    OFF = np.int16(((DIFF - intercept) / slope))  # compress the errors into an unsigned BYTE each
    print(min(OFF), max(OFF), len(OFF))
    self.p1.setData(X, correct - X)
    self.DACPLOT.enableAutoRange(axis=self.DACPLOT.plotItem.vb.YAxis)
    reply = QtGui.QMessageBox.question(self, 'Cross Check',
                                       'Does the plot look okay? proceed with writing to flash?',
                                       QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
    if reply == QtGui.QMessageBox.No:
        return False
    self.DAC_CALS[DAC] = struct.pack('6f', slope, intercept,
                                     fitvals[0], fitvals[1], fitvals[2], fitvals[3])
    self.DAC_RELOADS[DAC] = OFF
    print('\n', '>' * 20, DAC, '<' * 20)
    print('Offsets :', OFF[:20], '...')
    fitfn = np.poly1d(fitvals)
    YDATA = fitfn(X) - (OFF * slope + intercept)
    LOOKBEHIND = 100
    LOOKAHEAD = 100
    OFF = np.array([np.argmin(
        np.fabs(YDATA[max(B - LOOKBEHIND, 0):min(4095, B + LOOKAHEAD)] - X[B]))
        - (B - max(B - LOOKBEHIND, 0)) for B in range(0, 4096)])
    CHAN.load_calibration_table(OFF)
    self.tabs.setEnabled(True)
    self.__PVCH__(DAC, ADC, self.curdacrow,
                  [CHAN.CodeToV(100), CHAN.CodeToV(4000), 200])  # check if fixed
def var_fit(var_mean, var_name):
    size_list = []
    throuput_list = []
    for var in var_mean:
        get_per, set_per = normal_ops
        get_per_s = "%.2f" % get_per
        set_per_s = "%.2f" % set_per
        if var_name == 'key':
            upperlimit = 64
            filename = ('data_collected/' + str(var) + '_' + str(normal_value) + '_' +
                        str(normal_hash) + '_' + get_per_s + '_' + set_per_s + '.out')
        elif var_name == 'value':
            upperlimit = 2048
            filename = ('data_collected/' + str(normal_key) + '_' + str(var) + '_' +
                        str(normal_hash) + '_' + get_per_s + '_' + set_per_s + '.out')
        with open(filename, 'r') as f:
            load, time = zip(*[(int(line.strip().split(',')[0]),
                                float(line.strip().split(',')[1])) for line in f])
        z = np.polyfit(time, load, 2)
        p = np.poly1d(z)
        size = var
        throuput = p(1)
        size_list.append(size)
        throuput_list.append(throuput)
    # record the raw data points
    with open('data_collected/' + var_name + '_throuput', 'w') as g:
        for size, throuput in zip(size_list, throuput_list):
            g.write(str(size) + ',' + str(throuput) + '\n')
    # record the fitted data points
    z = np.polyfit(np.array(size_list), np.array(throuput_list), 1)
    p = np.poly1d(z)
    size_fit_list = [i for i in range(1, upperlimit)]
    throuput_fit_list = [p(i) for i in size_fit_list]
    with open('data_collected/' + var_name + '_throuput_fit', 'w') as g:
        for size, throuput in zip(np.array(size_fit_list), np.array(throuput_fit_list)):
            g.write(str(size) + ',' + str(throuput) + '\n')
    var_plot(var_name, list(z))
def testPage90(self):
    # from page 90 of Crandall and Pomerance
    f = poly1d([7, 0, 1, 0, 0, 0, 0, 0, 0, 7, 0, 1])
    g = poly1d([-7, 0, -1, 0, 0, 7, 0, 1])
    p = 13
    d = poly_prime_gcd(f, g, p)
    h = poly1d([1, 0, 2])
    self.assertEqual(h, d)
    p = 7
    d = poly_prime_gcd(f, g, p)
    h = poly1d([1])
    self.assertEqual(h, d)
    p = 2
    d = poly_prime_gcd(f, g, p)
    h = poly1d([1, 1, 1, 1])
    self.assertEqual(h, d)
def test_polydiv(self):
    b = np.poly1d([2, 6, 6, 1])
    a = np.poly1d([-1j, (1 + 2j), -(2 + 1j), 1])
    q, r = np.polydiv(b, a)
    assert_equal(q.coeffs.dtype, np.complex128)
    assert_equal(r.coeffs.dtype, np.complex128)
    assert_equal(q * a + r, b)
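# The same reconstruction identity holds for plain coefficient arrays:
# (x^2 + 3x + 2) / (x + 1) = (x + 2) with zero remainder.
import numpy as np

q, r = np.polydiv([1, 3, 2], [1, 1])
assert np.allclose(q, [1, 2]) and np.allclose(r, [0])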
def compute_Cd(self, alpha):
    """Compute the Cd of the wing or stabilizer in the ground frame."""
    alpha_abscissa = np.array([-45., -35., -25., -15., -10., -5., 0, 5, 10, 15,
                               25, 35, 45, 55, 70, 90, 110, 125, 135]) * np.pi / 180
    CdAtCl0 = 0.02
    CdAtClmax = 0.05
    Cd = [np.sin(alpha_abscissa[6 + 6]),
          np.sin(alpha_abscissa[5 + 6]),
          np.sin(alpha_abscissa[4 + 6]),
          CdAtClmax,
          (CdAtClmax - CdAtCl0) * 10 / 15 + CdAtCl0,
          (CdAtClmax - CdAtCl0) * 5 / 15 + CdAtCl0,
          CdAtCl0,
          (CdAtClmax - CdAtCl0) * 5 / 15 + CdAtCl0,
          (CdAtClmax - CdAtCl0) * 10 / 15 + CdAtCl0,
          CdAtClmax,
          np.sin(alpha_abscissa[4 + 6]),
          np.sin(alpha_abscissa[5 + 6]),
          np.sin(alpha_abscissa[6 + 6]),
          np.sin(alpha_abscissa[7 + 6]),
          np.sin(alpha_abscissa[8 + 6]),
          np.sin(alpha_abscissa[9 + 6]),
          np.sin(alpha_abscissa[8 + 6]),
          np.sin(alpha_abscissa[7 + 6]),
          np.sin(alpha_abscissa[6 + 6])]
    # print('alpha', alpha)
    polynomial_coef = np.polyfit(alpha_abscissa, Cd, 7)
    return np.poly1d(polynomial_coef)(alpha) - np.poly1d(polynomial_coef)(0.) + CdAtCl0
def plot(self):
    self.worksheet()
    fig, ax = plt.subplots()
    axes = [ax, ax.twinx(), ax.twinx()]
    fig.subplots_adjust(right=0.75)
    axes[-1].spines['right'].set_position(('axes', 1.2))
    colors = ('Green', 'Red', 'Blue')
    cur = np.poly1d(np.polyfit(self.DISCHARGE, self.current, 2))
    eff = np.poly1d(np.polyfit(self.DISCHARGE, self.EFFICIENCY, 2))
    head = np.poly1d(np.polyfit(self.DISCHARGE, self.del_head, 2))
    dis = np.linspace(self.DISCHARGE[0], self.DISCHARGE[9], 500)
    # efficiency axis plotting
    axes[2].plot(dis, eff(dis), color=colors[0])
    axes[2].plot(self.DISCHARGE, self.EFFICIENCY, 'ko', color=colors[0])
    axes[2].set_ylabel('Efficiency (%)', color=colors[0])
    axes[2].tick_params(axis='y', colors=colors[0])
    # current axis plotting
    axes[1].plot(dis, cur(dis), color=colors[1])
    axes[1].plot(self.DISCHARGE, self.current, 'k+', color=colors[1])
    axes[1].set_ylabel('Current (A)', color=colors[1])
    axes[1].tick_params(axis='y', colors=colors[1])
    # head axis plotting
    axes[0].plot(dis, head(dis), color=colors[2])
    axes[0].plot(self.DISCHARGE, self.del_head, 'kx', color=colors[2])
    axes[0].set_ylabel('Head (m)', color=colors[2])
    axes[0].tick_params(axis='y', colors=colors[2])
    axes[0].set_xlabel('Discharge in lps')
    plt.grid()
    plt.show()
    self.DISCHARGE = []
    self.EFFICIENCY = []
def pade_exp(k, j):
    """
    Return the Pade approximation to the exponential function with numerator
    of degree k and denominator of degree j.

    **Example**::

        >>> from nodepy import stability_function
        >>> p, q = stability_function.pade_exp(2,3)
        >>> p
        poly1d([1/20, 2/5, 1], dtype=object)
        >>> q
        poly1d([-1/60, 3/20, -3/5, 1], dtype=object)
    """
    from sympy import Rational
    one = Rational(1)
    Pcoeffs = [one]
    Qcoeffs = [one]
    for n in range(1, k + 1):
        newcoeff = Pcoeffs[0] * (k - n + one) / (j + k - n + one) / n
        Pcoeffs = [newcoeff] + Pcoeffs
    P = np.poly1d(Pcoeffs)
    for n in range(1, j + 1):
        newcoeff = -one * Qcoeffs[0] * (j - n + one) / (j + k - n + one) / n
        Qcoeffs = [newcoeff] + Qcoeffs
    Q = np.poly1d(Qcoeffs)
    return P, Q
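# A quick numerical sanity check, assuming pade_exp() above is in scope:
# the (2,3) Pade approximant should agree with exp(x) to high order near 0.
x0 = 0.1
P, Q = pade_exp(2, 3)
assert abs(P(x0) / Q(x0) - np.exp(x0)) < 1e-8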
def extractfeatures(buf):
    global yp1, yp2, yp3, x
    x = range(len(buf))
    features = []
    link1 = []
    link2 = []
    link3 = []
    for p in buf:
        link1.append(p[0])
        link2.append(p[1])
        link3.append(p[2])
    features.append(avgdelta(link1))
    features.append(avgdelta(link2))
    features.append(avgdelta(link3))
    features.append(abs(link1[0] - link1[len(link1) - 1]))
    features.append(abs(link2[0] - link2[len(link2) - 1]))
    features.append(abs(link3[0] - link3[len(link3) - 1]))
    z1 = np.polyfit(x, link1, 2)
    z2 = np.polyfit(x, link2, 2)
    z3 = np.polyfit(x, link3, 2)
    abslink1 = [abs(z1[i]) for i in range(len(z1) - 1)]
    abslink2 = [abs(z2[i]) for i in range(len(z2) - 1)]
    abslink3 = [abs(z3[i]) for i in range(len(z3) - 1)]
    for a in abslink1:
        features.append(a)
    for a in abslink2:
        features.append(a)
    for a in abslink3:
        features.append(a)
    yp1 = np.poly1d(z1)
    yp2 = np.poly1d(z2)
    yp3 = np.poly1d(z3)
    return features
def exit_fitting(self, textbox):
    # exit the fitting window and store the normalized spectrum in outfile
    for j in range(len(self.orders)):
        check_fit = False  # tracks whether a fit was performed for this order
        if self.fittype == "individual":  # fitting individual orders
            if type(self.coefficients_tab[j]) is np.ndarray:  # a fit was performed for this order
                func = np.poly1d(self.coefficients_tab[j])
                check_fit = True
        else:
            # when fitting all orders simultaneously, the fitting coefficients
            # are always stored in the first element of coefficients_tab
            func = np.poly1d(self.coefficients_tab[0])
            check_fit = True
        fitted = np.polyval(func, self.warr)
        i_order = np.where(self.oarr == self.orders[j])
        if check_fit:
            self.farr[i_order] = self.farr[i_order] / fitted[i_order]
        else:
            self.farr[i_order] = self.farr[i_order]
    c1 = fits.Column(name='Wavelength', format='D', array=self.warr, unit='Angstroms')
    c2 = fits.Column(name='Flux', format='D', array=self.farr, unit='Counts')
    c3 = fits.Column(name='Order', format='I', array=self.oarr)
    tbhdu = fits.BinTableHDU.from_columns([c1, c2, c3])
    tbhdu.writeto(outfile, clobber=True)
    self.close()
def fit(data, nz):
    x = [0 for iz in range(0, nz, 1)]
    y = [0 for iz in range(0, nz, 1)]
    z = [iz for iz in range(0, nz, 1)]
    for iz in range(0, nz, 1):
        x[iz], y[iz] = ndimage.measurements.center_of_mass(np.array(data[:, :, iz]))
    # fit the centerline in the Z-X plane using a polynomial function
    print('\nFit centerline in the Z-X plane using polynomial function...')
    coeffsx = np.polyfit(z, x, 1)
    polyx = np.poly1d(coeffsx)
    x_fit = np.polyval(polyx, z)
    print('x_fit')
    print(x_fit)
    # fit the centerline in the Z-Y plane using a polynomial function
    print('\nFit centerline in the Z-Y plane using polynomial function...')
    coeffsy = np.polyfit(z, y, 1)
    polyy = np.poly1d(coeffsy)
    y_fit = np.polyval(polyy, z)
    # 3D plot
    fig1 = plt.figure()
    ax = Axes3D(fig1)
    ax.plot(x, y, z, zdir='z')
    ax.plot(x_fit, y_fit, z, zdir='z')
    plt.show()
    return x, y, x_fit, y_fit
def bsa_count(diams, style='single'):
    '''Returns bsa molecules per unit surface area given a particle diameter
    and a fitting style.  Essentially just returns the y value of a fit curve
    given x (diameter).'''
    if style == 'single':
        z = np.polyfit(x, y, 1)
        p = np.poly1d(z)
        return p(diams)
    elif style == 'dual':
        dout = []
        x1 = x[0:2]
        y1 = y[0:2]
        z1 = np.polyfit(x1, y1, 1)
        p1 = np.poly1d(z1)
        x2 = x[1:3]
        y2 = y[1:3]
        z2 = np.polyfit(x2, y2, 1)
        p2 = np.poly1d(z2)
        for d in diams:
            if d < x[1]:  # if d < 30
                dout.append(p1(d))
            else:
                dout.append(p2(d))
        return dout
    else:
        raise AttributeError('style must be "single" or "dual", not %s' % style)
def daub(p):
    """
    The coefficients for the FIR low-pass filter producing Daubechies wavelets.

    p>=1 gives the order of the zero at f=1/2.  There are 2p filter
    coefficients.

    Parameters
    ----------
    p : int
        Order of the zero at f=1/2, can have values from 1 to 34.
    """
    sqrt = np.sqrt
    if p < 1:
        raise ValueError("p must be at least 1.")
    if p == 1:
        c = 1 / sqrt(2)
        return np.array([c, c])
    elif p == 2:
        f = sqrt(2) / 8
        c = sqrt(3)
        return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
    elif p == 3:
        tmp = 12 * sqrt(10)
        z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
        z1c = np.conj(z1)
        f = sqrt(2) / 8
        d0 = np.real((1 - z1) * (1 - z1c))
        a0 = np.real(z1 * z1c)
        a1 = 2 * np.real(z1)
        return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
                                  a0 - 3 * a1 + 3, 3 - a1, 1])
    elif p < 35:
        # construct polynomial and factor it
        if p < 35:
            P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
            yj = np.roots(P)
        else:  # try different polynomial --- needs work
            P = [comb(p - 1 + k, k, exact=1) / 4.0**k for k in range(p)][::-1]
            yj = np.roots(P) / 4
        # for each root, compute two z roots, select the one with |z|>1.
        # Build up final polynomial
        c = np.poly1d([1, 1])**p
        q = np.poly1d([1])
        for k in range(p - 1):
            yval = yj[k]
            part = 2 * sqrt(yval * (yval - 1))
            const = 1 - 2 * yval
            z1 = const + part
            if (abs(z1)) < 1:
                z1 = const - part
            q = q * [1, -z1]
        q = c * np.real(q)
        # normalize result
        q = q / np.sum(q) * sqrt(2)
        return q.c[::-1]
    else:
        raise ValueError("Polynomial factorization does not work "
                         "well for p too large.")
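# Sanity check, assuming daub() above is in scope: Daubechies low-pass taps
# always sum to sqrt(2) (checked here for the closed-form orders only).
for order in (1, 2, 3):
    assert abs(np.sum(daub(order)) - np.sqrt(2)) < 1e-10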
def lagrange(x, w):
    """
    Return a Lagrange interpolating polynomial.

    Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
    polynomial through the points ``(x, w)``.

    Warning: This implementation is numerically unstable. Do not expect to
    be able to use more than about 20 points even if they are chosen
    optimally.

    Parameters
    ----------
    x : array_like
        `x` represents the x-coordinates of a set of datapoints.
    w : array_like
        `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
    """
    M = len(x)
    p = poly1d(0.0)
    for j in range(M):
        pt = poly1d(w[j])
        for k in range(M):
            if k == j:
                continue
            fac = x[j] - x[k]
            pt *= poly1d([1.0, -x[k]]) / fac
        p += pt
    return p
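# Usage sketch for lagrange() above: the interpolant must pass through every
# input point exactly.
import numpy as np

xs = np.array([0.0, 1.0, 2.0])
ws = np.array([1.0, 3.0, 2.0])
poly = lagrange(xs, ws)
assert np.allclose(poly(xs), ws)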
def optimum_polyfit(x, y, score=functoolz.compose(np.max, np.abs), max_degree=50, stop_at=1e-10):
    """
    Optimize the degree of a polyfit polynomial so that score(y - poly(x)) is minimized.

    :param max_degree: The maximum degree to try. LinAlgErrors are automatically ignored.
    :param stop_at: If a score lower than this is reached, the function returns early
    :param score: The score function that is applied to y - poly(x). Default: max deviation.
    :return: A tuple (poly1d object, degree, score)
    """
    scores = np.empty(max_degree - 1, dtype=np.float64)
    # ignore rank warnings for now, but do not ignore them for the final
    # polynomial if not returning early
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', np.RankWarning)
        for deg in range(1, max_degree):
            try:
                poly = np.poly1d(np.polyfit(x, y, deg))
            except np.linalg.LinAlgError:
                # set score to max float value
                scores[deg - 1] = np.finfo(np.float64).max
                continue
            scores[deg - 1] = score(y - poly(x))
            # early return if we found a polynomial that is good enough
            if scores[deg - 1] <= stop_at:
                return poly, deg, scores[deg - 1]
    # find the minimum score
    deg = np.argmin(scores) + 1
    # compute polyfit for that degree
    poly = np.poly1d(np.polyfit(x, y, deg))
    return poly, deg, np.min(scores)
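# Usage sketch for optimum_polyfit() above (functoolz comes from toolz, and
# numpy/warnings are assumed imported as elsewhere in this file):
x = np.linspace(0, 1, 200)
y = np.sin(2 * np.pi * x)
poly, deg, err = optimum_polyfit(x, y)
print(deg, err)   # degree chosen so the max deviation is minimized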
def fitPlotFW(nGraphs, nNodesIni, nNodesFin, step, sparseFactor=0.25):
    times1 = timeDijkstraMAllPairs(nGraphs, nNodesIni, nNodesFin, step, sparseFactor)
    times2 = timeFloydWarshall(nGraphs, nNodesIni, nNodesFin, step, sparseFactor)
    x = []
    for z in range(nNodesIni, nNodesFin, step):
        x.append(z)
    ydata1 = np.polyfit(x, times1, 2)
    f1 = np.poly1d(ydata1)
    yfit1 = np.linspace(nNodesIni, nNodesFin, (nNodesFin - nNodesIni) // step)
    ydata2 = np.polyfit(x, times2, 2)
    f2 = np.poly1d(ydata2)
    yfit2 = np.linspace(nNodesIni, nNodesFin, (nNodesFin - nNodesIni) // step)
    plt.plot(x, times1, "b.", label="measured Dijkstra")
    plt.plot(yfit1, f1(yfit1), "r-", label='Dijkstra')
    plt.plot(x, times2, "r.", label="measured FloydWarshall")
    plt.plot(yfit2, f2(yfit2), "b-", label='FloydWarshall')
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=2, mode="expand", borderaxespad=0.)
    plt.show()
    return
def _make_quality(self, seq):
    """
    Simulates read quality from an error function.

    Qualities are in Sanger Fastq format (Phred+33), i.e. quality is
    represented by an integer from 0 to 93, represented by the ascii
    characters 33-126.  Errors are represented as 10^-0.0 (random base) to
    10^-9.3 (super accurate).

    ref: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2847217/?tool=pubmed

    This might be re-written in the future using Biopythons QualityIO,
    http://www.biopython.org/DIST/docs/api/Bio.SeqIO.QualityIO-module.html
    """
    output = ""
    for i, q in enumerate(seq):
        if len(self.quality_cache) <= i:
            f = numpy.poly1d(self.settings['quality mean'])
            self.quality_cache += [f(len(self.quality_cache))]
        if len(self.variance_cache) <= i:
            v = numpy.poly1d(self.settings['quality var'])
            self.variance_cache += [v(len(self.variance_cache))]
        quality = self.quality_cache[i]
        quality += numpy.random.normal(0, numpy.sqrt(self.variance_cache[i]))
        quality = min(93, max(int(quality), 0))
        output += "%c" % (33 + quality)
    return output
def characteristic_polynomials(self):
    r"""
    Returns the characteristic polynomials (also known as generating
    polynomials) of a linear multistep method.  They are:

    `\rho(z) = \sum_{j=0}^k \alpha_j z^j`

    `\sigma(z) = \sum_{j=0}^k \beta_j z^j`

    **Examples**::

        >>> from nodepy import lm
        >>> ab5 = lm.Adams_Bashforth(5)
        >>> rho,sigma = ab5.characteristic_polynomials()
        >>> print(rho)
           5     4
        1 x - 1 x
        >>> print(sigma)
              4          3          2
        2.64 x - 3.853 x + 3.633 x - 1.769 x + 0.3486

    **References**:
        #. [hairer1993]_ p. 370, eq. 2.4
    """
    rho = np.poly1d(self.alpha[::-1])
    sigma = np.poly1d(self.beta[::-1])
    return rho, sigma
def Pumpeff(Qo, Ho, p, Qin, coffH):
    P, A = [], []
    g = 9.81  # m/s^2
    ro = 1000  # kg/m^3
    with open(p, "r") as f:
        r = csv.reader(f)
        for row in r:
            Row = [float(row[0]), float(row[1])]
            P.append(Row)
    P1 = numpy.asarray(P)
    [Q, N] = [P1[:, 0], P1[:, 1]]  # turn the imported pump curve into two vectors
    coffP1 = numpy.polyfit(Q, N, 2)
    polyP1 = numpy.poly1d(coffP1)  # create a polynomial
    Qo = float(Qo)
    Ho = float(Ho)
    # Nin = float(Nin)
    ys = polyP1(Qo)
    eff1 = (g * ro * Ho * Qo) / (ys * 3600 * 1000)
    # Nny = (Qin*Nin)/Qo
    # coffH = float(coffH)
    polyH = numpy.poly1d(coffH)
    Qnew = numpy.arange(round(Q[0], 2), round(Q[-1], 2), 0.1)
    for j in Qnew:
        Hh = polyH(j)
        J = polyP1(j)
        effplot = (g * ro * j * Hh) / (J * 3600 * 1000)
        A.append(effplot)
    return eff1, coffP1, A, Qnew
def smooth_regress(path, dt, order):
    """
    path: data frame with at least columns 't', 'lat', 'lon'
    dt: resampling time interval
    order: order of the polynomial to use

    return: interpolated path
    """
    import pandas as pd
    import numpy as np

    start = path.t.iloc[0]
    end = path.t.iloc[-1]
    # new ts sequence
    nt = start + np.linspace(0, end - start, int((end - start) / dt) + 1)
    avg_t = np.mean(path['t'])
    avg_lat = np.mean(path['lat'])
    avg_lon = np.mean(path['lon'])
    lat = np.poly1d(np.polyfit(path['t'] - avg_t, path['lat'] - avg_lat, order))
    lon = np.poly1d(np.polyfit(path['t'] - avg_t, path['lon'] - avg_lon, order))
    r = pd.DataFrame(columns=('t', 'lat', 'lon'))
    r['t'] = nt
    r['lat'] = list(map(lat, nt - avg_t))
    r['lon'] = list(map(lon, nt - avg_t))
    # repair path
    r['lat'] += avg_lat
    r['lon'] += avg_lon
    r.set_index('t', inplace=True)
    return r
def Q(dim, dfd=np.inf):
    r"""
    Q polynomial

    If dfd == inf (the default), then Q(dim) is the (dim-1)-st Hermite
    polynomial:

    H_j(x) = (-1)^j * e^{x^2/2} * (d^j/dx^j e^{-x^2/2})

    If dfd != inf, then it is the polynomial Q defined in

    Worsley, K.J. (1994). 'Local maxima and the expected Euler
    characteristic of excursion sets of \chi^2, F and t fields.'
    Advances in Applied Probability, 26:13-42.
    """
    m = dfd
    j = dim
    if j > 0:
        poly = hermitenorm(j - 1)
        poly = np.poly1d(np.around(poly.c))
        if np.isfinite(m):
            for l in range((j - 1) // 2 + 1):
                f = np.exp(gammaln((m + 1) / 2.0)
                           - gammaln((m + 2 - j + 2 * l) / 2.0)
                           - 0.5 * (j - 1 - 2 * l) * (np.log(m / 2.0)))
                poly.c[2 * l] *= f
        return np.poly1d(poly.c)
    else:
        raise ValueError("Q defined only for dim > 0")
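# With the default dfd=inf, Q(dim) above is just the probabilists' Hermite
# polynomial of degree dim-1, e.g. He_2(x) = x^2 - 1:
print(Q(3))   # expected: 1 x^2 - 1 (coefficients rounded by np.around)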
def StepFit(df_timeser):
    # receives the temporal series as a dataframe with 3 columns: time, nflux, Q
    # NaNs must already be dropped
    # df must already be sorted by time

    # an array of quarters, without duplicate items
    nondup = np.unique(df_timeser['Q'].values)
    # go through the time series, quarter by quarter
    if len(nondup) > 1:
        # save the first quarter of the LC
        df_Fit = df_timeser[df_timeser.Q == nondup[0]]
        # iterate up to the n-1 element
        for index, q_item in enumerate(nondup[:len(nondup) - 1]):
            df_tmp1 = df_timeser[df_timeser.Q == q_item]
            df_tmp2 = df_timeser[df_timeser.Q == nondup[index + 1]]
            # fit the 2 linear fits using: np.polyfit, np.polyval, p = np.poly1d(z)
            p1 = np.poly1d(np.polyfit(df_tmp1['time'].values, df_tmp1['nflux'].values, 1))
            p2 = np.poly1d(np.polyfit(df_tmp2['time'].values, df_tmp2['nflux'].values, 1))
            # then evaluate the adjacent borders of the two pieces, each in its
            # corresponding fit, and determine the offset
            Offset = p1(df_tmp1['time'].values[-1]) - p2(df_tmp2['time'].values[0])
            # now add the offset to the second piece
            df_tmp2['nflux'] += Offset
            # and concatenate the 2nd piece with the previous
            df_Fit = concat([df_Fit, df_tmp2])
    else:
        df_Fit = df_timeser
        print('no fit made, only ONE quarter in LC')
    return df_Fit
def debye_T(self, x):
    self.__EoB = []
    self.__GoB = []
    self.__B = []
    self.__V = []
    dic = self.get_Cij()
    for scale in sorted(dic.keys()):
        (a, b, c) = self.calculate_moduli(scale)
        self.__EoB.append(c)
        self.__GoB.append(b)
        self.__B.append(a)
        self.__V.append(float(scale)**3. / 2.)
    c1 = np.polyfit(self.__V, self.__EoB, 4)
    p_EoB = np.poly1d(c1)
    c2 = np.polyfit(self.__V, self.__GoB, 4)
    p_GoB = np.poly1d(c2)
    c3 = np.polyfit(self.__V, self.__B, 4)
    p_B = np.poly1d(c3)
    Const = 1.
    theta = Const * (1. / 3. * (p_EoB(x))**(-3. / 2.)
                     + 2. / 3. * (p_GoB(x))**(-3. / 2.))
    return theta
def bch_18_6(symbol_version):
    """Calculate BCH(18,6) on the symbol version number.

    This function is not used, as the specs provide a reference table
    covering all the symbol versions. It was written just to test whether
    the same results would be obtained.
    """
    data_bit_string = to_binstring(symbol_version, 6)
    numerator = (
        poly1d([int(x) for x in data_bit_string]) *
        poly1d([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
    generator_polynomial = poly1d([1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1])
    _quotient, remainder = numerator / generator_polynomial
    coeff_list = [abs(int(x)) for x in remainder.coeffs]
    # poly1d divides over the integers/rationals rather than GF(2), which is
    # why some coefficients come out as 2 or 3; reducing each coefficient
    # modulo 2 recovers the expected binary remainder
    coeff_list = [x % 2 for x in coeff_list]
    while len(coeff_list) < 12:
        coeff_list.insert(0, 0)
    coeff_string = ''
    for coeff in coeff_list:
        coeff_string += str(coeff)
    result = data_bit_string + coeff_string
    return result
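# For reference, a direct long division over GF(2) (a sketch using bit lists,
# most-significant bit first) sidesteps the rational-coefficient issue:
def gf2_remainder(dividend, divisor):
    dividend = list(dividend)
    while len(dividend) >= len(divisor):
        if dividend[0]:
            # XOR the divisor onto the leading bits
            for i, bit in enumerate(divisor):
                dividend[i] ^= bit
        dividend.pop(0)  # drop the (now zero) leading bit
    return dividend  # remainder: len(divisor) - 1 bits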
def polynomial_fitting(input_array, degree=4, verbose=False, skip_col=[], skip_row=[]):
    array_org = input_array.copy()
    shape = input_array.shape
    # apply on rows
    for row in range(shape[0]):
        if row in skip_row:
            continue
        poly = np.poly1d(np.polyfit(x=np.linspace(1, shape[1], shape[1]),
                                    y=input_array[row, :], deg=degree))
        input_array[row, :] = poly(np.linspace(1, shape[1], shape[1]))
    # apply on columns
    for col in range(shape[1]):
        if col in skip_col:
            continue
        poly = np.poly1d(np.polyfit(x=np.linspace(1, shape[0], shape[0]),
                                    y=input_array[:, col], deg=degree))
        input_array[:, col] = poly(np.linspace(1, shape[0], shape[0]))
    if verbose:
        print("Polynomial fitting")
        print("Degree of the fitting polynomial : {}".format(degree))
        print("-----Normalized Table-------")
        print(input_array)
        print("-----Original Table-------")
        print(array_org)
        print("")
    return input_array
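# Usage sketch: smooth a small random grid row-wise and column-wise with the
# function above (note that it modifies its input in place).
grid = np.random.rand(6, 6)
smoothed = polynomial_fitting(grid.copy(), degree=2)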
def __init__(self, args):
    '''
    Constructor, sets up the data and performs the fits
    '''
    super(App, self).__init__()
    self.args = args
    # self.ydata = np.array([9, 9.25, 9.5, 10, 10.5, 11, 11.5,
    #                        12, 12.5, 13, 9.75, 10.25, 10.75, 11.25, 11.75, 12.25,
    #                        12.75, 8.75, 8.5, 8.25])
    # self.brightxdata = np.log10([11.1, 13.55, 17.67, 26.33, 36.71,
    #                              54.70, 71.36, 93.09, 113.63, 129.78, 21.57, 32.14, 44.81,
    #                              62.48, 81.5, 99.48, 121.43, 9.09, 7.45, 5.71])
    # self.darkxdata = np.log10([11.86, 14.48, 18.89, 28.14, 44.81,
    #                            66.77, 99.48, 138.70, 193.36, 269.58, 23.06, 36.71, 54.7,
    #                            81.5, 121.43, 169.30, 236.03, 9.72, 7.45, 6.1])
    self.ydata, self.brightxdata, self.darkxdata = GetData()

    with tables.openFile('saturation_vs_exposure.h5', 'w') as outfile:
        outfile.createArray('/', 'mags', self.ydata)
        outfile.createArray('/', 'dark', self.darkxdata)
        outfile.createArray('/', 'bright', self.brightxdata)

    assert self.ydata.size == self.brightxdata.size
    assert self.ydata.size == self.darkxdata.size

    self.brightFit = np.poly1d(np.polyfit(self.brightxdata, self.ydata, 4))
    self.darkFit = np.poly1d(np.polyfit(self.darkxdata, self.ydata, 2))
def drawStats2(prices, period):
    ps2 = [p['close'] for p in prices][-140:-115]
    ps = [p['close'] for p in prices][-140:-120]
    phs = [p['high'] for p in prices][-140:-120]
    pls = [p['low'] for p in prices][-140:-120]
    l = len(prices)
    ts = np.arange(20)
    pz = np.poly1d(np.polyfit(ts, ps, 1))
    phz = np.poly1d(np.polyfit(ts, phs, 1))
    plz = np.poly1d(np.polyfit(ts, pls, 1))
    fig = plt.figure()
    ax1 = fig.add_subplot(311)
    ax1.plot(ts, ps, '-', ts, pz(ts), '-', ts, phz(ts), '--', ts, plz(ts), '--')
    # plt.plot(ts, ps, 'o', label='Original data', linestyle='-', markersize=2)
    # plt.plot(ts, m * ts + c, 'r', label='Fitted line')
    # plt.legend()
    ax2 = fig.add_subplot(312)
    ax2.plot(ts, (pz(ts) - plz(ts)) - (phz(ts) - pz(ts)), '-')
    ax2.grid()
    ts2 = np.arange(len(ps2))
    ax3 = fig.add_subplot(313)
    ax3.plot(ts2, ps2, '-')
    multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1,
                        horizOn=False, vertOn=True)
    plt.show()
    return
def plot_reml(C_eta, sel, sel_label, ax, ax2, sigma_diag):
    coh_arr = np.linspace(0.00, 0.30, 16)
    reml_arr = reml(C_eta, coh_arr, sel, sel_label, ax2, sigma_diag)
    ax.plot(coh_arr, reml_arr, 'm+', label=sel_label)
    ax.set_xlabel(r'$\sigma_i$')
    ax.set_ylabel('REML')
    ax.set_xlim(-0.02, 0.30)
    # fit a polynomial to the points
    coeff = np.polyfit(coh_arr, reml_arr, 4)
    p = np.poly1d(coeff)
    coh_arr2 = np.linspace(0.00, 0.20, 201)
    fit = p(coh_arr2)
    ax.plot(coh_arr2, fit)
    # determine where the minimum occurs
    minimum = sp.fmin(p, 0.1, disp=False)
    print("Minimum at %4.3f" % minimum[0])
    # the uncertainty occurs when the REML increases by 1 - need to double check.
    # To find where the REML increases by 1, we look for the roots
    coeff = np.polyfit(coh_arr, reml_arr - p(minimum[0]) - 1, 4)
    p = np.poly1d(coeff)
    sol = sp.root(p, [0.0, 0.2])
    m = minimum[0]
    upper = sol.x[1] - m
    lower = m - sol.x[0]
    ax.plot(coh_arr2, fit, label="Min at %4.2f+%4.2f-%4.2f" % (m, upper, lower))
    return
def fit_c_median(sim_name_prefix, prod_dir, store_dir, redshift, cosmology,
                 halo_type='Rockstar', N_cut=70, phases=range(16)):
    '''
    Median concentration as a function of log halo mass in Msun/h:
    c_med(log(m)) = poly(log(m))
    '''
    # check if the output file already exists
    poly_path = os.path.join(store_dir, sim_name_prefix, 'c_median_poly')
    if os.path.isfile(poly_path + '.txt'):
        return np.poly1d(np.loadtxt(poly_path + '.txt'))
    # load 16 phases for the given cosmology and redshift
    print('Loading halo catalogues to fit c_median...')
    halocats = abacus_ht.make_catalogs(
        sim_name=sim_name_prefix, products_dir=prod_dir,
        redshifts=[redshift], cosmologies=[cosmology], phases=phases,
        halo_type=halo_type,
        load_halo_ptcl_catalog=True,  # this loads 10% particle subsamples
        load_ptcl_catalog=False,  # this loads uniform subsamples, dnw
        load_pids='auto')[0][0]
    htabs = [halocat.halo_table for halocat in halocats]
    htab = table.vstack(htabs)  # combine all phases into one halo table
    mask = (htab['halo_N'] >= N_cut) & (htab['halo_upid'] == -1)
    htab = htab[mask]  # mass cut
    halo_logm = np.log10(htab['halo_mvir'].data)
    halo_nfw_conc = htab['halo_rvir'].data / htab['halo_klypin_rs'].data
    # set up bin edges, the last edge larger than the most massive halo
    logm_bins = np.linspace(np.min(halo_logm), np.max(halo_logm) + 0.001,
                            endpoint=True, num=100)
    logm_bins_centre = (logm_bins[:-1] + logm_bins[1:]) / 2
    N_halos, _ = np.histogram(halo_logm, bins=logm_bins)
    c_median = np.zeros(len(logm_bins_centre))
    c_median_std = np.zeros(len(logm_bins_centre))
    for i in range(len(logm_bins_centre)):
        mask = (logm_bins[i] <= halo_logm) & (halo_logm < logm_bins[i + 1])
        c_median[i] = np.median(halo_nfw_conc[mask])
        c_median_std[i] = np.std(halo_nfw_conc[mask])
    # when there are 0 data points in a bin, median = std = nan, remove;
    # when there is only 1 data point in a bin, std = 0 and w = inf;
    # when there are few data points, std is very small (<1), w large;
    # rescale weight by sqrt(N-1)
    mask = c_median_std > 0
    N_halos, logm_bins_centre, c_median, c_median_std = \
        N_halos[mask], logm_bins_centre[mask], \
        c_median[mask], c_median_std[mask]
    weights = 1 / np.array(c_median_std) * np.sqrt(N_halos - 1)
    coefficients = np.polyfit(logm_bins_centre, c_median, 3, w=weights)
    print('Polynomial found as : {}.'.format(coefficients))
    # save coefficients to a txt file
    if not os.path.exists(os.path.dirname(poly_path)):
        os.makedirs(os.path.dirname(poly_path))
    np.savetxt(poly_path + '.txt', coefficients, fmt='%.30e')
    c_median_poly = np.poly1d(coefficients)
    # plot the fit
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.errorbar(logm_bins_centre, c_median, yerr=c_median_std,
                capsize=3, capthick=0.5, fmt='.', ms=4, lw=0.2)
    ax.plot(logm_bins_centre, c_median_poly(logm_bins_centre), '-')
    ax.set_title('Median Concentration Polynomial Fit from {} Boxes'.format(
        len(phases)))
    fig.savefig(poly_path + '.pdf')
    return c_median_poly
delta = [0, 0, -100, 0, 700, 0, 0, 0, 0, 0]
c = [0, 500, 0, 0, 0, 0, 0, 1100, 0, 0]
ci = [0, 0, 0, 0, 400, 300, 0, 0, 0, 400]
ma = [2.8, 3, 3.2, 3.2, 4, 4, 4.5, 4.3, 5, 5]
mi = [1, 1, 1.3, 1.5, 2, 2, 2.3, 2.5, 3, 3]

B0 = []
dB0 = []
for i, a2 in enumerate(a2_txt):
    pr = Probe(a2, str(int(freq[i])) + 'MHz', freq[i], delta[i], c[i], ci[i], ma[i], mi[i])
    pr.plot()
    B0.append(pr.B)
    dB0.append(pr.dB)

coefB0 = np.polyfit(B0, freq, 1)
poly1d_fnB0 = np.poly1d(coefB0)

h = 6.626 * 10 ** (-34)
muhB = 9.274 * 10 ** (-24)

dist = 0
for i, B in enumerate(B0):
    dist += abs(freq[i] - poly1d_fnB0(B))
print('Dist = ' + str(dist / len(B0)))
print(poly1d_fnB0)
print('g = ' + str(h / muhB * coefB0[0] * 10 ** 9))

plt.figure()
plt.plot(B0, freq, 'bo')
plt.plot(np.linspace(0, 3), poly1d_fnB0(np.linspace(0, 3)), 'b-')
def analyze(connection):
    cursor = connection.cursor()

    # First, we will count the number of jobs per day for "us" (CSC108).
    query = """
    SELECT strftime("%m-%d-%Y", completed.CompletionTime, "unixepoch") AS day,
           count(DISTINCT csc108.JobID) AS us,
           count(DISTINCT completed.JobID) - count(DISTINCT csc108.JobID) AS them
      FROM completed
      LEFT OUTER JOIN (
        SELECT CompletionTime, JobID
          FROM completed
         WHERE Account = "CSC108"
           AND User = "******"
           AND JobName LIKE "SAGA-Python-PBSJobScript.%"
      ) csc108 ON csc108.JobID = completed.JobID
     WHERE (completed.ReqNodes IS NULL AND (completed.ReqProcs / 16) <= 125)
        OR (completed.ReqNodes IS NOT NULL AND completed.ReqNodes <= 125)
     GROUP BY day
    -- Something is definitely wrong with part of the data, so for now,
    -- we have to filter it a little bit.
    HAVING (0 < us) AND (us < 1000)
    ;
    """

    jobs_us = []
    jobs_them = []
    for row in cursor.execute(query):
        jobs_us.append(row["us"])
        jobs_them.append(row["them"])
        print("%s: (%s, %s)" % (row["day"], row["us"], row["them"]))

    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    x = jobs_us
    y = jobs_them
    fit = numpy.polyfit(x, y, 1)
    fit_fn = numpy.poly1d(fit)
    ax.plot(x, y, "bo", x, fit_fn(x), "--r")
    ax.set(
        xlabel="CSC108 Throughput (jobs per day)",
        ylabel="Non-CSC108 Throughput (jobs per day)",
        title="Dependency of Other Projects' \"Bin 5\" Throughput on CSC108")
    ax.grid()

    current_script = os.path.basename(__file__)
    fig.savefig(os.path.splitext(current_script)[0] + ".png", dpi=300)
def regr_plot(x, y, ttle, step=0.1, xlabel="", ylabel="", freq_counter=False,
              order=1, bottomLogo=False):
    # run some regressions to gain a sense of relationship, levels and diffs
    x_ = np.arange(x.min(), x.max(), step)
    z = np.polyfit(x, y, 2)
    p = np.poly1d(z)

    fig = plt.figure(figsize=(12, 10))
    plt.title(ttle)
    ax = fig.add_subplot(111)
    if freq_counter:
        ax.xaxis.set_major_locator(MaxNLocator(symmetric=True))

    y_recent_y = []; x_recent_y = []; y_recent_q = []; x_recent_q = []
    x_recent_y = x.tail(int(0.20 * x.shape[0]))  # current year
    y_recent_y = y[x_recent_y.index]
    # if x.shape[0] > 252:
    #     x_recent_y = x.tail(252)  # current year
    #     y_recent_y = y[x_recent_y.index]
    #     x_recent_q = x.tail(63)  # current quarter
    #     y_recent_q = y[x_recent_q.index]
    # if x.shape[0] > 63:
    #     x_recent_q = x.tail(63)  # current quarter
    #     y_recent_q = y[x_recent_q.index]
    # elif x.shape[0] > 12:
    #     x_recent_q = x.tail(12)  # current quarter
    #     y_recent_q = y[x_recent_q.index]

    sc = ax.scatter(x.values, y.values, s=75, c=y.index.year, cmap=cm.cmaps['viridis'])
    cbar = plt.colorbar(sc)
    cbar.ax.get_yaxis().set_ticks([])
    ax.scatter(x.tail(1).values, y.tail(1).values, c='red', s=85)
    ax.plot(x.values, p(x.values), 'g-')
    # need to plot latest click
    # need to plot scatter line
    xmin, xmax = ax.get_xlim()
    ax.set_xlim([xmin, x.max() * 1.15])
    ymin, ymax = ax.get_ylim()
    ax.set_ylim([ymin, y.max() * 1.15])
    # plt.setp(lines[0], 'label', 'regr')
    # plt.setp(lines[1], 'label', 'data')
    # plt.setp(lines[2], 'label', 'recent data')
    # xmin, xmax = ax.get_xlim()
    # ax.set_xlim([x.min()*1.15, x.max()*1.15])
    # ax.set_xlabel('%s [ m:%.3f, b:%.3f 1/m: %.3f ]' % (xlabel, m, b, 1./m), fontsize=12, style='italic')
    ax.set_xlabel(xlabel, fontsize=12, style='italic')
    if freq_counter:
        ax.text(0.25, 0.75, 'N = %d' % sum(np.logical_and(x < 0, y > 0)), transform=ax.transAxes)
        ax.text(0.75, 0.75, 'N = %d' % sum(np.logical_and(x > 0, y > 0)), transform=ax.transAxes)
    plt.grid()
    plt.ylabel(ylabel)
    plt.tight_layout(w_pad=0.5, h_pad=0.5)
    plt.legend(prop={'size': 10})
    # plot_logo(ax, bottomLogo)
    plt.show()
    return ax
                          header=2, usecols="B,D,G,J,N")
pop_Seoul.rename(columns={
    pop_Seoul.columns[0]: "구별",
    pop_Seoul.columns[1]: "인구수",
    pop_Seoul.columns[2]: "한국인",
    pop_Seoul.columns[3]: "외국인",
    pop_Seoul.columns[4]: "고령자"
}, inplace=True)
pop_Seoul.drop([26], inplace=True)

data_result = pd.merge(CCTV_Seoul, pop_Seoul, on="구별")
data_result.set_index("구별", inplace=True)

fp1 = np.polyfit(data_result["인구수"], data_result["소계"], 1)
f1 = np.poly1d(fp1)
fx = np.linspace(100000, 700000, 100)

data_result["오차"] = np.abs(data_result["소계"] - f1(data_result["인구수"]))
df_sort = data_result.sort_values(by="오차", ascending=False)
df_sort.head()

plt.figure(figsize=(14, 10))
plt.scatter(data_result["인구수"], data_result["소계"], c=data_result["오차"], s=50)
plt.plot(fx, f1(fx), ls="dashed", lw=3, color="g")
for n in range(10):
    plt.text(df_sort["인구수"][n] * 1.02, df_sort["소계"][n] * 0.98,
             df_sort.index[n], fontsize=15)
plt.xlabel("인구수")
plt.ylabel("CCTV")
plt.colorbar()
            sd[res_ind][frame_ind - loop_ind].append(np.sum(disp**2))
        xyz_pre = xyz_com
        if sub_ind % 10 == 9:
            print('Reading chunk', chunk_index + 1, 'and frame',
                  chunk_index * chunk_size + sub_ind + 1)
#    if chunk_index == 9:
#        break

time = np.arange(nframe - nframe // 2 + 1) * dt
data = time.copy()
pt_start = len(time) // 2
msd = []
print('frames read:', chunk_index * chunk_size + sub_ind + 1)
for i, item in enumerate(sd):
    sd[i][0] = 0
    msd.append([])
    for j in item:
        msd[-1].append(np.mean(j))
    msd[i] = np.array(msd[i]) / len(res_targ[i])
    coef = np.polyfit(time[pt_start:], msd[i][pt_start:], 1)
    print(coef / 4 / 1e6)
    fit = np.poly1d(coef)
    plt.plot(time, msd[i], label=str(i))
    plt.plot(time, fit(time), '--', label='fit' + str(i))
    data = np.column_stack((data, msd[i]))
plt.legend(loc='best')
plt.xlabel('time (ps)')
plt.ylabel('MSD (nm^2)')
plt.savefig(outname + '.pdf')
np.savetxt(outname + '.txt', data, fmt=['%12.4f' for i in range(len(data[0]))])
from matplotlib import patches
# if using termux
import subprocess
import shlex

# co-efficients of the numerator and denominator
input_signal, fs = sf.read('../data/Sound_Noise.wav')
sampl_freq = fs
order = 4
cutoff_freq = 4000
Wn = 2 * cutoff_freq / sampl_freq
b, a = signal.butter(order, Wn, 'low')

# calculating zeros
zeros = np.poly1d(b).r
print("Zeros: ", zeros)
real_z = np.real(zeros)
imag_z = np.imag(zeros)

# calculating poles
poles = np.poly1d(a).r
print("Poles: ", poles)
real_p = np.real(poles)
imag_p = np.imag(poles)

x = np.linspace(-3, 3, 1000)
y1 = np.sqrt(16 - x**2)
y2 = -np.sqrt(16 - x**2)
plt.fill_between(x, y1, y2, color='#539ecd')
def plot_linear_relation(x, y, x_err=None, y_err=None, title=None,
                         point_label=None, legend=None, plot_range=None,
                         plot_range_y=None, x_label=None, y_label=None,
                         y_2_label=None, marker_style='-o', log_x=False,
                         log_y=False, size=None, filename=None):
    ''' Takes point data (x,y) with errors(x,y) and fits a straight line.
    The deviation to this line is also plotted, showing the offset.

    Parameters
    ----------
    x, y, x_err, y_err: iterable
    filename: string, PdfPages object or None
        PdfPages file object: plot is appended to the pdf
        string: new plot file with the given filename is created
        None: the plot is printed to screen
    '''
    fig = Figure()
    canvas = FigureCanvas(fig)
    ax = fig.add_subplot(111)
    if x_err is not None:
        x_err = [x_err, x_err]
    if y_err is not None:
        y_err = [y_err, y_err]
    ax.set_title(title)
    if y_label is not None:
        ax.set_ylabel(y_label)
    if log_x:
        ax.set_xscale('log')
    if log_y:
        ax.set_yscale('log')
    if plot_range:
        ax.set_xlim((min(plot_range), max(plot_range)))
    if plot_range_y:
        ax.set_ylim((min(plot_range_y), max(plot_range_y)))
    if legend:
        fig.legend(legend, 0)
    ax.grid(True)
    ax.errorbar(x, y, xerr=x_err, yerr=y_err, fmt='o', color='black')  # plot points
    # label points if needed
    if point_label is not None:
        for X, Y, Z in zip(x, y, point_label):
            ax.annotate('{}'.format(Z), xy=(X, Y), xytext=(-5, 5),
                        ha='right', textcoords='offset points')
    # line fit
    line_fit, pcov = np.polyfit(x, y, 1, full=False, cov=True)
    # print pcov
    # chi_squared = np.sum((np.polyval(line_fit, x) - y) ** 2)
    fit_fn = np.poly1d(line_fit)
    ax.plot(x, fit_fn(x), '-', lw=2, color='gray')
    line_fit_legend_entry = 'line fit: ax + b\na=$%.2f\pm%.2f$\nb=$%.2f\pm%.2f$' % (
        line_fit[0], np.absolute(pcov[0][0]) ** 0.5,
        abs(line_fit[1]), np.absolute(pcov[1][1]) ** 0.5)
    # fig.legend(["data", line_fit_legend_entry], 0)
    setp(ax.get_xticklabels(), visible=False)  # remove ticks at common border of both plots

    divider = make_axes_locatable(ax)
    ax_bottom_plot = divider.append_axes("bottom", 2.0, pad=0.0, sharex=ax)
    ax_bottom_plot.bar(x, y - fit_fn(x), align='center',
                       width=np.amin(np.diff(x)) / 2, color='gray')
    # plot(x, y - fit_fn(x))
    ax_bottom_plot.grid(True)
    if x_label is not None:
        ax.set_xlabel(x_label)
    if y_2_label is not None:
        ax.set_ylabel(y_2_label)

    ax.set_ylim((-np.amax(np.abs(y - fit_fn(x)))), (np.amax(np.abs(y - fit_fn(x)))))
    ax.plot(ax.set_xlim(), [0, 0], '-', color='black')
    setp(ax_bottom_plot.get_yticklabels()[-2:-1], visible=False)
    # print ax_bottom_plot.get_yticklabels()[1]
    # ax.set_aspect(2)
    # ax_bottom_plot.set_aspect(2)

    if size is not None:
        fig.set_size_inches(size)

    if not filename:
        fig.show()
    elif isinstance(filename, PdfPages):
        filename.savefig(fig)
    elif filename:
        fig.savefig(filename, bbox_inches='tight')
    return fig
pktnum2 = pktnum[maske2]
delta_l_u2 = delta_l_u[maske2]
delta_l_o2 = delta_l_o[maske2]

h0 = 0.584

print(pktnum1)
print(pktnum2)

xnew1 = np.linspace(pktnum1.values.min(), 2200, 1000)
xnew2 = np.linspace(2200, pktnum2.values.max(), 1000)
xnew_all = np.linspace(0, 4000, 500)

parameter1 = np.polyfit(pktnum1.values, h0 + delta_l_u1.values, 3)
parameter2 = np.polyfit(pktnum2.values, h0 + delta_l_u2.values, 2)
parameter3 = np.polyfit(pktnum1.values, h0 + delta_l_o1.values, 3)
parameter4 = np.polyfit(pktnum2.values, h0 + delta_l_o2.values, 2)

p1 = np.poly1d(parameter1)
p2 = np.poly1d(parameter2)
p3 = np.poly1d(parameter3)
p4 = np.poly1d(parameter4)

print(p1)
print(p2)
print(p3)
print(p4)


def piecewise_linear_u(x, p1, p2):
    return np.piecewise(x, [x < 2300, x >= 2300], [
        lambda x: 3.332e-11 * x**3 - 1.233e-07 * x**2 + 0.000419 * x + 0.5850,
        lambda x: -4.999e-08 * x**2 + 0.0003831 * x + 0.6856
    ])
def plot_windspeed_slev(df, filename, wind_threshold, tide_threshold, time_frame):
    # allows you to print the entire numpy array
    np.set_printoptions(threshold=sys.maxsize)
    print("\nGenerating plots for %s and assembled_tides.csv..." % (filename))

    # convert all the string objects in the Date/Time index to datetime objects
    dates = [dt.datetime.strptime(string, '%Y-%m-%d %H:%M') for string in df.index]
    df.index = dates

    if len(df) == 0:
        print("The dataframe is empty. Nothing to show here.")
        sys.exit(0)

    try:
        df.rename(columns={'Data': 'Wind Spd (km/h)'}, inplace=True)
    except KeyError as e:
        print(e)

    # replace NaNs with zeros, otherwise the find-peaks function call will not work
    wind_speeds = df['Wind Spd (km/h)'].tolist()
    slevs = df['SLEV'].tolist()
    dates = df.index.tolist()
    for n, i in enumerate(wind_speeds):
        if np.isnan(i):
            wind_speeds[n] = 0

    # underscore is used to ignore unwanted return values while unpacking objects
    peak_wind_indices, _ = find_peaks(wind_speeds, height=wind_threshold, distance=time_frame)
    peak_slev_indices, _ = find_peaks(slevs, height=tide_threshold, distance=time_frame)

    # map the peak indices to the values and dates for wind speeds
    peak_wind_speeds = [wind_speeds[index] for index in peak_wind_indices]
    peak_wind_dates = [dates[index] for index in peak_wind_indices]

    # map the peak indices to the values and dates for storm surges
    peak_slevs = [slevs[index] for index in peak_slev_indices]
    peak_slev_dates = [dates[index] for index in peak_slev_indices]

    # Wind speed peaks for surge window -------------------------------------
    # index_windows = indices_of_window_above_threshold(peak_slev_indices.tolist(), slevs, tide_threshold)
    # peaks_in_window = find_max_per_window(index_windows, wind_speeds, dates)
    # window_max = [peaks_in_window[index][0] for index, item in enumerate(peaks_in_window)]
    # window_dates = [peaks_in_window[index][1] for index, item in enumerate(peaks_in_window)]
    #
    # # use a masked array to suppress slev values that are below threshold, only used for plotting
    # slevs_masked = np.ma.masked_less(slevs, tide_threshold)
    # plt.scatter(window_max, peak_slevs)
    # plt.xlabel('Max Wind Speed (km/h) Per Window over Threshold')
    # plt.ylabel('Max Surge (cm) per Window over Threshold')
    # # line of best fit
    # plt.plot(np.unique(window_max), np.poly1d(np.polyfit(window_max, peak_slevs, 1))(np.unique(window_max)), color='r')
    # plt.show()
    #
    # plt.scatter(window_dates, window_max, color='k')
    # # plt.scatter(peak_slev_dates, peak_slevs, color='g')
    # plt.plot(df.index, df['Wind Spd (km/h)'], label='Wind Speed (km/h)')
    # plt.plot(df.index, df['SLEV'], label='Storm Surge (Metres * 100)')
    # plt.plot(df.index, slevs_masked, 'r', linewidth=2)
    # plt.title("Wind Speed Peaks for %d Hour Periods Over %dcm Storm Surge Windows\nfor %s" % (time_frame, wind_threshold, filename))
    # plt.legend(bbox_to_anchor=(0.5, -0.1), loc=8, ncol=2, mode="expand", borderaxespad=0.)
    # plt.show()
    # -----------------------------------------------------------------------

    # Surge peaks for wind speed window -------------------------------------
    index_windows = indices_of_window_above_threshold(peak_wind_indices.tolist(),
                                                      wind_speeds, wind_threshold)
    peaks_in_window = find_max_per_window(index_windows, slevs, dates)
    window_max = [peaks_in_window[index][0] for index, item in enumerate(peaks_in_window)]
    window_dates = [peaks_in_window[index][1] for index, item in enumerate(peaks_in_window)]

    # use a masked array to suppress values that are below threshold, only used for plotting
    wind_speeds_masked = np.ma.masked_less(wind_speeds, wind_threshold)

    plt.scatter(window_max, peak_wind_speeds)
    plt.xlabel('Max Surge (cm) Per Window over Threshold')
    plt.ylabel('Max Wind Speed (km/h) per Window over Threshold')
    plt.title("Max window wind speed over max surge for window over %dkm/h\nfor %s"
              % (wind_threshold, filename))
    # line of best fit
    plt.plot(np.unique(window_max),
             np.poly1d(np.polyfit(window_max, peak_wind_speeds, 1))(np.unique(window_max)),
             color='r')
    plt.show()

    plt.scatter(window_dates, window_max, color='k')
    # plt.scatter(peak_wind_dates, peak_wind_speeds, color='r')
    plt.plot(df.index, df['Wind Spd (km/h)'], label='Wind Speed (km/h)')
    plt.plot(df.index, df['SLEV'], label='Storm Surge (Metres * 100)')
    plt.plot(df.index, wind_speeds_masked, 'r', linewidth=2)
    plt.title("Surge Peaks for %d Hour Periods Over %dkm/h Wind Speed Windows\nfor %s"
              % (time_frame, wind_threshold, filename))
    plt.legend(bbox_to_anchor=(0.5, -0.1), loc=8, ncol=2, mode="expand", borderaxespad=0.)
    plt.show()
    # -----------------------------------------------------------------------

    print('The number of peaks over windspeed threshold is: %s' % str(len(peak_wind_speeds)))
    print('The number of peaks over storm surge threshold is: %s' % str(len(peak_slevs)))
d = np.eye(2)  # create a 2x2 identity matrix
print(d)       # prints "[[ 1.  0.] [ 0.  1.]]"

arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
# slicing can also be done here
sub_arr = arr[:2, 1:3]  # rows x columns

# below are the same statements
print(arr[[0, 1, 2], [0, 1, 0]])  # prints (0,0) (1,1) (2,0)

poi = np.arange(4)  # gives an array from 0 to 3

# math funcs + - * / sqrt mean can be called
# the dot function performs matrix multiplication
l = np.array([1, 5, 8, 6])
m = np.array([[1], [5], [6], [8]])
print(l.dot(m))       # both are the same
print(np.dot(l, m))

print(np.sum(arr))          # compute the sum of all elements
print(np.sum(arr, axis=0))  # compute the sum of each column
print(np.sum(arr, axis=1))  # compute the sum of each row

arr.transpose()
fgt = np.empty_like(arr)  # create an empty matrix with the same shape as arr

npoints = 20
slope = 2
offset = 3
x = np.arange(npoints)
y = slope * x + offset + np.random.normal(size=npoints)
print(y)
p = np.polyfit(x, y, 2)
print(p)  # gives the coefficients of a polynomial fitted to the given values

p = np.poly1d([1, 2, 3])
print(np.poly1d(p))  # creates a polynomial
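# a few more poly1d conveniences worth knowing, continuing from p above
print(p(0))       # evaluate at x=0 -> 3
print(p.r)        # roots of x^2 + 2x + 3 (a complex pair)
print(p.deriv())  # derivative: 2x + 2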
def eval(self, fit, x):
    return np.poly1d(fit)(x)
def learn(self, my_dict, aplot=None):
    if my_dict is None:
        logging.critical("Cannot learn function with empty dict")
        return lambda _: 1, 0
    d_dict = dict()
    samples, thresholds = [], []
    for k, v in six.iteritems(my_dict):
        for o in (_ for _ in v if _):
            dnearest = np.array(np.load("{}.npz".format(o))['X']).reshape(-1, 1)
            var = np.var(dnearest)
            if var == 0:
                continue
            med = np.median(dnearest)
            mean, _, _, h = mean_confidence_interval(dnearest)
            samples.append(dnearest.shape[0])
            d_dict.setdefault(o.split('/')[0], dict()).setdefault(k, [med, h])
            # for the threshold, fit a gaussian (unused for AP)
            thresholds.append(_gaussian_fit(dnearest))
    if len(d_dict) < 1:
        logging.critical("dictionary is empty")
        return lambda _: 1, 0
    for k, v in six.iteritems(d_dict):  # there is only one
        xdata = np.array(sorted(v))
        ydata = np.array([np.mean(v[x][0]) for x in xdata])
        yerr = np.array([np.mean(v[x][1]) for x in xdata])

        # take only significant values, higher than 0
        mask = ydata > 0
        xdata = xdata[mask]
        if xdata.shape[0] < 2:
            logging.critical("Too few points to learn function")
            # no correction can be applied
            return lambda _: 1, 0
        ydata = ydata[mask]
        ydata = ydata[0] / ydata  # normalise
        yerr = yerr[mask]

        order = min(self.order, xdata.shape[0] - 1)
        warnings.filterwarnings("ignore")
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                poly = np.poly1d(
                    np.polyfit(xdata, ydata, order, w=1. / (yerr + 1e-15)))
            except np.RankWarning:
                logging.critical(
                    "Cannot fit polynomial with degree %d, npoints %d",
                    order, xdata.shape[0])
                return lambda _: 1, 0

    if self.aplot is not None:
        plot_learning_function(xdata, ydata, yerr, order, self.aplot, poly)

    # poly = partial(model, res.x)
    return poly, 1 - (list(filter(
        lambda x: x > 0,
        np.array(thresholds)[np.array(samples).argsort()[::-1]])) or [0])[0]
df_young['AbsW2_err'] = AbsW2y_err

# -----------------------------------------------------------------------------
# ------------------------- Polynomial fits ----------------------------------
# -----------------------------------------------------------------------------

# ------ Fit polynomial for subdwarfs ------
# need to drop NaNs
df_sub2 = df_sub.drop(df_sub.index[[1, 10]])

# --- get uncertainties for upper and lower teff limits ----
df_sub2['AbsW2_up'] = df_sub2['MW2'] + df_sub2['MW2_unc']
df_sub2['AbsW2_d'] = df_sub2['MW2'] - df_sub2['MW2_unc']

# ------ fit the values --------
coeffs = np.polyfit(df_sub2['SpT'], df_sub2['MW2'], 1)
line = np.poly1d(coeffs)
coeffs_up = np.polyfit(df_sub2['SpT'], df_sub2['AbsW2_up'], 1)
line_up = np.poly1d(coeffs_up)
coeffs_d = np.polyfit(df_sub2['SpT'], df_sub2['AbsW2_d'], 1)
line_d = np.poly1d(coeffs_d)

# ---- print values to screen -------
print(coeffs)
print(coeffs_up)
print(coeffs_d)

# ---- plot the fit lines -----
# xp = np.linspace(5, 30, 100)
def _func(self, volume, params):
    x = volume**(-2.0 / 3.0)
    return np.poly1d(list(params))(x)
def smoothTrend(df, pollutant, Type):
    """
    Plots a connected scatter plot of the average value of the pollutant for
    every month of every year, then plots a smooth line of best fit through
    the plot, showing the user the overall trend of the pollutant through
    the years.

    Parameters
    ----------
    df: data frame
        minimally containing date and at least one other pollutant
    pollutant: type string
        A pollutant name corresponding to a variable in a data frame,
        ex: 'pm25'
    Type: type int
        Value can be either 1 or 2
        1: normal trend line
        2: bootstrap uncertainties
    """
    import datetime as dt
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    import numpy as np
    import pandas as pd
    from numpy import array
    import seaborn as sns
    import scipy
    from scipy import stats
    import math
    from scipy.ndimage.filters import gaussian_filter1d

    # =========================================================================
    # df = pd.read_csv("mydata.csv")
    # =========================================================================
    df.index = pd.to_datetime(df.date)
    unique_years = np.unique(df.index.year)
    # df = df[pd.notnull(df[pollutant])]
    i = 0
    year = []
    while i < len(unique_years):
        year.append(str(unique_years[i]))
        i = i + 1
    num_unique_years = len(year)

    i = 0
    values = []
    while i < num_unique_years:
        df_v = df[year[i]].resample("1D").mean()
        df_v = df_v.fillna(method="ffill")
        df_v["month"] = df_v.index.month
        # df_new.index.dayofweek
        nox = df_v[pollutant].mean()
        values.append(nox)
        i = i + 1
    values = gaussian_filter1d(values, sigma=0.75)
    # df = df.drop("date", axis=1)
    # print(df)

    i = 0
    x = 0
    j = 0
    var2 = []
    while i < num_unique_years:
        df_new = df[year[j]].resample("1D").mean()
        df_new = df_new.fillna(method="ffill")
        df_new["month"] = df_new.index.month
        # df_new['day'] = df_new.index.dayofweek
        # df_new['hour'] = df_new.index.hour
        i = i + 1
        j = j + 1
        x = 0
        while x < 12:
            a = df_new[df_new.month == x]
            mean_var2 = a[pollutant].mean()
            var2.append(mean_var2)
            x = x + 1

    i = 0
    while i < len(var2):
        if pd.notnull(var2[i]) == False:
            var2[i] = (var2[i - 1] + var2[i + 1]) / 2
        i = i + 1

    scatterX = []
    t = 0
    while t < num_unique_years:
        r = 0
        while r < 12:
            scatterX.append(t + (r / 12))
            r = r + 1
        t = t + 1

    y = var2
    x = scatterX

    def best_fit(X, Y):
        xbar = sum(X) / len(X)
        ybar = sum(Y) / len(Y)
        n = len(X)  # or len(Y)
        numer = sum([xi * yi for xi, yi in zip(X, Y)]) - n * xbar * ybar
        denum = sum([xi ** 2 for xi in X]) - n * xbar ** 2
        b = numer / denum
        a = ybar - b * xbar
        # print('best fit line:\ny = {:.2f} + {:.2f}x'.format(a, b))
        return a, b

    a, b = best_fit(x, y)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # print(len(x))
    ax.plot(x, y, "-o", color="red")
    ax.set_xlabel("Year")
    ax.set_ylabel("concentration")
    ax.set_title("monthly mean " + pollutant)
    if Type == 1:
        plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)),
                 color="black")
    else:
        plt.plot(values, color="black")
    plt.show()
def plot_j_C(filename):
    file = Document.query.filter_by(filename=filename).first()
    xdata, ydata = readfile(filepath=filepath(filename), u_amp=file.amplification)
    xrange = np.linspace(np.amin(xdata), np.amax(xdata), 100)
    p11 = np.polyfit(xdata, ydata, 11)
    if file.remove_offset == True:
        for i in range(0, len(ydata)):
            ydata[i] -= p11[-1]
    if file.remove_ohm:
        # ohmic_res_p1 = ohmig_res(xdata, ydata)
        ohmic_res_p1 = calc_res_from_border(xdata, ydata, file.res_border)
        for i in range(0, len(ydata)):
            ydata[i] -= ohmic_res_p1[0] * xdata[i]
    p11 = np.polyfit(xdata, ydata, 11)
    p = np.poly1d(p11)
    ohmic_res_p1 = calc_res_from_border(xdata, ydata, file.res_border)
    roots = solve_for_y_real(p11, p11[-1] + 10)
    froots = filter_roots_by_range(roots=roots, min=np.amin(xdata), max=np.amax(xdata))
    # ohmic_res_p1 = ohmig_res(xdata, ydata)
    # discrete derivative
    # dy = np.zeros(xrange.shape, np.float)
    # dy[0:-1] = np.diff(p(xrange)) / np.diff(xrange)
    # dy[-1] = dy[-2]
    # d2y = np.zeros(xrange.shape, np.float)
    # d2y[0:-1] = np.diff(dy) / np.diff(xrange)
    # d2y[-1] = d2y[-2]
    plt.plot(xdata, ydata, ".b", label="U(I), amplification " + str(file.amplification))
    plt.plot(xrange, p(xrange), "-r", label="polyfit: n=11")
    plt.plot(xrange, [p11[-1] + 10] * 100, "--r")
    if len(froots) == 1:
        plt.plot([froots[0]] * 2, [np.amin(ydata), np.amax(ydata)], "--r",
                 label="j_C = %.3fmA" % froots[0])
        file.j_C = froots[0]
    plt.plot(xrange, ohmic_res_p1[0] * xrange + ohmic_res_p1[-1], "--g",
             label="R = %.5fmOhm" % ohmic_res_p1[0])
    p_res = np.poly1d(ohmic_res_p1)
    plt.plot(xrange, p_res(xrange), "-g")
    file.res = ohmic_res_p1[0]
    db.session.add(file)
    db.session.commit()
    # plt.plot(xrange, dy, "--y", label="derivative of p11")
    # plt.plot(xrange, d2y, "g", label="2nd derivative of p11")
    plt.xlabel('I in mA')
    plt.ylabel('U in uV')
    plt.legend()
    plt.grid(which="both", axis="both")
    plt.savefig(plotpath(filename, "_j_C.png"))
def fit(self, min_ndata_factor=3, max_poly_order_factor=5, min_poly_order=2):
    """
    Fit the input data to the 'numerical eos', the equation of state employed
    in the quasiharmonic Debye model described in the paper:
    10.1103/PhysRevB.90.174107.

    credits: Cormac Toher

    Args:
        min_ndata_factor (int): parameter that controls the minimum number
            of data points that will be used for fitting.
            minimum number of data points = total data points - 2 * min_ndata_factor
        max_poly_order_factor (int): parameter that limits the max order
            of the polynomial used for fitting.
            max_poly_order = number of data points used for fitting - max_poly_order_factor
        min_poly_order (int): minimum order of the polynomial to be
            considered for fitting.
    """
    warnings.simplefilter("ignore", np.RankWarning)

    def get_rms(x, y):
        return np.sqrt(np.sum((np.array(x) - np.array(y))**2) / len(x))

    # list of (energy, volume) tuples
    e_v = list(zip(self.energies, self.volumes))
    ndata = len(e_v)
    # minimum number of data points used for fitting
    ndata_min = max(ndata - 2 * min_ndata_factor, min_poly_order + 1)
    rms_min = np.inf
    # number of data points available for fit in each iteration
    ndata_fit = ndata
    # store the fit polynomial coefficients and the rms in a dict,
    # where the key=(polynomial order, number of data points used for
    # fitting)
    all_coeffs = {}

    # sort by energy
    e_v = sorted(e_v, key=lambda x: x[0])
    # minimum energy tuple
    e_min = e_v[0]
    # sort by volume
    e_v = sorted(e_v, key=lambda x: x[1])
    # index of minimum energy tuple in the volume sorted list
    emin_idx = e_v.index(e_min)
    # the volume lower than the volume corresponding to minimum energy
    v_before = e_v[emin_idx - 1][1]
    # the volume higher than the volume corresponding to minimum energy
    v_after = e_v[emin_idx + 1][1]
    e_v_work = deepcopy(e_v)

    # loop over the data points.
    while (ndata_fit >= ndata_min) and (e_min in e_v_work):
        max_poly_order = ndata_fit - max_poly_order_factor
        e = [ei[0] for ei in e_v_work]
        v = [ei[1] for ei in e_v_work]
        # loop over polynomial order
        for i in range(min_poly_order, max_poly_order + 1):
            coeffs = np.polyfit(v, e, i)
            pder = np.polyder(coeffs)
            a = np.poly1d(pder)(v_before)
            b = np.poly1d(pder)(v_after)
            if a * b < 0:
                rms = get_rms(e, np.poly1d(coeffs)(v))
                rms_min = min(rms_min, rms * i / ndata_fit)
                all_coeffs[(i, ndata_fit)] = [coeffs.tolist(), rms]
                # store the fit coefficients small to large,
                # i.e a0, a1, .. an
                all_coeffs[(i, ndata_fit)][0].reverse()
        # remove 1 data point from each end.
        e_v_work.pop()
        e_v_work.pop(0)
        ndata_fit = len(e_v_work)

    logger.info("total number of polynomials: {}".format(len(all_coeffs)))

    norm = 0.0
    fit_poly_order = ndata
    # weight average polynomial coefficients.
    weighted_avg_coeffs = np.zeros((fit_poly_order,))

    # combine all the filtered polynomial candidates to get the final fit.
    for k, v in all_coeffs.items():
        # weighted rms = rms * polynomial order / rms_min / ndata_fit
        weighted_rms = v[1] * k[0] / rms_min / k[1]
        weight = np.exp(-(weighted_rms**2))
        norm += weight
        coeffs = np.array(v[0])
        # pad the coefficient array with zeros
        coeffs = np.lib.pad(coeffs, (0, max(fit_poly_order - len(coeffs), 0)),
                            "constant")
        weighted_avg_coeffs += weight * coeffs

    # normalization
    weighted_avg_coeffs /= norm
    weighted_avg_coeffs = weighted_avg_coeffs.tolist()
    # large to small (an, an-1, ..., a1, a0) as expected by np.poly1d
    weighted_avg_coeffs.reverse()

    self.eos_params = weighted_avg_coeffs
    self._set_params()
plt.ylabel('R')
plt.show()

plt.plot(W, J, color='red', label='2')
plt.legend()  # show the legend
plt.xlabel('s[0-2 * pi]')
plt.ylabel('J')
plt.show()

plt.plot(W, Z, color='red', label='3')
plt.legend()  # show the legend
plt.xlabel('A[0-2 * pi]')
plt.ylabel('trend')
plt.show()

z1 = np.polyfit(W, R, 3)  # fit with a degree-3 polynomial
p1 = np.poly1d(z1)
print(p1)  # print the fitted polynomial
yvals = p1(W)  # np.polyval(z1, x) could be used instead

z2 = np.polyfit(W, J, 3)  # fit with a degree-3 polynomial
p2 = np.poly1d(z2)
print(p2)  # print the fitted polynomial
yvals2 = p2(W)  # np.polyval(z1, x) could be used instead

plot1 = plt.plot(W, R, '*', label='original values')
plot2 = plt.plot(W, yvals, 'r', label='polyfit values')
plt.xlabel('W axis')
plt.ylabel('R axis')
plt.legend(loc=4)  # set the legend location; see help(plt.legend) for usage
plt.title('polyfitting')
plt.show()
def _func(self, volume, params):
    return np.poly1d(list(params))(volume)
def ForwardPolyFactory(coeff):
    """generate a 1-D polynomial instance from a list of coefficients"""
    from numpy import poly1d
    return poly1d(coeff)
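# Usage sketch: coefficients are given highest power first, matching np.poly1d.
quadratic = ForwardPolyFactory([1, -3, 2])   # x^2 - 3x + 2
print(quadratic(1.0), quadratic(2.0))        # both roots evaluate to 0.0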
peaks, _ = find_peaks(data[1, :], height=3, distance=5) print('==========================================') print("Expected number of wavelength points: %d" % int(laser.wavelength_logging_number())) print("Actual wavelength points measured: %d" % len(peaks)) print('==========================================') device_data = data[0, peaks[0]:peaks[-1]] device_time = np.arange(0, device_data.size) / sample_rate modPeaks = peaks - peaks[0] modTime = modPeaks / sample_rate z = np.polyfit(modTime, wavelength_logging, 2) p = np.poly1d(z) device_wavelength = p(device_time) device_wavelength_mod = hash(tuple(device_wavelength)) device_data_mod = hash(tuple(device_data)) # ---------------------------------------------------------------------------- # # Visualize and save results # ---------------------------------------------------------------------------- # plt.figure() plt.plot(device_wavelength, device_data) plt.xlabel('Wavelength (nm)') plt.ylabel('Power (au)') plt.grid(True) plt.tight_layout()
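# The calibration above maps sample time to wavelength: each detected trigger
# peak corresponds to one logged wavelength, and a quadratic fit of wavelength
# against peak time then interpolates a wavelength for every sample. A minimal
# sketch with synthetic stand-ins for the peak times and the wavelength log:
import numpy as np

peak_times = np.array([0.0, 1.0, 2.1, 3.3])             # seconds, one per trigger peak
logged_wl = np.array([1550.0, 1550.5, 1551.0, 1551.5])  # nm, one per peak
coeffs = np.polyfit(peak_times, logged_wl, 2)
sample_times = np.linspace(0.0, 3.3, 1000)
sample_wl = np.poly1d(coeffs)(sample_times)             # a wavelength for every sample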
import numpy as np

p = np.poly1d([1, 1, 2], variable="z")
print(p)
# prints the polynomial with the exponent rendered on the line above:
#    2
# 1 z + 1 z + 2
import os
import sys

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

name1 = sys.argv[1].split(os.sep)[-2]
fpkm1 = pd.read_csv(sys.argv[1], sep="\t", index_col="t_name").loc[:, "FPKM"]
log_fpkm1 = np.log(fpkm1 + 1)

name2 = sys.argv[2].split(os.sep)[-2]
fpkm2 = pd.read_csv(sys.argv[2], sep="\t", index_col="t_name").loc[:, "FPKM"]
log_fpkm2 = np.log(fpkm2 + 1)

df = pd.read_csv(sys.argv[1], sep="\t", index_col="t_name")  # full table; unused below

fig, ax = plt.subplots()
p = np.polyfit(log_fpkm1, log_fpkm2, 1)
x = np.linspace(0, 8)
curve = np.poly1d(p)
ax.scatter(log_fpkm1, log_fpkm2, alpha=0.1)
ax.set_title("FPKM comparison")
ax.set_xlabel(name1 + " log FPKM")
ax.set_ylabel(name2 + " log FPKM")
# ax.set_yscale('log')
# ax.set_xscale('log')
ax.set_ylim([-1, 10])
ax.set_xlim([-1, 10])
plt.plot(x, curve(x), 'k')
plt.text(7, 0, str(curve))
fig.savefig("FPKM_plot.png")
plt.close(fig)
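# Why log(FPKM + 1) rather than log(FPKM): the +1 pseudocount keeps
# zero-expression transcripts finite (mapping them to 0 instead of -inf)
# while leaving the transform monotone. A quick illustration:
import numpy as np

fpkm = np.array([0.0, 0.5, 10.0, 1000.0])
print(np.log(fpkm + 1))  # finite everywhere, increasing in fpkm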
if dicV['%d' % i][counter - 6] >= dicV['%d' % i][
        counter - 5] and dicV['%d' % i][counter - 6] > dicV[
        '%d' % i][counter - 7]:
    coordinate_down[y_coordinate[
        i - 1]] = (1 + float(lines[counter - 6])) * 30

ycu = []
for key in list(coordinate_up.keys()):
    ycu.append(float(key))
ycufinal = np.array(ycu)

xcu = []
for key in list(coordinate_up.values()):
    xcu.append(float(key))
xcufinal = np.array(xcu)

z1 = np.polyfit(xcufinal, ycufinal, 2)  # fit with a 2nd-degree polynomial
p1 = np.poly1d(z1)
print(p1)

plot1 = plt.plot(xcufinal, ycufinal, '*', label='points')
plt.xlabel('φ')
plt.ylabel('α')
plt.legend(loc=4)
plt.title('Upper surface X_Ctr graph')

X = np.linspace(float(list(coordinate_up.values())[0]) - 2, 30, 150)
Y = p1(X)
plt.plot(X, Y)
plt.show()

# the lower surface curve generally does not possess an analytic value
'''ycd=[]
for key in list(coordinate_down.keys()):
    ycd.append(float(key))
# Adjust the location of the plots to make space for the legend, then save
plt.subplots_adjust(right=0.6, hspace=0.2)
plt.show()

def find_nearest(array, value):
    idx = np.argmin(np.abs(array - value))
    return idx

# Pareto front
revenue = calculate_tot_profit([landuse_map_in], 400)
area = calculate_area([landuse_map_in], 400)

f1, ax1 = plt.subplots(1)
plt.scatter(-results[:, 0], -results[:, 1])
coef = np.polyfit(-results[:, 0], -results[:, 1], 1)
print(coef[0] * 1000000)
poly1d_fn = np.poly1d(coef)
plt.plot(-results[:, 0], poly1d_fn(-results[:, 0]), "--k")
plt.plot(revenue[0], area[0], "sr")
ax1.set_title("Objective Space")
ax1.set_xlabel('Total profit [US$]')
ax1.set_ylabel('Natural vegetation [hectare]')
#plt.savefig(default_directory+"/results2016N6/pareto_front.png")
plt.show()

# find the maps where profit or natural vegetation was optimized
idx1 = find_nearest(results[:, 0], -revenue[0])
landuse_nearest_revenue = maps[idx1]
idx2 = find_nearest(results[:, 1], -area[0])
landuse_nearest_area = maps[idx2]
# ===========================================================================
# iterate over both lists
# ===========================================================================
for n, (i, j) in enumerate(zip(a, b)):

    # get x and y
    x = numpy.array(tout[i:j])
    y = numpy.array(radon[i:j])

    # set axis ranges
    plt.ylim(0, 100)
    plt.xlim(-20, 20)

    # perform regressions: a degree-1 polynomial fit and, equivalently,
    # scipy's linregress (which also reports the correlation coefficient)
    z = numpy.polyfit(x, y, 1)
    p = numpy.poly1d(z)
    slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    r2 = round(r_value**2, 4)
    line = slope * x + intercept

    # plot data
    plt.plot(x, y, marker[n], markeredgewidth=0.75,
             markeredgecolor='k', markerfacecolor=marker_facecolor[n],
             alpha=transparency[n],
             label='%s%s' % (names[n], str(r2)))
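# For a degree-1 fit, np.polyfit and scipy.stats.linregress agree on slope and
# intercept; linregress additionally returns the correlation coefficient. A
# quick self-contained check with hypothetical data:
import numpy as np
from scipy import stats

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = np.array([1.1, 2.9, 5.2, 6.8, 9.1])
slope_pf, intercept_pf = np.polyfit(x, y, 1)
res = stats.linregress(x, y)
assert np.allclose([slope_pf, intercept_pf], [res.slope, res.intercept])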
# polynomial regression: predict the speed of a car at time 20:00
# x: time of day (hours); y: car speed
# r2_score measures the goodness of fit (the R-squared value)
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
import numpy as np

x = [1, 2, 3, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 18, 19, 21, 22]
y = [100, 90, 80, 60, 60, 55, 60, 65, 70, 70, 75, 76, 78, 79, 90, 99, 99, 100]

mymodel = np.poly1d(np.polyfit(x, y, 3))
myline = np.linspace(1, 22, 100)

plt.scatter(x, y)
plt.plot(myline, mymodel(myline))
plt.show()

speed = mymodel(20)  # evaluate the fitted cubic at hour 20
print(speed)
print(r2_score(y, mymodel(x)))
def pltRepresentation(self, pitchHz=False, figurePlt=False, figureFilename='test.png',
                      pitchtrackFilename=None, refinedSegFilename=None, evaluation=False):
    if figurePlt:
        plt.figure()
    ii = 0  # a counter used to alternate colors when plotting the pitch contours

    # plot the original pitch track for comparison
    # if pitchtrackFilename:
    #     frameStartingTime, pt = self.ptSeg1.readPyinPitchtrack(pitchtrackFilename)
    #     frameInd = frameStartingTime*self.nc1.samplerate/self.nc1.hopsize
    #     ptmidi = uf.pitch2midi(pt)
    #     plt.plot(frameInd,ptmidi)

    representation = []
    startFrames = []
    endFrames = []
    sortedKey = sorted(self.representationTargetDict.keys())
    for key in sortedKey:
        leny = len(self.representationSegmentPts[key])
        startFrame = self.representationBoundariesDict[key][0]
        # the stored end frame (self.representationBoundariesDict[key][1]) is
        # not correct, so recompute it from the segment length
        endFrame = startFrame + leny
        xboundary = np.linspace(startFrame, endFrame - 1, leny)

        startFrames.append(startFrame)
        endFrames.append(endFrame)

        if self.representationTargetDict[key] == 0 or self.representationTargetDict[key] == 1:
            p = self.representationFeaturesDict[key][0]
            if p is not None:
                x = np.linspace(0, leny - 1, leny)
                pc = np.poly1d(p)  # polynomial class
                y = pc(x)
                self.pltRepresentationHelper(xboundary, y, representation, pitchHz)

                if figurePlt:
                    # plot the linear curve, alternating colors
                    color = 'k'
                    if ii % 2 == 0:
                        color = 'r'
                    plt.plot(xboundary, y, color=color)

                    # angle of the curve; only used for first-degree regressions
                    slope = self.representationFeaturesDict[key][0][0]*self.nc1.samplerate/self.nc1.hopsize
                    slope = np.arctan(slope)*180/np.pi
                    plt.annotate('{0:.2f}'.format(slope), xy=(xboundary[0], y[0]),
                                 xytext=(xboundary[0]+0.5, y[0]), fontsize=5)

                    # annotation of boundary
                    #plt.annotate(xboundary[0], xy = (xboundary[0], y[0]),
                    #             xytext=(xboundary[0]+0.5, y[0]),fontsize=5)
                    #plt.annotate(xboundary[-1], xy = (xboundary[-1], y[-1]),
                    #             xytext=(xboundary[-1]+0.5, y[-1]),fontsize=5)

                    ii += 1
        else:
            y = self.representationSegmentPts[key]
            self.pltRepresentationHelper(xboundary, y, representation, pitchHz)
            if figurePlt:
                # plot vibrato
                plt.plot(xboundary, y, 'b')

    if figurePlt:
        # regulate the figure size
        fig = plt.gcf()
        dpi = 180.0
        print(figureFilename, ', plot length: ', len(representation))
        fig.set_size_inches(int(len(representation)*2/dpi), 10.5)
        fig.savefig(figureFilename, dpi=dpi)

    if refinedSegFilename and not evaluation:
        with open(refinedSegFilename, 'w+') as outfile:
            outfile.write('startFrame'+','+'endFrame'+','+'startTime'+','+'endTime'+'\n')
            for ii in range(len(startFrames)):
                outfile.write(str(int(startFrames[ii]))+','
                              +str(int(endFrames[ii]))+','
                              +str(startFrames[ii]*self.nc1.hopsize/float(self.nc1.samplerate))+','
                              +str(endFrames[ii]*self.nc1.hopsize/float(self.nc1.samplerate))+'\n')

    return representation
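# The slope annotation above converts a per-frame slope into degrees: frames
# become seconds via samplerate/hopsize, and the result goes through arctan.
# A minimal standalone sketch (hypothetical samplerate and hopsize values):
import numpy as np

def slope_in_degrees(slope_per_frame, samplerate=44100, hopsize=256):
    # frames per second = samplerate / hopsize, so this rescales to per-second
    slope_per_second = slope_per_frame * samplerate / hopsize
    return np.arctan(slope_per_second) * 180 / np.pi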
import numpy as np
import matplotlib.pyplot as pp
from scipy import optimize as opt

p = np.poly1d([1, 1, -4, 4])
dp = np.polyder(p)
# the previous step returns a poly1d object, so pass its
# coefficients to np.roots to find the critical points
roots = np.roots(dp.coef)
#print(dp)
#print(roots)

xp = np.arange(-10, 10, 0.1)
yp = p(xp)

xdp = np.arange(-10, 10, 0.1)
ydp = dp(xdp)

xr1 = roots[0]
yr1 = p(xr1)
#print(xr1,yr1)
xr2 = roots[1]
yr2 = p(xr2)
#print(xr2,yr2)

data = np.array([xp, yp])
data = data.T
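# The roots of the derivative are only candidate extrema; the sign of the
# second derivative separates minima from maxima. A short follow-up sketch
# using the same p, dp, and roots as above:
ddp = np.polyder(dp)
for r in roots:
    kind = 'minimum' if ddp(r) > 0 else 'maximum' if ddp(r) < 0 else 'inflection'
    print('x = %.4f: local %s, p(x) = %.4f' % (r, kind, p(r)))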