import numpy as np
import scipy as sp
import scipy.signal   # makes sp.signal available


def bartlett(k):
    """
    Bartlett's method (Bartlett's periodogram):
    (1) split the original N-point data record k (input array of spikes)
        into segments of length m;
    (2) compute the periodogram of each segment: take the discrete Fourier
        transform (DFT), square its magnitude and divide by m;
    (3) average the periodograms over all segments.
    Averaging reduces the variance of the estimate relative to the
    periodogram of the full N-point record.
    """
    nfft = 512                       # segment length m (and FFT size)
    n = len(k) // nfft               # number of segments in the data
    result = np.zeros(nfft)          # accumulator for the segment periodograms
    for i in range(n):
        st = i * nfft                # start of this segment
        en = st + nfft               # end of this segment
        seg = k[st:en]               # slice the segment out of k
        result += np.abs(np.fft.fft(seg))**2 / nfft   # periodogram of the segment
    pd = result / n                  # average over the segments
    pd = pd[0:nfft // 2]             # keep the one-sided spectrum
    fr = np.linspace(0, 512, nfft)   # frequency axis (sampling rate 512)
    fr = fr[0:nfft // 2]

    # Welch's method estimates the power spectral density by dividing the
    # data into (possibly overlapping) segments, computing a modified
    # periodogram for each segment and averaging the periodograms.  With a
    # boxcar window and no overlap it reduces to Bartlett's method.
    fr2, pd2 = sp.signal.welch(k, 512, window="boxcar", nperseg=512,
                               noverlap=0, nfft=512, scaling="density")
    return fr2, pd2
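# Usage sketch (not part of the original source): feed bartlett() a synthetic
# 50 Hz sine sampled at 512 Hz and check that the returned PSD peaks near
# 50 Hz.  The signal length and test frequency are illustrative assumptions.
fs = 512
t = np.arange(0, 8, 1.0 / fs)          # 8 s of data -> 4096 samples
sig = np.sin(2 * np.pi * 50 * t)
freqs, psd = bartlett(sig)
print("peak frequency: %.1f Hz" % freqs[np.argmax(psd)])   # ~50 Hz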
def opt(e_init, e_target, hrv_hist, hrv_en):
    # verbosity controls how much logging CPLEX reports back; 3 is the most verbose
    verbosity = 3
    m = CPlexModel(verbosity)
    b = m.new((epochs_per_day, nodes, mod_levels), vtype=int, lb=0, ub=1, name='b')
    l = m.new((epochs_per_day, nodes, bin_num), vtype=float, lb=-1, ub=battery_cap, name='l')
    fixed_prob = np.linspace(0, 1, num=bin_num, endpoint=True, dtype=float)
    e_init_hist = np.zeros(e_init.shape, dtype=float)
    e_init_hist[:, 0] = 1
    hist_rv = np.zeros((epochs_per_day, nodes, bin_num), dtype=float)
    hist_rv[0] = e_init_hist
    # prepare the energy vector here
    en_rv = np.zeros((epochs_per_day, nodes, bin_num), dtype=float)
    en_rv[0] = e_init
    for i in xrange(1, epochs_per_day):
        en_rv[i], hist_rv[i] = next_battery_level(
            en_rv[i - 1], hist_rv[i - 1],
            hrv_en[i, :, :] - (np.vectorize(energy))(b[i, :]),
            hrv_hist[i, :, :])
        m.constrain(en_rv[i] >= 0)
        m.constrain(sum(np.vectorize(time)(b[i, :])) <= D)
    m.maximize(objective_function(en_rv[-1], hist_rv[-1]))
    return m
def circle_func(x_c_p, y_c_p, R, N):
    # N points on a circle of radius R centred at (x_c_p, y_c_p)
    alpha = np.linspace(0, 2 * np.pi, N)
    x = x_c_p + R * np.cos(alpha)
    y = y_c_p + R * np.sin(alpha)
    return x, y
def p_log(d, p, o, params):
    # .ix is deprecated in modern pandas; .iloc is used here for positional indexing
    plt.scatter(d.iloc[:, [o]], d.iloc[:, [p]])
    x = np.linspace(np.min(d.iloc[:, [p]]) - 1, np.max(d.iloc[:, [p]]) + 1, 1000)
    y = [(1.0 + np.tanh((params[0] + params[1] * i) / 2.0)) / 2 for i in x]
    plt.plot(x, y)
    plt.grid()
    plt.show()
def __init__(self, x0, y0, v0, alpha0):
    # projectile initial position and velocity
    self.x, self.y = x0, y0
    self.vx0 = v0 * np.cos(alpha0)
    self.vy0 = v0 * np.sin(alpha0)
    # time interval to be simulated (t_max is assumed to be defined at module level)
    self.t = np.linspace(0, t_max)
def UseNumpyNow():
    # Create some matrices
    print("Matrix of ones: ")
    unos = np.ones((3, 4))
    print(unos)
    input()
    print("Matrix of zeros:")
    ceros = np.zeros((3, 4))
    print(ceros)
    input()
    print("Matrix of random numbers:")
    aleatorios = np.random.random((3, 4))
    print(aleatorios)
    input()
    print("Empty matrix:")
    vacia = np.empty((3, 4))
    print(vacia)
    input()
    print("Matrix filled with a single specific value:")
    num = np.full((3, 4), 8)
    print(num)
    input()
    print("We can also create vectors with np.array,\n"
          "although it is not the only way to do it; for example,\n"
          "if we want a vector whose values increase by a constant\n"
          "step we can use:")
    inc = np.arange(0, 50, 5)
    print(inc)
    input()
    print("Likewise we can work with a matrix of evenly spaced values,\n"
          "more uniform than with random.random; this is done with\n"
          "linspace as shown below:")
    lin = np.linspace(0, 2, 5)
    print(lin)
    input()
    print("We can also create identity matrices in two\n"
          "fairly simple ways")
    identidad = np.eye(4, 4)
    print(identidad)
    identidad2 = np.identity(4)
    print(identidad2)
    input()
    # Working with numpy matrices
    print("To work with any of these we can use one of the following\n"
          "attributes that numpy provides by default.")
    print("Dimensions of the ones matrix:")
    print(unos.ndim)
    print("Data type of the zeros matrix:")
    print(ceros.dtype)
    print("Size of the random matrix:")
    print(aleatorios.size)
    print("Shape of the empty matrix:")
    print(vacia.shape)
    input()
    # Reshaping
    print("We can also change the shape of a matrix")
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon


def draw_integral():
    # compute the points, create a matplotlib figure, add a polygon to it,
    # and set the axis limits
    a, b = -4, 4
    x = np.linspace(a, b, 100)   # 100 points between a and b for a smooth curve
    y = integral(x)              # integral(x) is assumed to be defined elsewhere
    _, ax = plt.subplots()
    ax.plot(x, y, 'r', linewidth=2)   # 'r' = red; draw the curve through the (x, y) points
    ax.set_ylim(bottom=0)
    ax.set_xlim((a - 1, b + 1))
    ix = np.linspace(a, b)       # points used to shade the area under the curve (polygon)
    iy = integral(ix)
    verts = [(a, 0), *zip(ix, iy), (b, 0)]   # zip iterates over the two lists in parallel
    poly = Polygon(verts, facecolor='0.9', edgecolor='0.5')
    ax.add_patch(poly)
def fft(interval, *volts):
    y = pd.Series(volts)
    y_removed_DC_offset = y - y.mean()
    N = len(y)
    dt = interval
    freq = np.linspace(0, 1.0 / dt, N)
    F = np.fft.fft(y_removed_DC_offset)
    Amp = np.abs(F)
    # dominant frequency in the lower half of the spectrum (up to Nyquist)
    diff_frequency_from_set = freq[Amp[:(N // 2) + 1].argmax()]
    return diff_frequency_from_set
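# Usage sketch (illustrative, not from the original source): sample a 5 Hz
# sine at 100 Hz and check that fft() above recovers roughly 5 Hz as the
# dominant frequency.  The variable names below are assumptions.
import numpy as np
import pandas as pd

dt = 0.01                              # 100 Hz sampling interval
t = np.arange(0, 2, dt)                # 2 seconds of samples
volts = np.sin(2 * np.pi * 5 * t)      # 5 Hz test signal
print(fft(dt, *volts))                 # expected to be close to 5.0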
def test_run():
    l_orig = np.float32([4, 2])
    print "Original line: C0={}, C1={}".format(l_orig[0], l_orig[1])
    Xorig = np.linspace(0, 10, 21)
    Yorig = l_orig[0] * Xorig + l_orig[1]
    plt.plot(Xorig, Yorig, 'b--', linewidth=2.0, label="Original line")
    noise_sigma = 3.0
    noise = np.random.normal(0, noise_sigma, Yorig.shape)
    data = np.asarray([Xorig, Yorig + noise]).T
    plt.plot(data[:, 0], data[:, 1], 'go', label="Data Points")
def comp(self):
    xdata = np.linspace(-30000.0, 30000.0, num=5000)
    B = xdata
    B01 = 0
    B02 = 100
    B03 = -100
    T_trans = 0.002
    # T_long, Rp and Rprb are assumed to be defined elsewhere in the class/module
    ydata = UIUtils.data(B, B01, B02, B03, T_long, T_trans, Rp, Rprb, xdata)
    self.gui.plot.clear()
def solve_lorenz(N=10, angle=0.0, max_time=4.0, sigma=10.0, beta=8. / 3, rho=28.0):
    fig = plt.figure()
    ax = fig.add_axes([0, 0, 1, 1], projection='3d')
    ax.axis('off')

    # prepare the axes limits
    ax.set_xlim((-25, 25))
    ax.set_ylim((-35, 35))
    ax.set_zlim((5, 55))

    def lorenz_deriv(x_y_z, t0, sigma=sigma, beta=beta, rho=rho):
        """Compute the time derivative of a Lorenz system."""
        x, y, z = x_y_z
        return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]

    # Choose random starting points, uniformly distributed from -15 to 15
    np.random.seed(1)
    x0 = -15 + 30 * np.random.random((N, 3))

    # Solve for the trajectories
    t = np.linspace(0, max_time, int(250 * max_time))
    x_t = np.asarray([integrate.odeint(lorenz_deriv, x0i, t) for x0i in x0])

    # Choose a different color for each trajectory
    colors = plt.cm.jet(np.linspace(0, 1, N))

    for i in range(N):
        x, y, z = x_t[i, :, :].T
        lines = ax.plot(x, y, z, '-', c=colors[i])
        _ = plt.setp(lines, linewidth=2)

    ax.view_init(30, angle)
    _ = plt.show()

    return t, x_t
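# Usage sketch (not from the original source; assumes numpy, matplotlib and
# scipy.integrate are imported under the names used inside solve_lorenz):
# integrate and display ten Lorenz trajectories over four seconds of time.
t, x_t = solve_lorenz(N=10, angle=30.0, max_time=4.0)
print(x_t.shape)   # (10, 1000, 3): trajectories x time points x (x, y, z)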
def fit_poly(data, error_func, degree=3):
    Cguess = np.poly1d(np.ones(degree + 1, dtype=np.float32))
    x = np.linspace(-5, 5, 21)
    plt.plot(x, np.polyval(Cguess, x), 'm--', label="Initial Guess")
    result = spo.minimize(error_func, Cguess, args=(data, ),
                          method='SLSQP', options={'disp': True})
    return np.poly1d(result.x)
def main():
    # parse command line arguments (good practice to wrap this in try/except)
    try:
        in_file1 = sys.argv[1]
        in_file2 = sys.argv[2]
        out_file = sys.argv[3]
        # window length, i.e. the number of beats in the window used to find transitions
        win_l = sys.argv[4]
    except Exception:
        print USAGE
        sys.exit(-1)

    f, b, beat1, harm1 = pre_process(in_file1)
    f2, b2, beat2, harm2 = pre_process(in_file2)

    winLength = int(win_l)
    window = signal.gaussian(winLength, 1)

    v1, x1, x2 = locate(beat1, harm2, window)
    v2, y2, y1 = locate(beat2, harm1, window)

    if v1 > v2:
        ind1 = x1
        ind2 = x2
    else:
        ind1 = y1
        ind2 = y2

    start_t = (60.0 / f.analysis.tempo['value'])
    end_t = (60.0 / f2.analysis.tempo['value'])
    dur = np.linspace(start_t, end_t, winLength)
    # cross-fade volumes: fade the first track out and the second one in
    vol1 = np.power(np.linspace(1, 0, winLength), 1.0 / 2.0)
    vol2 = np.power(np.linspace(0, 1, winLength), 1.0 / 2.0)

    collect = []
    for i in range(winLength):
        ratio1 = dur[i] / start_t
        ratio2 = dur[i] / end_t
        new1 = beat_process(f, b, ind1, winLength, ratio1, i, vol1)
        new2 = beat_process(f2, b2, ind2, winLength, ratio2, i, vol2)
        new1.sum(new2)
        collect.append(new2)
    out = audio.assemble(collect, numChannels=2)

    c1 = []
    c2 = []
    for j in range(8):
def f(x):
    if x < -2:
        return -3 * (x + 2)**2 + 1
    elif -2 <= x < -1:
        return 1
    elif -2 <= x < -1:
        # unreachable: this repeats the condition of the branch above
        return (x - 1)**3 + 3
    elif -1 <= x < 1:
        return np.sin(np.pi) * x + 3
    elif 1 <= x < 2:
        # the formula given here in the original, 3 * math.sqrt(x - 2) + 4,
        # takes the square root of a negative number on this interval
        return float("nan")
    elif x >= 2:
        # no formula was given for this interval in the original code
        return float("nan")


# f() works on scalars, so evaluate the sample grid point by point
x = np.linspace(-3, 3)
y = np.array([f(xi) for xi in x])
def test_run():
    l_orig = np.float32([4, 2])
    print "Original line: C0={}, C1={}".format(l_orig[0], l_orig[1])
    Xorig = np.linspace(0, 10, 21)
    Yorig = l_orig[0] * Xorig + l_orig[1]
    plt.plot(Xorig, Yorig, 'b--', linewidth=2.0, label="Original line")
    noise_sigma = 3.0
    noise = np.random.normal(0, noise_sigma, Yorig.shape)
    data = np.asarray([Xorig, Yorig + noise]).T
    plt.plot(data[:, 0], data[:, 1], 'go', label="Data points")
    l_fit = fit_line(data, error)
    print "Fitted line: C0={}, C1={}".format(l_fit[0], l_fit[1])
    plt.plot(data[:, 0], l_fit[0] * data[:, 0] + l_fit[1], 'r--',
             linewidth=2.0, label="Fitted line")
def main():
    for CAM in cameras:
        FRAMES_DIR1 = os.path.join(SECTION_DIR, CAM, 'frames')
        DET_PATH1 = os.path.join(results_dir, INPUT + SEQ + CAM + '_kalman_predictions.pkl')
        df1 = pd.read_pickle(DET_PATH1)
        df1_sort = df1.sort_values('time_stamp')
        df1_grouped = df1_sort.groupby('time_stamp')
        df1['histogram'] = 0
        df1_new = pd.DataFrame(columns=df1.head(0))
        # iterate over each frame
        for time_stamp, vals in df1_grouped:
            frame_id = vals['img_id'].values[0]
            boxes = vals['boxes']
            frame = read_frame_number_from_path(FRAMES_DIR1, frame_id)
            im_h = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)[:, :, 0]
            # iterate over the detections in this frame
            histograms_per_frame = []
            for b in boxes:
                [xmin, ymin, xmax, ymax] = b
                # iterate over each element in the frame
                top_left = (int(xmin), int(ymin))
                bottom_right = (int(xmax), int(ymax))
                patch = patch_from_img(im_h, top_left, bottom_right)
                # hist = np.histogram(patch, bins=16)
                # note: 16 bin edges give 15 bins; use 17 edges for 16 bins
                hist = np.histogram(patch, bins=np.linspace(0, 255, 16), density=True)
                histograms_per_frame.append(hist)
                # cv2.rectangle(frame, top_left, bottom_right, (255, 0, 0), 10)
            vals['histogram'] = histograms_per_frame
            df1_new = df1_new.append(vals, ignore_index=True)
            # plt.imshow(frame)
            # plt.show()
        # cv2.compareHist(hist1, hist2, method=cv2.HISTCMP_INTERSECT)
        df1_new.to_pickle(
            os.path.join(results_dir, INPUT + SEQ + CAM + '_histogram.pkl'))
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1,
                        train_sizes=np.linspace(.1, 1.0, 5)):
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training Examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training Score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Validation Score")
    plt.legend(loc="best")
    return plt
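# Usage sketch (an assumption, not part of the original snippet): plot the
# learning curve of a small classifier on a toy dataset.  Requires numpy,
# matplotlib and scikit-learn, with learning_curve imported as used above.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
plot_learning_curve(DecisionTreeClassifier(max_depth=3),
                    "Decision tree on iris", X, y, cv=5).show()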
def AutomatycznyGeneratorGeometrii(a, b, n):
    '''
    Parameters:
        a, b - endpoints of the interval
        n    - number of evenly spaced nodes
    Returns:
        nodes (WEZLY), elements (ELEMENTY)
    '''
    lp = np.arange(1, n + 1)
    x = np.linspace(a, b, n)
    WEZLY = (np.vstack((lp.T, x.T))).T            # rows of [node number, coordinate]
    lp = np.arange(1, n)
    C1 = np.arange(1, n)
    C2 = np.arange(2, n + 1)
    ELEMENTY = (np.block([[lp], [C1], [C2]])).T   # rows of [element number, node 1, node 2]
    return WEZLY, ELEMENTY
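# Usage sketch (illustrative values, not from the original source): generate a
# 1-D mesh of 5 evenly spaced nodes on [0, 1] and print the node and element
# tables produced by the function above.
import numpy as np

WEZLY, ELEMENTY = AutomatycznyGeneratorGeometrii(0.0, 1.0, 5)
print(WEZLY)     # rows of [node number, coordinate]
print(ELEMENTY)  # rows of [element number, left node, right node]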
def plot(img):
    # H is assumed to be the hue channel extracted elsewhere (S and I appear
    # in the commented-out code below)
    ar = H.flatten()
    x = np.linspace(1, 256, 256)   # one x value per histogram bin
    # normed/hold were removed from matplotlib; density replaces normed
    n, bins, patches = plt.hist(ar, bins=256, density=True, facecolor='r',
                                alpha=0.3, label='h', edgecolor='r')
    print(len(n))
    plt.plot(x, n)
    # ag = S.flatten()
    # plt.hist(ag, bins=256, density=True, facecolor='g', alpha=0.5, label='s', edgecolor='g')
    # ab = I.flatten()
    # plt.hist(ab, bins=256, density=True, facecolor='b', alpha=0.4, label='i', edgecolor='b')
    # plt.legend()
    plt.show()
def fit_poly(data, error_func, degree=3):
    """Fit a polynomial to given data, using a supplied error function.

    Parameters
    ----------
    data: 2D array where each row is a point (x, y)
    error_func: function that computes the error between a polynomial
        and observed data

    Returns polynomial that minimizes the error function.
    """
    # Generate initial guess for polynomial model (all coeffs = 1)
    Cguess = np.poly1d(np.ones(degree + 1, dtype=np.float32))

    # Plot initial guess (optional)
    x = np.linspace(-5, 5, 21)
    plt.plot(x, np.polyval(Cguess, x), 'm--', linewidth=2.0, label="Initial guess")

    # Call optimizer to minimize the error function
    result = spo.minimize(error_func, Cguess, args=(data, ),
                          method='SLSQP', options={'disp': True})
    return np.poly1d(result.x)  # convert optimal result to a poly1d object and return
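# Usage sketch: the error function and data below are assumptions made for
# illustration (the original defines its own error function elsewhere).  Fit a
# cubic to noisy samples of x**3 - 2x using the fit_poly() defined above.
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as spo


def error_poly(C, data):
    # sum of squared residuals between the polynomial C and the data points
    return np.sum((data[:, 1] - np.polyval(np.poly1d(C), data[:, 0]))**2)


x = np.linspace(-5, 5, 21)
y = x**3 - 2 * x + np.random.normal(0, 5, x.shape)
p_fit = fit_poly(np.asarray([x, y]).T, error_poly)
print(p_fit)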
def trajectory(x0, y0, v, theta, g=9.8, npts=1000):
    """
    finds the x-y trajectory of a projectile

    parameters
    ----------
    x0 : float
        initial x - position
    y0 : float
        initial y - position, must be > 0
    v : float
        initial velocity
    theta : float
        initial angle (in degrees)
    g : float (default 9.8)
        acceleration due to gravity
    npts : int
        number of points in the sample

    returns
    -------
    (x, y) : tuple of np.array of floats
        trajectory of the projectile vs time

    notes
    -----
    trajectory is sampled with npts time points between 0 and the time
    when y = 0 (regardless of y0)
        y(t) = y0 + v sin(theta) t - 0.5 g t^2
        0.5 g t^2 - v sin(theta) t - y0 = 0
        t_final = v/g sin(theta) + sqrt((v/g)^2 sin^2(theta) + 2 y0/g)
    """
    arad = math.radians(theta)
    time_final = (v / g) * math.sin(arad) + np.sqrt(
        (v / g)**2 * (math.sin(arad))**2 + 2 * y0 / g)
    time = np.linspace(0, time_final, npts)
    vx = v * math.cos(arad)
    vy = v * math.sin(arad)
    y = y0 + vy * time - 0.5 * g * time**2
    x = x0 + vx * time
    return (x, y)
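# Usage sketch (illustrative numbers, not from the original source): launch a
# projectile from 1 m height at 20 m/s and 45 degrees, and check that the
# sampled trajectory ends where y returns to 0, as derived in the docstring.
import math
import numpy as np

x, y = trajectory(x0=0.0, y0=1.0, v=20.0, theta=45.0)
print(x[-1], y[-1])   # y[-1] should be approximately 0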
grouped = names.groupby(['year', 'sex'])
top1000 = grouped.apply(get_top1000)

# 2. Analyze naming trends
boys = top1000[top1000.sex == 'M']
girls = top1000[top1000.sex == 'F']
total_births = top1000.pivot_table('births', index='year', columns='name', aggfunc=sum)
subset = total_births[['John', 'Harry', 'Mary', 'Marilyn']]
subset.plot(subplots=True, figsize=(12, 10), grid=False, title='Number of births per year')

# 3. Measure the increase in naming diversity
table = top1000.pivot_table('prop', index='year', columns='sex', aggfunc=sum)
table.plot(title='Sum of table1000.prop by year and sex',
           yticks=np.linspace(0, 1.2, 13), xticks=range(1880, 2020, 10))

df = boys[boys.year == 2010]
prop_cumsum = df.sort_values(by='prop', ascending=False).prop.cumsum()
prop_cumsum.searchsorted(0.5)

# Compare with the 1900 data
df = boys[boys.year == 1900]
in1900 = df.sort_values(by='prop', ascending=False).prop.cumsum()
in1900.searchsorted(0.5) + 1

# Compute for every year/sex combination
def get_quantile_count(group, q=0.5):
    group = group.sort_values(by='prop', ascending=False)
    return group.prop.cumsum().searchsorted(q) + 1
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

points = 10000
periods = 10
amp = 4
phase = np.pi / 4

x = np.linspace(0, 2 * np.pi * periods, num=points)
y = amp * np.sin(x + phase)

chosen_idx = np.random.choice(points, size=100, replace=False)
data1 = pd.Series(y[chosen_idx], index=x[chosen_idx])
plot1 = plt.plot(data1)
# Line plots
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import numpy as np

fig = plt.figure()
ax = plt.axes()
x = np.linspace(0, 10, 1000)
ax.plot(x, np.sin(x));
# or
plt.plot(x, np.sin(x));

# To draw several lines in the same figure, just call plot repeatedly
plt.plot(x, np.sin(x))
plt.plot(x, np.cos(x));

# Adjusting the plot: line colors
plt.plot(x, np.sin(x - 0), color='blue')           # specify by color name
plt.plot(x, np.sin(x - 1), color='g')              # short color code (rgbcmyk)
plt.plot(x, np.sin(x - 2), color='0.75')           # grayscale value between 0 and 1
plt.plot(x, np.sin(x - 3), color=(1.0, 0.2, 0.3))  # RGB tuple, each value between 0 and 1
plt.plot(x, np.sin(x - 4), color='#FFDD44')        # hex RRGGBB value
plt.plot(x, np.sin(x - 5), color='chartreuse');    # any HTML color name is supported
# If no color is specified, matplotlib cycles through a set of default colors

# Setting line styles
plt.plot(x, x + 0, linestyle='solid')
plt.plot(x, x + 1, linestyle='dashed')
plt.plot(x, x + 2, linestyle='dashdot')
plt.plot(x, x + 3, linestyle='dotted');
# Short codes can be used instead of the full English names
print(x)
print(y)

x[1,1] = 2   ## Note that both x and y objects are altered
print(x)
print(y)

## Creating Arrays

- `np.array([1,2,3])`: 1-D array
- `np.array([[1,2,3],[4,5,6]])`: 2-D array
- `np.zeros()`
- `np.ones((3,4))`: 3x4 array with all values 1
- `np.eye(5)`: 5x5 array of 0 with 1 on diagonal (identity matrix)
- `np.linspace(0, 100, 6)`: Array of 6 evenly divided values from 0 to 100
- `np.arange(0, 10, 3)`: Array of values from 0 to less than 10 with step 3
- `np.full((2,3), 8)`: 2x3 array with all values 8
- `np.random.rand(6,7)*100`: 6x7 array of random floats between 0-100
- `np.random.randint(5, size=(2,3))`: 2x3 array with random ints between 0-4

```{note}
In Python, the indices (esp. the closing indices) are often NOT inclusive.
```

## Initialize different types of Arrays

print(np.zeros((2,3)))
print(np.ones((2,3)))
print(np.full((2,3), 99))   # create an array with a self-defined default value

x = np.array([[1,2,3],[4,5,6]])
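A short demo of the array constructors listed earlier that the snippet does not exercise; the values in the comments are illustrative expected outputs, not part of the original.

print(np.eye(3))                         # 3x3 identity matrix
print(np.linspace(0, 100, 6))            # [  0.  20.  40.  60.  80. 100.]
print(np.arange(0, 10, 3))               # [0 3 6 9]
print(np.random.randint(5, size=(2,3)))  # 2x3 ints drawn from 0-4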
sigma = float(input("input sigma"))   # input() returns a string, so convert it
# mu is assumed to be read earlier in the script

# Generate the data set with numpy.random.normal()
# np.random.normal(loc=0.0, scale=1.0, size=None): loc is the mean, scale the standard deviation.
# Plot the sample histogram and the probability density function
array = np.random.normal(mu, sigma, 1000)
count, bins, ignored = plt.hist(array, 30, density=True)
plt.plot(bins, 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(bins - mu)**2 / (2 * sigma**2)),
         linewidth=2, color='r')
plt.show()

# Normality test
# Example: check whether the p-value is smaller than 0.05
x1 = np.linspace(-15, 15, 9)   # evenly spaced sequence, not normally distributed
print(stats.kstest(x1, 'norm'))

# Use numpy to generate normally distributed random numbers
np.random.seed(1000)
x2 = np.random.randn(100)      # 100 samples from the standard normal distribution
D, p_value = stats.kstest(x2, 'norm')
print(p_value)
if p_value > 0.05:
    print("Is a Normal Distribution")
else:
    print("Not a Normal Distribution")

## read_data
Python 2.7.10 (default, Oct 14 2015, 16:09:02)
[GCC 5.2.1 20151010] on linux2
Type "copyright", "credits" or "license()" for more information.
>>> import numpy
>>> numpy.linespace(0.0, numpy pi/2,num=100)
SyntaxError: invalid syntax
>>> numpy.linespace(0.0, mathpi/2,num=100)
Traceback (most recent call last):
  File "<pyshell#2>", line 1, in <module>
    numpy.linespace(0.0, mathpi/2,num=100)
AttributeError: 'module' object has no attribute 'linespace'
>>> import numpy
>>> numpy.linespace(0.0, numpy.pi/2,num=100)
Traceback (most recent call last):
  File "<pyshell#4>", line 1, in <module>
    numpy.linespace(0.0, numpy.pi/2,num=100)
AttributeError: 'module' object has no attribute 'linespace'
>>> numpy.linspace(0.0, numpy.pi/2,num=100)
array([ 0.        ,  0.01586663,  0.03173326,  0.04759989,  0.06346652,
        0.07933315,  0.09519978,  0.11106641,  0.12693304,  0.14279967,
        0.1586663 ,  0.17453293,  0.19039955,  0.20626618,  0.22213281,
        0.23799944,  0.25386607,  0.2697327 ,  0.28559933,  0.30146596,
        0.31733259,  0.33319922,  0.34906585,  0.36493248,  0.38079911,
        0.39666574,  0.41253237,  0.428399  ,  0.44426563,  0.46013226,
        0.47599889,  0.49186552,  0.50773215,  0.52359878,  0.53946541,
        0.55533203,  0.57119866,  0.58706529,  0.60293192,  0.61879855,
        0.63466518,  0.65053181,  0.66639844,  0.68226507,  0.6981317 ,
        0.71399833,  0.72986496,  0.74573159,  0.76159822,  0.77746485,
        0.79333148,  0.80919811,  0.82506474,  0.84093137,  0.856798  ,
import numpy as np

mylist = [1, 2, 3]
x = np.array(mylist)
print(x)

y = np.array([4, 5, 6])
print(y)

m = np.array(([7, 8, 9], [10, 11, 12]))
print(m)
print(m.shape)

n = np.arange(0, 30, 2)
print(n)
n = n.reshape(3, 5)
print(n)

o = np.linspace(0, 4, 9)
print(o)
o.resize(3, 3)
print(o)

print(np.ones((3, 2)))
print(np.zeros((2, 3)))
print(np.eye(3))
print(np.diag(y))

print(np.array([1, 2, 3]) * 3)
print(np.repeat([1, 2, 3], 3))

p = np.ones((2, 3), int)   # p was not defined in the original snippet; a 2x3 array of ones is assumed
print(np.vstack([p, 2 * p]))
print(np.hstack([p, 2 * p]))

print(x.dot(y))

z = np.array([y, y**2])
print(z.shape)
print(z.dtype)
z = z.astype('f')
# range
a5 = np.arange(1, 20, 2)
a5

# In[45]:

a5.sum()

# In[48]:

a6 = np.linspace(1, 20, 11)
a6

# In[49]:

# 2-d array
ar1 = np.array([3, 3.9, 23, 12, 45, 20]).reshape(3, 2)

# In[50]:

ar1
template = cv2.imread(args["template"])
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
cv2.imshow("Template", template)

# loop over the images to find the template in
for imagePath in glob.glob(args["images"] + "/*.png"):
    # load the image, convert it to grayscale, and initialize the
    # bookkeeping variable to keep track of the matched region
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    found = None

    # loop over the scales of the image
    for scale in np.linspace(0.2, 1.0, 20)[::-1]:
        # resize the image according to the scale, and keep track
        # of the ratio of the resizing
        resized = imutils.resize(gray, width=int(gray.shape[1] * scale))
        r = gray.shape[1] / float(resized.shape[1])

        # if the resized image is smaller than the template, then break
        # from the loop
        if resized.shape[0] < tH or resized.shape[1] < tW:
            break

        # detect edges in the resized, grayscale image and apply template
        # matching to find the template in the image
        edged = cv2.Canny(resized, 50, 200)
        result = cv2.matchTemplate(edged, template, cv2.TM_CCOEFF)
        (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
#          {"ind": n, "typ": 'D', "wartosc": 1}]
RysujGeometrie(WEZLY, ELEMENTY, WB)
#print(WEZLY)
#print(ELEMENTY)

A, b = Alokacja(n)
print(A)
print(b)

stopien_fun_bazowych = 1
phi, dphi = FunkcjeBazowe(stopien_fun_bazowych)

xx = np.linspace(-1, 1, 101)
plt.plot(xx, phi[0](xx), 'r')
plt.plot(xx, phi[1](xx), 'g')
plt.plot(xx, dphi[0](xx), 'b')
plt.plot(xx, dphi[1](xx), 'c')

# PROCESSING
liczbaElementow = np.shape(ELEMENTY)[0]
for ee in np.arange(0, liczbaElementow):
    elemRowInd = ee
    elemGlobalInd = ELEMENTY[ee, 0]
    elemWezel1 = ELEMENTY[ee, 1]
    elemWezel2 = ELEMENTY[ee, 2]
import numpy as np

# Creation

# Explicitly from a list of values
np.array([1, 2, 3, 4, 5])

# As a range of values
np.arange(10)

# By specifying the number of elements
np.linspace(0, 1, 5)
def sinplot(flip=1):
    x = np.linspace(0, 14, 100)
    # plot a sine wave over x; flip mirrors it vertically
    # (the original plotted a fixed 3-element list against 100 x values,
    # which raises a shape mismatch)
    plt.plot(x, np.sin(x) * flip)
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 500)
x = 2 * x
plt.plot(x, np.sin(x ** 2))
plt.title('haha')
plt.show()