def show(self, w): """ illustrate the learning curve Parameters ---------- w : int, window size for smoothing the curve """ # plot data plt.subplot(121) plt.plot(self.tn_it, self.tn_err, 'b.', alpha=0.2) plt.plot(self.tt_it, self.tt_err, 'r.', alpha=0.2) # plot smoothed line xne,yne = self._smooth( self.tn_it, self.tn_err, w ) xte,yte = self._smooth( self.tt_it, self.tt_err, w ) plt.plot(xne, yne, 'b') plt.plot(xte, yte, 'r') plt.xlabel('iteration'), plt.ylabel('cost energy') plt.subplot(122) plt.plot(self.tn_it, self.tn_cls, 'b.', alpha=0.2) plt.plot(self.tt_it, self.tt_cls, 'r.', alpha=0.2) # plot smoothed line xnc, ync = self._smooth( self.tn_it, self.tn_cls, w ) xtc, ytc = self._smooth( self.tt_it, self.tt_cls, w ) plt.plot(xnc, ync, 'b', label='train') plt.plot(xtc, ytc, 'r', label='test') plt.xlabel('iteration'), plt.ylabel( 'classification error' ) plt.legend() plt.show() return
def MRI():
    """
    Simulate one RF excitation followed by free relaxation and plot it.

    The magnetization starts at equilibrium (0, 0, 1), is tipped by a
    90-degree rotation about x, then relaxes for 1 s: |Mxy| decays with T2
    (left panel) while Mz recovers toward 1 with T1 (right panel).
    Uses ``pi``/``cos``/``sin``, ``np`` and ``plt`` from the module namespace.
    """
    T1 = 800*10**-3   # longitudinal relaxation time [s]
    T2 = 100*10**-3   # transverse relaxation time [s]
    a = pi/2          # flip angle [rad]
    t = np.linspace(0, 1, 100)  # time axis [s]
    # Initialize at equilibrium and rotate by the flip angle about x
    M = np.array([0,0,1])
    R = np.array([[1, 0, 0], [0, cos(a), sin(a)], [0, -sin(a), cos(a)]])
    M = R.dot(M)
    # T1/T2 relaxation
    M_z = 1 + (M[2] - 1) * np.exp(-t/T1)
    M_x = M[0]*np.exp(-t/T2)
    M_y = M[1]*np.exp(-t/T2)
    M_xy = np.sqrt(M_x*M_x + M_y*M_y)
    plt.subplot(121)
    plt.plot(t, M_xy)
    plt.subplot(122)
    plt.plot(t, M_z)
    plt.show()
def test_psv_dataset_tfm_segmentation_cropped():
    """Smoke-test the cropped-segmentation transform pipeline of PsvDataset."""
    from psv.ptdataset import PsvDataset, TFM_SEGMENTATION_CROPPED

    ds = PsvDataset(transform=TFM_SEGMENTATION_CROPPED)

    assert len(ds) == 956, "The dataset should have this many entries"

    mb = ds[0]
    image = mb['image']
    mask = mb['mask']
    # both outputs must be tensors with matching spatial (H, W) dims
    assert isinstance(image, torch.Tensor)
    assert isinstance(mask, torch.Tensor)
    assert mask.shape[-2:] == image.shape[-2:]

    # Hard to test due to randomness.... fall back to a visual check
    PLOT=True
    if PLOT:
        from matplotlib.pylab import plt
        import torchvision.transforms.functional as F
        a = ds.get_annotation(0)
        plt.figure()
        plt.suptitle('Visualizing test_psv_dataset_tfm_segmentation_cropped, close if ok')
        plt.subplot(121)
        plt.imshow(F.to_pil_image(image))
        plt.title('image')
        plt.subplot(122)
        plt.imshow(a.colors[mask.numpy()])
        plt.title('mask')
        plt.show()
def getGraph():
    """
    Plot the decision boundaries of the three fitted SVM classifiers
    side by side, with the training points overlaid.

    NOTE(review): relies on module-level globals (``svm``, ``rbf_svc``,
    ``rbf_svc_tunning``, ``x_matrizSetEntrenamientoVect``, ``y_clases``,
    ``titles``) -- confirm they are defined before calling.
    """
    for i, clf in enumerate((svm, rbf_svc, rbf_svc_tunning)):
        # Plot the decision boundary for this classifier
        plt.subplot(2, 2, i + 1)
        plt.subplots_adjust(wspace=0.4, hspace=0.4)
        Z = clf.predict(np.c_[x_matrizSetEntrenamientoVect, y_clases])
        # Color the regions of the plot
        Z = Z.reshape(x_matrizSetEntrenamientoVect.shape)
        plt.contourf(x_matrizSetEntrenamientoVect, y_clases, Z, cmap=plt.cm.Paired, alpha=0.8)
        # Training points
        plt.scatter(x_matrizSetEntrenamientoVect[:, 0], x_matrizSetEntrenamientoVect[:, 1], c=y_clases, cmap=plt.cm.Paired)
        plt.xlabel('Longitud Sepal')
        plt.ylabel('Peso Sepal')
        plt.xlim(x_matrizSetEntrenamientoVect.min(), x_matrizSetEntrenamientoVect.max())
        plt.ylim(y_clases.min(), y_clases.max())
        plt.xticks(())
        plt.yticks(())
        plt.title(titles[i])
    plt.show()
def show(self, w): """ illustrate the learning curve Parameters ---------- w : int, window size for smoothing the curve """ # plot data plt.subplot(121) plt.plot(self.tn_it, self.tn_err, 'b.', alpha=0.2) plt.plot(self.tt_it, self.tt_err, 'r.', alpha=0.2) # plot smoothed line xne, yne = self._smooth(self.tn_it, self.tn_err, w) xte, yte = self._smooth(self.tt_it, self.tt_err, w) plt.plot(xne, yne, 'b') plt.plot(xte, yte, 'r') plt.xlabel('iteration'), plt.ylabel('cost energy') plt.subplot(122) plt.plot(self.tn_it, self.tn_cls, 'b.', alpha=0.2) plt.plot(self.tt_it, self.tt_cls, 'r.', alpha=0.2) # plot smoothed line xnc, ync = self._smooth(self.tn_it, self.tn_cls, w) xtc, ytc = self._smooth(self.tt_it, self.tt_cls, w) plt.plot(xnc, ync, 'b', label='train') plt.plot(xtc, ytc, 'r', label='test') plt.xlabel('iteration'), plt.ylabel('classification error') plt.legend() plt.show() return
def test_psv_dataset_crop_and_pad():
    """Exercise crop / resize / random-crop transform chaining end to end."""
    import psv.ptdataset as P

    # NOTE(review): the pipeline below references ``psv.transforms`` directly,
    # so ``psv`` must already be imported at module scope -- confirm.
    TFM_SEGMENTATION_CROPPED = psv.transforms.Compose(
        psv.transforms.ToSegmentation(),

        # Crop in on the facades
        psv.transforms.SetCropToFacades(pad=20, pad_units='percent', skip_unlabeled=True, minsize=(512, 512)),
        psv.transforms.ApplyCrop('image'),
        psv.transforms.ApplyCrop('mask'),

        # Resize the height to fit in the net (with some wiggle room)
        # THIS is the test case -- the crops will not usually fit anymore
        psv.transforms.Resize('image', height=400),
        psv.transforms.Resize('mask', height=400, interpolation=P.Image.NEAREST),

        # Randomly choose a subimage
        psv.transforms.SetRandomCrop(512, 512),
        psv.transforms.ApplyCrop('image'),
        psv.transforms.ApplyCrop('mask', fill=24),  # 24 should be unlabeled
        psv.transforms.DropKey('annotation'),
        psv.transforms.ToTensor('image'),
        psv.transforms.ToTensor('mask', preserve_range=True),
    )

    ds = P.PsvDataset(transform=TFM_SEGMENTATION_CROPPED)

    assert len(ds) == 956, "The dataset should have this many entries"

    mb = ds[0]
    image = mb['image']
    mask = mb['mask']
    # both outputs must be tensors with matching spatial (H, W) dims
    assert isinstance(image, torch.Tensor)
    assert isinstance(mask, torch.Tensor)
    assert mask.shape[-2:] == image.shape[-2:]

    # Hard to test due to randomness.... fall back to a visual check
    PLOT=True
    if PLOT:
        from matplotlib.pylab import plt
        import torchvision.transforms.functional as F
        a = ds.get_annotation(0)
        plt.figure()
        plt.suptitle('Visualizing test_psv_dataset_tfm_segmentation_cropped,\n'
                     'close if ok \n '
                     'confirm boundary is marked unlabeled')
        plt.subplot(121)
        plt.imshow(F.to_pil_image(image))
        plt.title('image')
        plt.subplot(122)
        plt.imshow(a.colors[mask.numpy()])
        plt.title('mask')
        plt.show()
def fxcor_subplot(result, GCs, stars):
    '''
    Makes subplot of TDR vs VERR and VREL. Returns a figure object.

    Parameters
    ----------
    result, GCs, stars : objects exposing array-like ``TDR``, ``VREL_helio``
        and ``VERR`` columns (full catalogue, globular-cluster subset and
        star subset respectively).

    Returns
    -------
    matplotlib.figure.Figure

    Notes
    -----
    FIX: ``normed=True`` was removed from ``Axes.hist`` in matplotlib >= 3.1;
    ``density=True`` is the documented drop-in replacement with identical
    output.  Requires a working LaTeX install (``usetex=True``).
    '''
    plt.close('all')
    fig = plt.figure(figsize=(6,6))
    gs = gridspec.GridSpec(70,40,bottom=0.10,left=0.15,right=0.98, top = 0.95)
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    # three stacked axes sharing the TDR x-range: histogram, velocity, error
    R_vel = plt.subplot(gs[10:40,0:40])
    R_err = plt.subplot(gs[40:70,0:40])
    R_hist = plt.subplot(gs[0:10,0:40])
    R_hist.axis('off')
    plt.setp(R_vel.get_xticklabels(), visible=False)

    # --- velocity panel: heliocentric velocity vs TDR ---
    x = result.TDR
    y = result.VREL_helio
    R_vel.scatter(x, y, s=10, c='gray', edgecolor='none', alpha = 0.6, label = 'All')
    x = GCs.TDR
    y = GCs.VREL_helio
    R_vel.scatter(x, y, s=11, c='orange', edgecolor='none', alpha = 0.8, label = 'GCs')
    x = stars.TDR
    y = stars.VREL_helio
    R_vel.scatter(x, y, s=11, c='green', edgecolor='none', alpha = 0.8, label = 'Stars')
    R_vel.set_xlim(1,20)
    R_vel.set_ylim(-2000,5000)
    R_vel.set_ylabel(r'$v$ $[km \, s^{-1}]$')
    plt.setp(R_vel.get_yticklabels()[0], visible=False)

    # --- error panel: velocity uncertainty vs TDR ---
    x = result.TDR
    y = result.VERR
    R_err.scatter(x, y,s=10, c='gray', edgecolor='none', alpha = 0.6)
    x = GCs.TDR
    y = GCs.VERR
    R_err.scatter(x, y,s=11, c='orange', edgecolor='none', alpha = 0.8)
    x = stars.TDR
    y = stars.VERR
    R_err.scatter(x, y,s=11, c='green', edgecolor='none', alpha = 0.8)
    R_err.set_ylim(2,80)
    R_err.set_xlim(1,20)
    R_err.set_ylabel(r'$\delta v$ $[km \, s^{-1}]$')
    R_err.set_xlabel(r'TDR')
    plt.setp(R_err.get_yticklabels()[-1], visible=False)
    R_vel.legend()

    # --- top panel: normalized TDR histograms of the two subsets ---
    # FIX: `normed` -> `density` (removed in matplotlib 3.1)
    R_hist.hist([GCs.TDR,stars.TDR], range = (1,20), bins = 50, density=True,
                color=['orange','green'])
    return fig
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """
    Render the first ``n_row * n_col`` images as a titled grayscale grid.

    images : sequence of flattened images, each reshapeable to (h, w)
    titles : per-panel title strings
    h, w   : pixel height and width of each image
    """
    n_panels = n_row * n_col
    plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for panel in range(n_panels):
        plt.subplot(n_row, n_col, panel + 1)
        img = images[panel].reshape(h, w)
        plt.imshow(img, cmap=plt.cm.gray)
        plt.title(titles[panel], size=12)
        # hide tick marks on both axes
        plt.xticks(())
        plt.yticks(())
def plotMagnitudePhaseImage(self, image):
    """
    Display the magnitude (top) and phase (bottom) of a complex image.

    image : complex 2-D array; both components are shown as grayscale.
    Uses ``absolute``/``angle`` (presumably numpy -- confirm the module's
    imports) and ``plt``/``cm`` from the module namespace; blocks until the
    window is closed.
    """
    mag = absolute(image).astype('float')
    phase = angle(image).astype('float')
    plt.subplot(211)
    plt.imshow(mag, cmap = cm.Greys_r)
    plt.axis('off')
    plt.subplot(212)
    plt.imshow(phase, cmap = cm.Greys_r)
    plt.axis('off')
    plt.show()
def saveMagPhaseImage(self, image, filename):
    """
    Save a two-panel figure (magnitude top, phase bottom) of a complex image.

    image    : complex 2-D array.
    filename : path passed straight to ``plt.savefig``.
    """
    mag = absolute(image).astype('float')
    phase = angle(image).astype('float')
    plt.subplot(211)
    plt.imshow(mag, cmap=cm.Greys_r)
    plt.title('Magnitude')
    plt.axis('off')
    plt.subplot(212)
    plt.imshow(phase, cmap=cm.Greys_r)
    plt.title('Phase')
    plt.axis('off')
    plt.savefig(filename)
def save_plot(filter_bank, name):
    """
    Save every filter of a 2-D bank to one image file, laid out as a grid.

    filter_bank : array whose leading two dims index (row, col) of filters
    name        : output path passed to ``plt.savefig``
    """
    n_rows, n_cols = filter_bank.shape[0:2]
    plt.figure()
    for r in range(n_rows):
        for c in range(n_cols):
            # subplot indices are 1-based and row-major
            plt.subplot(n_rows, n_cols, r * n_cols + c + 1)
            plt.imshow(filter_bank[r][c], cmap='gray')
            plt.axis('off')
    plt.savefig(name)
def test2DpyEI(self):
    """
    Exercise EI maximization on a 2-D GP fitted to samples of sum(sin(x)).

    FIX: replaced Python-2-only ``xrange`` with ``range`` so the (normally
    disabled) plotting branch does not raise NameError on Python 3; this
    matches the Python-3 variant of this test elsewhere in the file.
    """
    f = lambda x: sum(sin(x))
    bounds = [[0., 5.], [0., 5.]]
    # latin-hypercube sample of 5 points, fixed seed for reproducibility
    X = lhcSample(bounds, 5, seed=24)
    Y = [f(x) for x in X]
    kernel = GaussianKernel_ard(array([1.0, 1.0]))
    GP = GaussianProcess(kernel, X, Y)
    maxei = maximizeEI(GP, bounds)
    if False:
        # manual-debugging visualization; disabled by default
        figure(1)
        c0 = [(i/50.)*(bounds[0][1]-bounds[0][0])+bounds[0][0] for i in range(51)]
        c1 = [(i/50.)*(bounds[1][1]-bounds[1][0])+bounds[1][0] for i in range(51)]
        z = array([[GP.ei(array([i, j])) for i in c0] for j in c1])
        ax = plt.subplot(111)
        cs = ax.contour(c0, c1, z, 10, alpha=0.5, cmap=cm.Blues_r)
        plot([x[0] for x in X], [x[1] for x in X], 'ro')
        for i in range(len(X)):
            annotate('%2f'%Y[i], X[i])
        plot(maxei[1][0], maxei[1][1], 'ko')
        show()
def saveMagPhasePlot(self, t, x, filename):
    """
    Save magnitude (top) and phase (bottom) of signal ``x`` against ``t``.

    t        : x-axis values (off-resonance frequencies, Hz).
    x        : complex signal.
    filename : path passed straight to ``plt.savefig``.

    FIX: corrected the user-facing y-axis label typo 'Magitude' ->
    'Magnitude' (same fix as the show variant of this plot).
    """
    mag = absolute(x).astype('float')
    phase = angle(x).astype('float')
    plt.subplot(211)
    plt.plot(t, mag)
    plt.ylabel('Magnitude')
    plt.title('SSPF Sequence')
    plt.grid(True)
    plt.subplot(212)
    plt.plot(t, phase)
    plt.xlabel('Off-Resonance (Hz)')
    plt.ylabel('Phase')
    plt.grid(True)
    plt.savefig(filename)
def showMagPhasePlot(self, t, x):
    """
    Display magnitude (top) and phase (bottom) of signal ``x`` against ``t``.

    t : x-axis values (off-resonance frequencies, Hz).
    x : complex signal.
    Blocks until the figure window is closed.

    FIX: corrected the user-facing y-axis label typo 'Magitude' ->
    'Magnitude' (same fix as the save variant of this plot).
    """
    mag = absolute(x)
    phase = angle(x)
    plt.subplot(211)
    plt.plot(t, mag)
    plt.ylabel('Magnitude')
    plt.title('SSPF Sequence')
    plt.grid(True)
    plt.subplot(212)
    plt.plot(t, phase)
    plt.xlabel('Off-Resonance (Hz)')
    plt.ylabel('Phase')
    plt.grid(True)
    plt.show()
def test2DpyEI(self):
    """
    Exercise EI maximization on a 2-D GP fitted to samples of sum(sin(x)).

    Fits a Gaussian process with an ARD kernel to 5 latin-hypercube samples
    and runs ``maximizeEI``; the plotting branch below is disabled and kept
    only for manual debugging.
    """
    f = lambda x: sum(sin(x))
    bounds = [[0., 5.], [0., 5.]]
    X = lhcSample(bounds, 5, seed=24)
    Y = [f(x) for x in X]
    kernel = GaussianKernel_ard(array([1.0, 1.0]))
    GP = GaussianProcess(kernel, X, Y)
    maxei = maximizeEI(GP, bounds)
    if False:
        # manual-debugging visualization; disabled by default
        figure(1)
        # 51-point grids across each bound
        c0 = [(i / 50.) * (bounds[0][1] - bounds[0][0]) + bounds[0][0] for i in range(51)]
        c1 = [(i / 50.) * (bounds[1][1] - bounds[1][0]) + bounds[1][0] for i in range(51)]
        z = array([[GP.ei(array([i, j])) for i in c0] for j in c1])
        ax = plt.subplot(111)
        cs = ax.contour(c0, c1, z, 10, alpha=0.5, cmap=cm.Blues_r)
        plot([x[0] for x in X], [x[1] for x in X], 'ro')
        for i in range(len(X)):
            # NOTE(review): '%2f' is width-2, full precision -- possibly
            # '%.2f' was intended; confirm before changing the output.
            annotate('%2f' % Y[i], X[i])
        plot(maxei[1][0], maxei[1][1], 'ko')
        show()
def show_failures(self, x_test, t_test):
    """
    Display up to 60 misclassified test images in a 6x10 grid.

    x_test : test images, one flattened vector per row (reshaped to 28x28).
    t_test : labels, either class indices (1-D) or one-hot rows (2-D).

    Also prints the predicted label for each shown failure, 10 per line,
    then blocks on ``plt.show()``.
    """
    y = self.predict(x_test)
    y = np.argmax(y, axis=1)
    # accept one-hot labels by collapsing them to class indices
    if t_test.ndim != 1:
        t_test = np.argmax(t_test, axis=1)

    # collect (image, predicted, truth) for every misclassified sample
    failures = []
    for idx in range(x_test.shape[0]):
        if y[idx] != t_test[idx]:
            failures.append((x_test[idx], y[idx], t_test[idx]))

    # show at most 60 failures (the grid has 6 x 10 cells)
    for i in range(min(len(failures), 60)):
        img, y, _ = failures[i]
        if (i % 10 == 0):
            print()
        print(y, end=", ")
        img = img.reshape(28, 28)
        plt.subplot(6, 10, i + 1)
        plt.imshow(img, cmap='gray')
    print()
    plt.show()
def augmentation_visualize_and_save(config, images, images_names, path, times: int = 2):
    """
    Visualization of image enhancements.

    :param config: configuration from yaml file.
    :param images: images to be augmented.
    :param images_names: corresponding names of the images.
    :param path: the root where the augmented pictures will be saved.
    :param times: how many times each image getting augmented.
    :return: None; one composite figure is written to ``path`` with a
             timestamped "<time>_Comp.png" file name.
    """
    rows = len(images)
    cols = times + 1  # one column for the original + `times` augmented copies
    for (index, image), name in zip(enumerate(images), images_names):
        # first column: the original image (converted BGR -> RGB)
        plt.subplot(rows, cols, index * cols + 1)
        plt.axis('off')
        plt.title(name)
        _image = bgr2rgb_using_opencv(image)
        plt.imshow(_image)
        for col in range(1, cols):
            plt.subplot(rows, cols, index * cols + col + 1)
            plt.axis('off')
            plt.title("Augmented NO. " + str(col))
            # augment image
            augmented_image = augment_image_using_imgaug(_image, config)
            plt.imshow(augmented_image)
    # Save the full figure
    isExists = os.path.exists(path)
    if not isExists:
        os.makedirs(path)
    now_time = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    # NOTE(review): bare ``savefig`` (not ``plt.savefig``) -- presumably
    # provided by a star import at module scope; confirm it resolves.
    savefig(os.path.join(path, "%s_Comp.png" % now_time), dpi=600)
    # Clear the current figure
    plt.clf()
    plt.cla()
    plt.close()
def plot_result(y, result, threshold, title):
    """
    Plot z-score anomaly-detection output and save the figure to ``title``.

    Top panel: raw data with the running mean (cyan) and the upper (green)
    and lower (blue) normal-range bounds at ``avg +/- threshold * std``.
    Bottom panel: the signal sequence as red steps, clamped to [-1.5, 1.5].
    The figure is saved to the file named by ``title`` and then shown.
    """
    avg = result[AVG_FILTER]
    std = result[STD_FILTER]
    upper_bound = avg + threshold * std
    lower_bound = avg - threshold * std
    print("upper_bound: %s, lower_bound: %s" % (upper_bound, lower_bound))
    signals = result[Z_SCORE_SIGNALS]
    # 1-based sample indices for the x axis
    data_points_number = np.arange(1, len(y) + 1)
    plt.subplot(211)
    plt.plot(data_points_number, y)
    plt.plot(data_points_number, avg, color="cyan", lw=2)
    plt.plot(data_points_number, upper_bound, color="green", lw=2)
    plt.plot(data_points_number, lower_bound, color="blue", lw=2)
    plt.subplot(212)
    plt.step(data_points_number, signals, color="red", lw=2)
    plt.ylim(-1.5, 1.5)
    plt.savefig(title)
    plt.show()
def plot_result(filename='glances.csv'):
    """
    Plot memory use and GPU-0 utilisation from a glances CSV log.

    Reads the ``mem_used`` column (bytes, converted to GB by dividing by
    1e9) and the ``gpu_0_proc`` column, draws them in two side-by-side
    panels, then prints the mean of each series.
    """
    import pandas as pd
    from matplotlib.pylab import plt
    data = pd.read_csv(filename)
    mem_used = data['mem_used'].apply(lambda x: float(x) / (10**9))
    plt.figure(1)
    plt.subplot(121)
    mem_used.plot(color="r", linestyle="-", linewidth=1)
    plt.xlabel('time step')
    plt.ylabel('GB')
    plt.title('mem_used')
    plt.subplot(122)
    gpu_0_proc = data['gpu_0_proc']
    gpu_0_proc.plot(color="b", linestyle="-", linewidth=1)
    plt.xlabel('time step')
    plt.ylabel('proc')
    plt.title('gpu_0_proc')
    plt.show()
    print("mean mem_used:{},mean_gpu_0_proc:{}".format(mem_used.mean(), gpu_0_proc.mean()))
def saveMagPhaseImage2(self, image1, image2, filename):
    """
    Save a 2x2 grid comparing two complex images: magnitude/phase of
    ``image1`` on the top row and of ``image2`` on the bottom row.

    filename : path passed straight to ``plt.savefig``.
    """
    mag = absolute(image1).astype('float')
    phase = angle(image1).astype('float')
    mag2 = absolute(image2).astype('float')
    phase2 = angle(image2).astype('float')
    plt.subplot(221)
    plt.imshow(mag, cmap=cm.Greys_r)
    plt.title('Magnitude')
    plt.axis('off')
    plt.subplot(222)
    plt.imshow(phase, cmap=cm.Greys_r)
    plt.title('Phase')
    plt.axis('off')
    plt.subplot(223)
    plt.imshow(mag2, cmap=cm.Greys_r)
    plt.title('Magnitude')
    plt.axis('off')
    plt.subplot(224)
    plt.imshow(phase2, cmap=cm.Greys_r)
    plt.title('Phase')
    plt.axis('off')
    plt.savefig(filename)
def saveWaterFatImage(self, filename):
    """
    Display a 2x2 grid of the separated water (top) and fat (bottom)
    images: magnitude on the left, phase on the right.

    NOTE(review): despite the name and the ``filename`` parameter, this
    calls ``plt.show()`` and never saves -- confirm whether ``plt.savefig``
    was intended.
    """
    mag = np.absolute(self.water).astype('float')
    phase = np.angle(self.water).astype('float')
    mag2 = np.absolute(self.fat).astype('float')
    phase2 = np.angle(self.fat).astype('float')
    plt.subplot(221)
    plt.imshow(mag, cmap=cm.Greys_r)
    plt.title('Water Magnitude')
    plt.axis('off')
    plt.subplot(222)
    plt.imshow(phase, cmap=cm.Greys_r)
    plt.title('Water Phase')
    plt.axis('off')
    plt.subplot(223)
    plt.imshow(mag2, cmap=cm.Greys_r)
    plt.title('Fat Magnitude')
    plt.axis('off')
    plt.subplot(224)
    plt.imshow(phase2, cmap=cm.Greys_r)
    plt.title('Fat Phase')
    plt.axis('off')
    plt.show()
def make_radar(skill: int, strength: int, defence: int, willpower: int, attack: int, stamina: int):
    '''
    Render a six-axis ability radar chart.

    All six values must be ints in (0, 100]; otherwise None is returned.

    :return: PNG image binary content (a BytesIO positioned at 0), or None.
    '''
    value = [skill, strength, defence, willpower, attack, stamina]
    if not all(map(lambda x: isinstance(x, int) and 0 < x <= 100, value)):
        return
    font = FontProperties(fname=settings.PINGFANG_FONT, size=23)
    plt.figure(figsize=(4.8, 4.8))  # figure size
    # axis labels (user-facing Chinese text; padding tweaks label placement)
    name = [
        '技术\n ', '力量 ', '防守 ', '\n意志力', ' 进攻 ', ' 耐力 '
    ]
    theta = np.linspace(0, 2 * np.pi, len(name), endpoint=False)  # divide the circle evenly by label count
    theta = np.concatenate((theta, [theta[0]]))  # close the polygon
    value = np.concatenate((value, [value[0]]))  # close the polygon
    ax = plt.subplot(111, projection='polar')  # polar axes
    ax.set_theta_zero_location('N')  # put the first axis at the top
    ax.fill(theta, value, color="#EF2D55", alpha=0.35)  # fill color, transparency
    for i in [20, 40, 60, 80, 100]:  # draw the concentric grid rings
        ax.plot(theta, [i] * (6 + 1), 'k-', lw=1, color='#8989A3')  # n + 1 points so the ring closes
    # NOTE(review): the extra 'k-' positional argument after 'ro-' looks
    # accidental -- confirm this matplotlib version ignores it.
    ax.plot(theta, value, 'ro-', 'k-', lw=1, alpha=0.75, color='#FF465C')  # draw the data outline
    ax.set_thetagrids(theta * 180 / np.pi, name, fontproperties=font, color='#8989A3')  # replace the tick labels
    ax.set_ylim(0, 100)  # radial range
    ax.spines['polar'].set_visible(False)  # hide the outermost circle
    ax.grid(True, color='#8989A3', linestyle='-', linewidth=1)
    ax.set_yticks([])
    buf = io.BytesIO()
    plt.savefig(buf, transparent=True)  # transparent background
    plt.close('all')  # close all figures
    buf.seek(0)
    return buf
def SSFP():
    """
    Simulate Nr repetitions of a 90-degree pulse each followed by Tr of
    T1/T2 relaxation and plot Mx, My, Mz over the whole run.

    FIX: the global time axis previously hard-coded ``10`` and ``1000``,
    silently duplicating ``Nr`` and ``Nr * len(t)``; it is now derived from
    them, so changing ``Nr`` or the per-Tr sample count keeps the axis and
    the data the same length.  Output for the shipped values is unchanged.
    Also removed the unused ``M_xy`` local.
    """
    T1 = 800*10**-3   # longitudinal relaxation time [s]
    T2 = 100*10**-3   # transverse relaxation time [s]
    a = pi/2          # flip angle [rad]
    Tr = .01          # repetition time [s]
    t = np.linspace(0, Tr, 100)  # time samples within one repetition
    M = np.array([0,0,1])        # start at equilibrium
    # rotation by the flip angle about x
    R = np.array([[1, 0, 0], [0, cos(a), sin(a)], [0, -sin(a), cos(a)]])
    Mx = []
    My = []
    Mz = []
    Nr = 10  # number of repetitions
    for n in range(Nr):
        # rotate, relax for Tr, then carry the end state into the next rep
        M = R.dot(M)
        M_z = 1 + (M[2] - 1) * np.exp(-t/T1)
        M_x = M[0]*np.exp(-t/T2)
        M_y = M[1]*np.exp(-t/T2)
        M[0] = M_x[-1]
        M[1] = M_y[-1]
        M[2] = M_z[-1]
        Mx = np.append(Mx, M_x)
        My = np.append(My, M_y)
        Mz = np.append(Mz, M_z)
    # global time axis spanning all repetitions, one point per sample
    tnew = np.linspace(0, Nr*Tr, Nr*len(t))
    plt.subplot(131)
    plt.plot(tnew, Mx)
    plt.subplot(132)
    plt.plot(tnew, My)
    plt.subplot(133)
    plt.plot(tnew, Mz)
    plt.show()
# create the integrator object ta = hy.taylor_adaptive( # The ODEs. [(x, dxdt), (y, dydt), (z, dzdt), (px, dpxdt), (py, dpydt), (pz, dpzdt)], # The initial conditions. [-0.45, 0.80, 0.00, -0.80, -0.45, 0.58], # Operate below machine precision # and in high-accuracy mode. tol=1e-18, high_accuracy=True) # integrate the RTBP up to time unit t_grid = np.linspace(0, 200, 2500) out = ta.propagate_grid(t_grid) print(out) # plot from matplotlib.pylab import plt plt.rcParams["figure.figsize"] = (12, 6) plt.subplot(1, 2, 1) plt.plot(out[4][:, 0], out[4][:, 1]) plt.xlabel("x") plt.ylabel("y") plt.subplot(1, 2, 2) plt.plot(out[4][:, 0], out[4][:, 2]) plt.xlabel("x") plt.ylabel("z") plt.show()
for key in ('train_label', 'test_label'): dataset[key] = _load_label(files[key]) if normalize: for key in ('train_img', 'test_img'): dataset[key] = dataset[key].astype(np.float32) dataset[key] /= 255.0 if one_hot_label: for key in ('train_label', 'test_label'): dataset[key] = _change_one_hot_label(dataset[key]) if not flatten: dataset[key] = _change_one_hot_label(dataset[key]) return ((dataset['train_img'], dataset['train_label']), (dataset['test_img'], dataset['test_label'])) from matplotlib.pylab import plt (x_train, y_train), (x_test, y_test) = load_mnist() for i in range(10): img = x_train[i] label = np.argmax(y_train[i]) print(label, end=", ") img = img.reshape(28, 28) plt.subplot(1, 10, i + 1) plt.imshow(img) print() plt.show()
# Parse reward values out of each log file and plot one learning curve per file.
allY = []
for file_name in args.files:
    Y = []
    with open(file_name) as fp:
        for idx, line in enumerate(fp):
            # stop once enough points have been collected
            if len(Y) > args.size:
                break
            # keep only lines matching the requested granularity
            if args.type == 'epoch':
                if 'Epoch' not in line:
                    continue
            else:
                if 'Episode' not in line:
                    continue
            # reward is the last space-separated token (trailing char stripped)
            Y.append(float(line.split(' ')[-1][:-1]))
    allY.append(Y)

fig = plt.figure()
ax = plt.subplot(111)
plt.xlabel('epochs' if args.type == 'epoch' else 'episodes')
plt.ylabel('reward per epoch' if args.type == 'epoch' else 'reward per episode')
# one labelled curve per input file (LABELS defined elsewhere in the file)
for idx, Y in enumerate(allY):
    ax.plot(Y, label=LABELS[idx])
plt.title('Instruction following learning performance one hot')
ax.legend(loc='upper center', shadow=True, ncol=2)
# plt.legend()
plt.show()
def show(self, w):
    """
    Illustrate the learning curve(s).

    Parameters
    ----------
    w : int, window size for smoothing the curve

    Draws cost energy and classification error versus iteration (rescaled to
    thousands, "K"); when malis-training statistics are present (``tn_mc``
    non-empty), also draws rand error and the malis-weighted cost / pixel
    error curves.  Blocks on ``plt.show()``.
    """
    if len(self.tn_mc) > 0:
        # malis training, increase number of subplots
        nsp = 5
    else:
        nsp = 3

    # print the maximum iteration
    self.print_max_update()

    # using K as iteration unit
    # NOTE(review): this rescales the self.tn_it / self.tt_it lists in place.
    tn_it = self.tn_it
    for i in range(len(tn_it)):
        tn_it[i] = tn_it[i] / float(1000)
    tt_it = self.tt_it
    for i in range(len(tt_it)):
        tt_it[i] = tt_it[i] / float(1000)

    # panel 1: cost energy, train (blue) vs test (red)
    plt.subplot(1, nsp, 1)
    plt.plot(tn_it, self.tn_err, 'b.', alpha=0.2)
    plt.plot(tt_it, self.tt_err, 'r.', alpha=0.2)
    # plot smoothed line
    xne, yne = self._smooth(tn_it, self.tn_err, w)
    xte, yte = self._smooth(tt_it, self.tt_err, w)
    plt.plot(xne, yne, 'b')
    plt.plot(xte, yte, 'r')
    plt.xlabel('iteration (K)'), plt.ylabel('cost energy')

    # panel 2: classification error
    plt.subplot(1, nsp, 2)
    plt.plot(tn_it, self.tn_cls, 'b.', alpha=0.2)
    plt.plot(tt_it, self.tt_cls, 'r.', alpha=0.2)
    # plot smoothed line
    xnc, ync = self._smooth(tn_it, self.tn_cls, w)
    xtc, ytc = self._smooth(tt_it, self.tt_cls, w)
    plt.plot(xnc, ync, 'b', label='train')
    plt.plot(xtc, ytc, 'r', label='test')
    plt.xlabel('iteration (K)'), plt.ylabel('classification error')

    # panel 3: rand error (only when the series lengths line up)
    if len(tn_it) == len(self.tn_re):
        plt.subplot(1, nsp, 3)
        plt.plot(tn_it, self.tn_re, 'b.', alpha=0.2)
        plt.plot(tt_it, self.tt_re, 'r.', alpha=0.2)
        # plot smoothed line
        xnr, ynr = self._smooth(tn_it, self.tn_re, w)
        xtr, ytr = self._smooth(tt_it, self.tt_re, w)
        plt.plot(xnr, ynr, 'b', label='train')
        plt.plot(xtr, ytr, 'r', label='test')
        plt.xlabel('iteration (K)'), plt.ylabel('rand error')

    # panel 4: malis weighted cost energy
    if len(tn_it) == len(self.tn_mc):
        plt.subplot(1, nsp, 4)
        plt.plot(tn_it, self.tn_mc, 'b.', alpha=0.2)
        plt.plot(tt_it, self.tt_mc, 'r.', alpha=0.2)
        # plot smoothed line
        xnm, ynm = self._smooth(tn_it, self.tn_mc, w)
        xtm, ytm = self._smooth(tt_it, self.tt_mc, w)
        plt.plot(xnm, ynm, 'b', label='train')
        plt.plot(xtm, ytm, 'r', label='test')
        plt.xlabel('iteration (K)'), plt.ylabel(
            'malis weighted cost energy')

    # panel 5: malis weighted pixel error
    if len(tn_it) == len(self.tn_me):
        plt.subplot(1, nsp, 5)
        plt.plot(tn_it, self.tn_me, 'b.', alpha=0.2)
        plt.plot(tt_it, self.tt_me, 'r.', alpha=0.2)
        # plot smoothed line
        xng, yng = self._smooth(tn_it, self.tn_me, w)
        xtg, ytg = self._smooth(tt_it, self.tt_me, w)
        plt.plot(xng, yng, 'b', label='train')
        plt.plot(xtg, ytg, 'r', label='test')
        plt.xlabel('iteration (K)'), plt.ylabel(
            'malis weighted pixel error')
    plt.legend()
    plt.show()
    return
import numpy as np
from matplotlib.pylab import plt


def getf(i):
    """Return the power function x -> x**i; ``i`` is a parameter of this
    factory, so each returned lambda keeps its own exponent (no late-binding
    pitfall)."""
    return lambda x:x**i


# sample x in [0, 1] and build the curves x^1 ... x^9
xs = np.linspace(0, 1, 5000)
fs = []
for i in range(1, 10):
    fs.append(getf(i))
yss = [[f(x) for x in xs] for f in fs]

ax = plt.subplot()
for i, ys in enumerate(yss):
    plt.plot(xs, ys, label="$x^{}$".format(i+1))
plt.legend()
# dashed vertical guide at x = 1 plus the two common points (0,0) and (1,1)
plt.plot(np.repeat(1, 1000), np.linspace(0, 1, 1000), "k--")
plt.plot(0, 0, "o")
plt.plot(1, 1, "o")
# move the spines so the axes cross at the origin
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
plt.show()
# Run one optimizer on function_1 and draw its convergence path on a contour
# plot; ``idx`` (subplot slot) and ``key`` (optimizer name) come from the
# surrounding comparison code elsewhere in the file.
x, x_process = process(function_1, init_x=np.array([-7.0, 2.0]), I=30)

from matplotlib.pylab import plt

X = np.arange(-10, 10, 0.01)
Y = np.arange(-10, 10, 0.01)
X, Y = np.meshgrid(X, Y)
Z = function_1(np.array([X, Y]))

# simplify the outline: zero out large values so the contours stay readable
mask = Z > 10
Z[mask] = 0

plt.subplot(2, 3, idx)
idx += 1
plt.plot(x_process[:, 0], x_process[:, 1], '.-', color="blue")
plt.contour(X, Y, Z)
plt.ylim(-10, 10)
plt.xlim(-10, 10)
plt.plot(0, 0, '+', color="red")  # local minimum
plt.title(key)
plt.show()

# graph comparison _2 (2 dimensional)
idx = 1
import math

# Load one EOG recording and visualize the low-pass-filtered vertical
# channel together with its detected peaks/valleys.
data_path = "/Users/shimizujunichi/Desktop/BlinkDetection/data/"
data_name = "60380EFFF2A7_20160519114203.csv"
# data_name = "jun_video.csv"

# FIX: pd.DataFrame.from_csv and .as_matrix() were removed in pandas 1.0;
# read_csv(index_col=0, parse_dates=True) and .to_numpy() are the documented
# drop-in replacements with identical results.
df = pd.read_csv(data_path + data_name, index_col=0, parse_dates=True)
# df.index = pd.to_datetime(df['DATE'], unit='s')
recorded_v = df['EOG_V'].to_numpy()   # vertical EOG channel
recorded_h = df['EOG_H'].to_numpy()   # horizontal EOG channel

classifier = cal_mod.OrbitClassify(300, 200, 0.6, 0.5)
classifier.setLowPassfilter(2, 0.17)
rec_v = classifier.LowPassFilter(df['EOG_V'].to_numpy())

plt.subplot(3, 1, 1)
plt.plot(rec_v)

# detect peaks (blue) and valleys (red) in the filtered vertical channel
maxtab, mintab = peak_example.peakdet(rec_v, .3)
plt.subplot(3, 1, 2)
plt.plot(rec_v)
plt.scatter(np.array(maxtab)[:, 0], np.array(maxtab)[:, 1], color='blue')
plt.scatter(np.array(mintab)[:, 0], np.array(mintab)[:, 1], color='red')
plt.xlim(0, 900)

""" buffer Murtaza """
# buffer_vv = np.array([])
# for i in range(0,len(rec_v)):
#     if len(buffer_vv) < 50:
#         buffer_vv = np.append(buffer_vv,rec_v)
#     else:
id = data['id'][0] count = 0 maxx = 0 minn = 10000 tot = 0 mean = 0.0 ids = [] counts = [] maxxs = [] minns = [] means = [] fig = plt.figure(facecolor='white') ax1 = plt.subplot(1, 1, 1, facecolor='white') for col in range(data.shape[0]): if (id == data['id'][col]): count += 1 maxx = max(maxx, data['ele'][col]) minn = min(minn, data['ele'][col]) tot += data['ele'][col] else: mean = (1.0 * tot) / count ids.append(id) counts.append(count) maxxs.append(maxx) minns.append(minn) means.append(mean) print("id:", id, " count:", count, " maxx:", maxx, " minn:", minn,
plt.plot(x_process[:,0], x_process[:,1], '.-') #수렴과정 plt.plot(0, 0, '+' , color = "red") #극소점 plt.xlabel("X0") plt.ylabel("X1") plt.show() #graph 2 (contour graph) x = np.arange(-5, 5, 0.01) y = np.arange(-5, 5, 0.01) X, Y = np.meshgrid(x, y) Z = function_1(np.array([X, Y])) idx = 1 plt.subplot(2, 2, idx) idx += 1 plt.plot( x_process[:,0], x_process[:,1], '.-', color="blue") #수렴과정 plt.contour(X, Y, Z) plt.ylim(-5, 5) plt.xlim(-5, 5) plt.plot(0, 0, '+', color = "red") #극소점 plt.show() #graph 3 (3 dimensional graph) x, x_process = gradient_descent_process(function_1, np.array([-3.0, 4.0]), L = 0.1, I = 50 ) x = np.arange(-5, 5, 0.01)
def show(self, w):
    """
    Illustrate the learning curve(s).

    Parameters
    ----------
    w : int, window size for smoothing the curve

    Draws cost energy and classification error versus iteration (rescaled to
    thousands, "K"); when malis-training statistics are present (``tn_mc``
    non-empty), also draws rand error and the malis-weighted cost / pixel
    error curves.  Blocks on ``plt.show()``.

    FIX: replaced Python-2-only ``xrange`` with ``range`` so this method
    runs on Python 3, matching the sibling implementation in this file.
    """
    if len(self.tn_mc) > 0:
        # malis training, increase number of subplots
        nsp = 5
    else:
        nsp = 3

    # print the maximum iteration
    self.print_max_update()

    # using K as iteration unit
    # NOTE(review): this rescales the self.tn_it / self.tt_it lists in place.
    tn_it = self.tn_it
    for i in range(len(tn_it)):
        tn_it[i] = tn_it[i] / float(1000)
    tt_it = self.tt_it
    for i in range(len(tt_it)):
        tt_it[i] = tt_it[i] / float(1000)

    # panel 1: cost energy, train (blue) vs test (red)
    plt.subplot(1,nsp, 1)
    plt.plot(tn_it, self.tn_err, 'b.', alpha=0.2)
    plt.plot(tt_it, self.tt_err, 'r.', alpha=0.2)
    # plot smoothed line
    xne,yne = self._smooth( tn_it, self.tn_err, w )
    xte,yte = self._smooth( tt_it, self.tt_err, w )
    plt.plot(xne, yne, 'b')
    plt.plot(xte, yte, 'r')
    plt.xlabel('iteration (K)'), plt.ylabel('cost energy')

    # panel 2: classification error
    plt.subplot(1,nsp,2)
    plt.plot(tn_it, self.tn_cls, 'b.', alpha=0.2)
    plt.plot(tt_it, self.tt_cls, 'r.', alpha=0.2)
    # plot smoothed line
    xnc, ync = self._smooth( tn_it, self.tn_cls, w )
    xtc, ytc = self._smooth( tt_it, self.tt_cls, w )
    plt.plot(xnc, ync, 'b', label='train')
    plt.plot(xtc, ytc, 'r', label='test')
    plt.xlabel('iteration (K)'), plt.ylabel( 'classification error' )

    # panel 3: rand error (only when the series lengths line up)
    if len(tn_it) == len( self.tn_re ):
        plt.subplot(1, nsp, 3)
        plt.plot(tn_it, self.tn_re, 'b.', alpha=0.2)
        plt.plot(tt_it, self.tt_re, 'r.', alpha=0.2)
        # plot smoothed line
        xnr, ynr = self._smooth( tn_it, self.tn_re, w )
        xtr, ytr = self._smooth( tt_it, self.tt_re, w )
        plt.plot(xnr, ynr, 'b', label='train')
        plt.plot(xtr, ytr, 'r', label='test')
        plt.xlabel('iteration (K)'), plt.ylabel( 'rand error' )

    # panel 4: malis weighted cost energy
    if len(tn_it) == len( self.tn_mc ):
        plt.subplot(1, nsp, 4)
        plt.plot(tn_it, self.tn_mc, 'b.', alpha=0.2)
        plt.plot(tt_it, self.tt_mc, 'r.', alpha=0.2)
        # plot smoothed line
        xnm, ynm = self._smooth( tn_it, self.tn_mc, w )
        xtm, ytm = self._smooth( tt_it, self.tt_mc, w )
        plt.plot(xnm, ynm, 'b', label='train')
        plt.plot(xtm, ytm, 'r', label='test')
        plt.xlabel('iteration (K)'), plt.ylabel( 'malis weighted cost energy' )

    # panel 5: malis weighted pixel error
    if len(tn_it) == len( self.tn_me ):
        plt.subplot(1, nsp, 5)
        plt.plot(tn_it, self.tn_me, 'b.', alpha=0.2)
        plt.plot(tt_it, self.tt_me, 'r.', alpha=0.2)
        # plot smoothed line
        xng, yng = self._smooth( tn_it, self.tn_me, w )
        xtg, ytg = self._smooth( tt_it, self.tt_me, w )
        plt.plot(xng, yng, 'b', label='train')
        plt.plot(xtg, ytg, 'r', label='test')
        plt.xlabel('iteration (K)'), plt.ylabel( 'malis weighted pixel error' )
    plt.legend()
    plt.show()
    return