def draw_img_for_viewing_ice(self):
    #print("Press 'p' to save PNG.")
    global colmax
    global colmin
    fig = P.figure(num=None, figsize=(13.5, 5), dpi=100, facecolor='w', edgecolor='k')
    cid1 = fig.canvas.mpl_connect('key_press_event', self.on_keypress_for_viewing)
    cid2 = fig.canvas.mpl_connect('button_press_event', self.on_click)
    canvas = fig.add_subplot(121)
    canvas.set_title(self.filename)
    self.axes = P.imshow(self.inarr, origin='lower', vmax=colmax, vmin=colmin)
    self.colbar = P.colorbar(self.axes, pad=0.01)
    self.orglims = self.axes.get_clim()
    canvas = fig.add_subplot(122)
    canvas.set_title("Angular Average")
    maxAngAvg = (self.inangavg).max()
    numQLabels = len(eDD.iceHInvAngQ.keys()) + 1
    labelPosition = maxAngAvg / numQLabels
    for i, j in eDD.iceHInvAngQ.items():  # .iteritems() is Python 2 only
        P.axvline(j, 0, colmax, color='r')
        P.text(j, labelPosition, str(i), rotation="45")
        labelPosition += maxAngAvg / numQLabels
    P.plot(self.inangavgQ, self.inangavg)
    P.xlabel("Q (A-1)")
    P.ylabel("I(Q) (ADU/srad)")
    pngtag = original_dir + "peakfit-gdvn_%s.png" % (self.filename)
    P.savefig(pngtag)
    print("%s saved." % pngtag)
    P.close()
def showPairDeformationDist(model, coords, ind1, ind2, *args, **kwargs):
    """Show distribution of deformations in distance contributed by each mode
    for the selected pair of residues *ind1* *ind2*
    using :func:`~matplotlib.pyplot.plot`."""

    import matplotlib
    import matplotlib.pyplot as plt
    if not isinstance(model, NMA):
        raise TypeError('model must be a NMA instance, '
                        'not {0}'.format(type(model)))
    elif not model.is3d():
        raise TypeError('model must be a 3-dimensional NMA instance')
    elif len(model) == 0:
        raise ValueError('model must have normal modes calculated')
    elif model.getStiffness() is None:
        raise ValueError('model must have stiffness matrix calculated')

    d_pair = calcPairDeformationDist(model, coords, ind1, ind2)
    with plt.style.context('fivethirtyeight'):
        matplotlib.rcParams['font.size'] = '16'
        fig = plt.figure(num=None, figsize=(12, 8), dpi=100, facecolor='w')
        #plt.title(str(model))
        show = plt.plot(d_pair[0], d_pair[1], 'k-', linewidth=1.5, *args, **kwargs)
        plt.xlabel('mode (k)', fontsize='18')
        plt.ylabel(r'd$^k$ ($\AA$)', fontsize='18')  # raw string avoids the invalid \A escape
    if SETTINGS['auto_show']:
        showFigure()
    return show  # was `return plt.show`, which returned the unbound function
def showScaledSqFlucts(modes, *args, **kwargs):
    """Show scaled square fluctuations using :func:`~matplotlib.pyplot.plot`.
    Modes or mode sets given as additional arguments will be scaled to have
    the same mean squared fluctuations as *modes*."""

    import matplotlib.pyplot as plt
    sqf = calcSqFlucts(modes)
    mean = sqf.mean()
    args = list(args)
    modesarg = []
    i = 0
    while i < len(args):
        if isinstance(args[i], (VectorBase, ModeSet, NMA)):
            modesarg.append(args.pop(i))
        else:
            i += 1
    show = [plt.plot(sqf, *args, label=str(modes), **kwargs)]
    plt.xlabel('Indices')
    plt.ylabel('Square fluctuations')
    for modes in modesarg:
        sqf = calcSqFlucts(modes)
        scalar = mean / sqf.mean()
        show.append(plt.plot(sqf * scalar, *args,
                             label='{0} (x{1:.2f})'.format(str(modes), scalar),
                             **kwargs))
    if SETTINGS['auto_show']:
        showFigure()
    return show
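# Hedged usage sketch for showScaledSqFlucts, assuming a ProDy-style workflow;
# the structure ('1ubi') and the ANM setup below are illustrative, not taken
# from this file.
from prody import parsePDB, ANM, showScaledSqFlucts

ubi = parsePDB('1ubi')                    # fetches ubiquitin from the PDB
calphas = ubi.select('calpha')
anm = ANM('ubi')
anm.buildHessian(calphas)
anm.calcModes()
showScaledSqFlucts(anm[0], anm[1])        # mode 1 scaled to mode 0's mean sq. flucts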
def plot(self, nbins=100, range=None):
    # mark the observed F-value with a dashed vertical line, then histogram the distribution
    plt.plot([self.F_[0], self.F_[0]], [0, 100], '--r', lw=2)
    h = plt.hist(self.F_, nbins, range)
    plt.xlabel('F-value')
    plt.ylabel('Count')
    plt.grid()
    return h
def showNormedSqFlucts(modes, *args, **kwargs):
    """Show normalized square fluctuations via :func:`~matplotlib.pyplot.plot`."""

    import matplotlib.pyplot as plt
    sqf = calcSqFlucts(modes)
    args = list(args)
    modesarg = []
    i = 0
    while i < len(args):
        if isinstance(args[i], (VectorBase, ModeSet, NMA)):
            modesarg.append(args.pop(i))
        else:
            i += 1
    show = [plt.plot(sqf / (sqf**2).sum()**0.5, *args,
                     label='{0}'.format(str(modes)), **kwargs)]
    plt.xlabel('Indices')
    plt.ylabel('Square fluctuations')
    for modes in modesarg:
        sqf = calcSqFlucts(modes)
        show.append(plt.plot(sqf / (sqf**2).sum()**0.5, *args,
                             label='{0}'.format(str(modes)), **kwargs))
    if SETTINGS['auto_show']:
        showFigure()
    return show
def rscplot(i, tcodnt, rsdlsc, rsdlpctgc, sqrpctgc, path):
    #mp.figure(figsize=[20, 10])
    #mp.gcf().set_facecolor(np.ones(3) * 240 / 255)
    #mp.subplot(311)
    mp.plot(tcodnt, rsdlsc[:, i], color='red', label='residuals')
    #mp.title('The Residuals After Correction Of Signal %d' % i, fontsize=16)
    leg = mp.legend(fontsize=size)  # `size` is assumed to be a module-level font size
def plot_predict_is(self, h=5, **kwargs):
    """ Plots forecasts with the estimated model against data
    (simulated prediction with data)

    Parameters
    ----------
    h : int (default : 5)
        How many steps to forecast

    Returns
    ----------
    - Plot of the forecast against data
    """
    figsize = kwargs.get('figsize', (10, 7))
    plt.figure(figsize=figsize)
    date_index = self.index[-h:]
    predictions = self.predict_is(h)
    data = self.data[-h:]
    t_params = self.transform_z()

    plt.plot(date_index, np.abs(data - t_params[-1]), label='Data')
    plt.plot(date_index, predictions, label='Predictions', c='black')
    plt.title(self.data_name)
    plt.legend(loc=2)
    plt.show()
def scree_plot(pca_obj, fname=None):
    '''
    Scree plot for variance & cumulative variance by component from PCA.

    Arguments:
        - pca_obj: a fitted sklearn PCA instance
        - fname: path to write plot to file

    Output:
        - scree plot
    '''
    components = pca_obj.n_components_
    variance = pca_obj.explained_variance_ratio_  # was `pca`, an undefined name
    plt.figure()
    plt.plot(np.arange(1, components + 1), np.cumsum(variance), label='Cumulative Variance')
    plt.plot(np.arange(1, components + 1), variance, label='Variance')
    plt.xlim([0.8, components]); plt.ylim([0.0, 1.01])
    plt.xlabel('No. Components', labelpad=11); plt.ylabel('Variance Explained', labelpad=11)
    plt.legend(loc='best')
    plt.tight_layout()
    if fname is not None:
        plt.savefig(fname)
        plt.close()
    else:
        plt.show()
    return
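# Minimal usage sketch for scree_plot, assuming scikit-learn is available;
# the random data and component count are illustrative only.
import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(0).normal(size=(200, 10))
fitted_pca = PCA(n_components=10).fit(X)
scree_plot(fitted_pca)                        # show interactively
# scree_plot(fitted_pca, fname='scree.png')   # or write to file instead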
def draw_ranges_for_parameters(data, title='', save_path='./pictures/'):
    parameters = data.columns.values.tolist()
    # remove the flight name parameter
    if 'flight_name' in parameters:
        parameters.remove('flight_name')  # safer than deleting while iterating

    flight_names = np.unique(data['flight_name'])
    print(len(flight_names))

    for parameter in parameters:
        plt.figure()
        axis = plt.gca()
        # axis.set_xticks(numpy.arange(0, 1, 0.1))
        axis.set_yticks(flight_names)
        axis.tick_params(labelright=True)
        axis.set_ylim([94., 130.])
        plt.grid()
        plt.title(title)
        plt.xlabel(parameter)
        plt.ylabel('flight name')
        colors = iter(cm.rainbow(np.linspace(0, 1, len(flight_names))))
        for flight in flight_names:
            temp = data[data.flight_name == flight][parameter]
            plt.plot([np.min(temp), np.max(temp)], [flight, flight],
                     c=next(colors), linewidth=2.0)
        plt.savefig(save_path + title + '_' + parameter + '.jpg')
        plt.close()
def plotResults(datasetName, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix):
    """
    Plots the errors for a particular dataset on a bar graph.
    """
    for k in range(len(sampleMethods)):
        outfileName = outputDir + datasetName + sampleMethods[k] + fileNameSuffix + ".npz"
        data = numpy.load(outfileName)
        errors = data["arr_0"]
        meanMeasures = numpy.mean(errors, 0)

        for i in range(sampleSizes.shape[0]):
            plt.figure(k * len(sampleMethods) + i)
            plt.title("n=" + str(sampleSizes[i]) + " " + sampleMethods[k])
            for j in range(errors.shape[3]):
                plt.plot(foldsSet, meanMeasures[i, :, j])
            plt.xlabel("Folds")
            plt.ylabel('Error')
            labels = ["VFCV", "PenVF+"]
            labels.extend(["VFP s=" + str(x) for x in cvScalings])
            plt.legend(tuple(labels))
    plt.show()
def plotAlphas(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix):
    """
    Plot the variation in the error with alpha for penalisation.
    """
    for i, datasetName in enumerate(datasetNames):
        #plt.figure(i)
        for k in range(len(sampleMethods)):
            outfileName = outputDir + datasetName + sampleMethods[k] + fileNameSuffix + ".npz"
            data = numpy.load(outfileName)
            errors = data["arr_0"]
            meanMeasures = numpy.mean(errors, 0)
            foldInd = 4

            for m in range(sampleSizes.shape[0]):  # renamed from `i`, which shadowed the dataset index
                plt.plot(cvScalings, meanMeasures[m, foldInd, 2:8],
                         next(linecycler), label="m=" + str(sampleSizes[m]))

            plt.xlabel("Alpha")
            plt.ylabel('Error')
            xmin, xmax = cvScalings[0], cvScalings[-1]
            plt.xlim((xmin, xmax))
            plt.legend(loc="upper left")
            plt.show()
def work(self):
    self.worked = True
    kwargs = dict(
        weights=self.weights,
        mus=self.mus,
        sigmas=self.sigmas,
        low=self.low,
        high=self.high,
        q=self.q,
    )
    samples = GMM1(rng=self.rng, size=(self.n_samples,), **kwargs)
    samples = np.sort(samples)
    edges = samples[::self.samples_per_bin]
    #print(samples)

    pdf = np.exp(GMM1_lpdf(edges[:-1], **kwargs))
    dx = edges[1:] - edges[:-1]
    y = 1 / dx / len(dx)

    if self.show:
        plt.scatter(edges[:-1], y)
        plt.plot(edges[:-1], pdf)
        plt.show()
    err = (pdf - y) ** 2
    print(np.max(err))
    print(np.mean(err))
    print(np.median(err))
    if not self.show:
        assert np.max(err) < .1
        assert np.mean(err) < .01
        assert np.median(err) < .01
def work(self, **kwargs):
    self.__dict__.update(kwargs)
    self.worked = True
    samples = LGMM1(rng=self.rng, size=(self.n_samples,), **self.LGMM1_kwargs)
    samples = np.sort(samples)
    edges = samples[::self.samples_per_bin]
    centers = .5 * edges[:-1] + .5 * edges[1:]
    print(edges)

    pdf = np.exp(LGMM1_lpdf(centers, **self.LGMM1_kwargs))
    dx = edges[1:] - edges[:-1]
    y = 1 / dx / len(dx)

    if self.show:
        plt.scatter(centers, y)
        plt.plot(centers, pdf)
        plt.show()
    err = (pdf - y) ** 2
    print(np.max(err))
    print(np.mean(err))
    print(np.median(err))
    if not self.show:
        assert np.max(err) < .1
        assert np.mean(err) < .01
        assert np.median(err) < .01
def compare_chebhist(dname, mylambda, c, Nbin=25):
    if mylambda == 'Do not exist':
        print('--!!Warning: eig file does not exist, can not display compare histogram')
    else:
        mylambda = 1 - mylambda
        lmin = max(min(mylambda), -1)
        lmax = min(max(mylambda), 1)

        # print(c)
        cheb_file_content = '\n'.join([str(st) for st in c])

        x = np.linspace(lmin, lmax, Nbin + 1)
        y = plot_chebint(c, x)
        u = (x[1:] + x[:-1]) / 2
        v = y[1:] - y[:-1]

        plt.clf()
        # plt.hold() was removed in matplotlib 3.0; hold behaviour is now always on
        plt.hist(mylambda, Nbin)
        plt.plot(u, v, "r.", markersize=10)
        plt.show()
        filename = 'data/' + dname + '.png'
        plt.savefig(filename)

        cheb_filename = 'data/' + dname + '.cheb'
        f = open(cheb_filename, 'w+')
        f.write(cheb_file_content)
        f.close()
def default_run(self):
    """
    Plots the results, saves the figure, and finally displays it from simulating
    codewords with Sum-prod and Max-prod algorithms across variance levels.
    This combines the results in one plot.
    :return:
    """
    if not os.path.exists("./graphs"):
        os.makedirs("./graphs")
    self.save_time = str(int(time.time()))
    self.simulate(Decoder.SUM_PROD)
    self.compute_error()
    plt.plot([math.log10(x) for x in self.variance_levels],
             [math.log10(y) for y in self.bit_error_probability],
             "ro-", label="Sum-Prod")
    self.simulate(Decoder.MAX_PROD)
    self.compute_error()
    plt.plot([math.log10(x) for x in self.variance_levels],
             [math.log10(y) for y in self.bit_error_probability],
             "g^--", label="Max-Prod")
    plt.legend(loc=2)
    plt.title("Hamming Decoder Factor Graph Simulation Results\n" +
              r"$\log_{10}(\sigma^2)$ vs. $\log_{10}(P_e)$" +
              " for Max-Prod & Sum-Prod Algorithms\n" +
              "Sample Size n = %(codewords)s Codewords \n Variance Levels = %(levels)s"
              % {"codewords": str(self.iterations),
                 "levels": str(self.variance_levels)})
    plt.xlabel(r"$\log_{10}(\sigma^2)$")  # raw strings: avoid the invalid \l and \s escapes
    plt.ylabel(r"$\log_{10}(P_e)$")
    plt.savefig("graphs/%(time)s-max-prod-sum-prod-%(num_codewords)s-codewords"
                "-variance-bit_error_probability.png"
                % {"time": self.save_time,
                   "num_codewords": str(self.iterations)},
                bbox_inches="tight")
    plt.show()
def statistics_charts(self):
    if plt is None:
        return

    for chart in self.stats_charts:
        if chart["type"] == "plot":
            fig = plt.figure(figsize=(8, 2))
            for xdata, ydata, label in chart["data"]:
                plt.plot(xdata, ydata, "-", label=label)
            plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
        elif chart["type"] == "timeline":
            fig = plt.figure(figsize=(16, 2))
            for i, (starts, stops, label) in enumerate(chart["data"]):
                plt.hlines([i] * len(starts), starts, stops, label=label)
            plt.ylim(-1, len(chart["data"]))
        elif chart["type"] == "bars":
            fig = plt.figure(figsize=(16, 4))
            plt.bar(range(len(chart["data"])), chart["data"])
        elif chart["type"] == "boxplot":
            fig = plt.figure(figsize=(16, 4))
            plt.boxplot(chart["data"])
        else:
            raise Exception("Unknown chart")
        png = serialize_fig(fig)
        yield chart["name"], html_embed_img(png)
def plot():
    elements_list = get_elements()
    x = range(0, len(elements_list))
    y = elements_list
    print(x)
    plt.plot(x, y)
    plt.show()
def plotJ(J_history, num_iters):
    x = np.arange(1, num_iters + 1)
    plt.plot(x, J_history)
    plt.xlabel(u"迭代次数", fontproperties=font)  # "iteration count"; a CJK font must be specified or the label renders as boxes
    plt.ylabel(u"代价值", fontproperties=font)    # "cost value"
    plt.title(u"代价随迭代次数的变化", fontproperties=font)  # "cost vs. iteration count"
    plt.show()
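# Illustrative call for plotJ: a synthetic, monotonically decreasing cost
# history stands in for the output of gradient descent; `font` is assumed to
# be a matplotlib FontProperties object set up elsewhere for the CJK labels.
import numpy as np

num_iters = 100
J_history = 1.0 / np.arange(1, num_iters + 1)  # stand-in cost curve
plotJ(J_history, num_iters)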
def plotIterationResult(train_err_list):
    x = range(1, len(train_err_list) + 1)
    fig = plt.figure()
    plt.plot(x, train_err_list)
    plt.xlabel('iterations')
    plt.ylabel('training error')
    plt.show()
def display(spectrum):
    template = np.ones(len(spectrum))

    # Get the plot ready and label the axes
    pyp.plot(spectrum)
    max_range = int(math.ceil(np.amax(spectrum) / standard_deviation))
    for i in range(0, max_range):
        pyp.plot(template * (mean + i * standard_deviation))
    pyp.xlabel('Units?')
    pyp.ylabel('Amps Squared')
    pyp.title('Mean Normalized Power Spectrum')
    if 'V' in Options:
        pyp.show()
    if 'v' in Options:
        tokens = sys.argv[-1].split('.')
        filename = tokens[0] + ".png"
        answer = ''  # renamed from `input`, which shadowed the builtin and broke the prompt calls below
        if os.path.isfile(filename):
            answer = input("Error: Plot file already exists! Overwrite? (y/n)\n")
            while answer != 'y' and answer != 'n':
                answer = input("Please enter either 'y' or 'n'.\n")
            if answer == 'y':
                pyp.savefig(filename)
            else:
                print("Plot not written.")
        else:
            pyp.savefig(filename)
def draw_stat(actual_price, action):
    price_list = []
    x_list = []
    # idx = np.where(actual_price == 0)[0]
    # print(idx)
    # print(actual_price[np.where(actual_price < 2000)])
    # idx = [0] + idx.tolist()
    # print(idx)
    # for i in range(len(idx)-1):
    #     price_list.append(actual_price[idx[i]+1:idx[i+1]-1])
    #     x_list.append(range(idx[i]+i+1, idx[i+1]+i-1))
    # for i in range(len(idx)-1):
    #     print(x_list[i])
    #     print(price_list[i])
    #     plt.plot(x_list[i], price_list[i], 'r')
    x_list = range(1, 50)
    price_list = actual_price[1:50]
    plt.plot(x_list, price_list, 'k')
    for i in range(1, 50):
        style = 'go'
        if action[i] == 1:
            style = 'ro'
        plt.plot(i, actual_price[i], style)
    plt.ylim(2140, 2144.2)
    # plt.show()
    plt.savefig("action.png")
def tuning(x, y, err=None, smooth=None, ylabel=None, pal=None):
    """
    Plot a tuning curve
    """
    if smooth is not None:
        xs, ys = smoothfit(x, y, smooth)
        plt.plot(xs, ys, linewidth=4, color="black", zorder=1)
    else:
        ys = asarray([0])
    if pal is None:
        pal = sns.color_palette("husl", n_colors=len(x) + 6)
        pal = pal[2:2 + len(x)][::-1]
    plt.scatter(x, y, s=300, linewidth=0, color=pal, zorder=2)
    if err is not None:
        plt.errorbar(x, y, yerr=err, linestyle="None", ecolor="black", zorder=1)
    plt.xlabel("Wall distance (mm)")
    plt.ylabel(ylabel)
    plt.xlim([-2.5, 32.5])
    if err is not None:  # guard: the original indexed into err even when it was None
        errTmp = err.copy()  # copy so the caller's array is not mutated
        errTmp[isnan(err)] = 0
        rng = max([nanmax(ys), nanmax(y + errTmp)])
    else:
        rng = max([nanmax(ys), nanmax(y)])
    plt.ylim([0 - rng * 0.1, rng + rng * 0.1])
    plt.yticks(linspace(0, rng, 3))
    plt.xticks(range(0, 40, 10))
    sns.despine()
    return rng
def scatter(x, y, equal=False, xlabel=None, ylabel=None, xinvert=False, yinvert=False):
    """
    Plot a scatter with simple formatting options
    """
    plt.scatter(x, y, 200, color=[0.3, 0.3, 0.3], edgecolors="white", linewidth=1, zorder=2)
    sns.despine()
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    if equal:
        plt.gca().set_aspect("equal")  # plt.axes() creates a new axes in current matplotlib
        plt.plot([0, max([x.max(), y.max()])], [0, max([x.max(), y.max()])],
                 color=[0.6, 0.6, 0.6], zorder=1)
        bmin = min([x.min(), y.min()])
        bmax = max([x.max(), y.max()])
        rng = abs(bmax - bmin)
        plt.xlim([bmin - rng * 0.05, bmax + rng * 0.05])
        plt.ylim([bmin - rng * 0.05, bmax + rng * 0.05])
    else:
        xrng = abs(x.max() - x.min())
        yrng = abs(y.max() - y.min())
        plt.xlim([x.min() - xrng * 0.05, x.max() + xrng * 0.05])
        plt.ylim([y.min() - yrng * 0.05, y.max() + yrng * 0.05])
    if xinvert:
        plt.gca().invert_xaxis()
    if yinvert:
        plt.gca().invert_yaxis()
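# Usage sketch for the scatter helper above; the data are synthetic and the
# call assumes numpy arrays (the helper uses .max()/.min() on its inputs) and
# that seaborn/matplotlib are already imported as in the helper.
import numpy as np
import matplotlib.pyplot as plt

rs = np.random.RandomState(1)
x = rs.uniform(0, 1, 50)
y = x + rs.normal(0, 0.1, 50)
scatter(x, y, equal=True, xlabel="observed", ylabel="predicted")
plt.show()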
def LinRegTest(XTrain, YTrain, close, filename):
    '''
    Use the linear regression learner to predict how much the price will change in 5 days
    @filename: the file's true name is ML4T-filename
    @XTrain: the train data for features
    @YTrain: the train data for actual price after 5 days
    @close: the actual close price of the test data set
    '''
    # Note: the original docstring mentioned a RandomForest and a tree count @k,
    # but the function uses LinRegLearner and takes no k parameter.
    XTest, YTest = TestGenerator(close)

    # plot the features
    plt.clf()
    fig = plt.figure()
    fig.suptitle('The value of features')
    plt.plot(range(100), XTest[0:100, 0], 'b', label='One day price change')
    plt.plot(range(100), XTest[0:100, 1], 'r', label='difference between two day price change')
    plt.legend(loc=4)
    plt.ylabel('Price')
    filename4 = 'feature' + filename + '.pdf'
    fig.savefig(filename4, format='pdf')

    LRL = LinRegLearner()
    cof = LRL.addEvidence(XTrain, YTrain)
    YLearn = LRL.query(XTest, cof)
    return YLearn
def visualize(segmentation, expression, visualize=None, store=None, title=None, legend=False):
    notes = []
    onsets = []
    values = []
    param = ['Dynamics', 'Articulation', 'Tempo']
    converter = NoteList()
    converter.bpm = 100
    if not visualize:
        visualize = selectSubset(param)
    for segment, expr in zip(segmentation, expression):
        for note in segment:
            onsets.append(converter.ticks_to_milliseconds(note.on) / 1000.0)
            values.append([expr[i] for i in visualize])

    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(12, 4))
    # each row of `values` holds one entry per selected parameter, so index by
    # position rather than by the parameter id (the original indexed by id and
    # could go out of range for subsets like [0, 2])
    for idx, i in enumerate(visualize):
        plt.plot(onsets, [v[idx] for v in values], label=param[i])
    plt.ylabel('Deviation')
    plt.xlabel('Score time (seconds)')
    if legend:
        plt.legend(bbox_to_anchor=(0., 1), loc=2, borderaxespad=0.)
    if title:
        plt.title(title)
    #dplot = fig.add_subplot(111)
    #sodplot = fig.add_subplot(111)
    #dplot.plot([i for i in range(len(deltas[0]))], deltas[0])
    #sodplot.plot([i for i in range(len(sodeltas[0]))], sodeltas[0])
    if store:
        fig.savefig('plots/{0}.png'.format(store))
    else:
        plt.show()
def roc_plot(y_true, y_pred):
    """Plots a receiver operating characteristic.

    Parameters
    ----------
    y_true : array_like
        Observed labels, either 0 or 1.
    y_pred : array_like
        Predicted probabilities, floats on [0, 1].

    Notes
    -----
    .. plot:: pyplots/roc_plot.py

    References
    ----------
    .. [1] Pedregosa, F. et al. "Scikit-learn: Machine Learning in Python."
           *Journal of Machine Learning Research* 12 (2011): 2825–2830.
    .. [2] scikit-learn developers. "Receiver operating characteristic (ROC)."
           Last modified August 2013.
           http://scikit-learn.org/stable/auto_examples/plot_roc.html.
    """
    fpr, tpr, __ = roc_curve(y_true, y_pred)
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, label='ROC curve (area = {:0.2f})'.format(roc_auc))
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic')
    plt.legend(loc='lower right')
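# Hedged usage sketch for roc_plot with scikit-learn; the classifier and
# dataset below are illustrative stand-ins, not part of the original module.
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
roc_plot(y_te, clf.predict_proba(X_te)[:, 1])
plt.show()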
def graph(f, n, xmin, xmax, resolution=1001):
    xlist = np.linspace(xmin, xmax, n)
    ylist = f(xlist)
    xlist_fine = np.linspace(xmin, xmax, resolution)
    ylist_fine = p_L(xlist_fine, xlist, ylist)
    plt.plot(xlist, ylist, 'ro')
    plt.plot(xlist_fine, ylist_fine)
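# `p_L` is assumed to be a Lagrange interpolation helper defined elsewhere;
# a minimal sketch consistent with the call p_L(x, xp, yp) above:
import numpy as np

def p_L(x, xp, yp):
    """Evaluate the Lagrange interpolating polynomial through (xp, yp) at x."""
    x = np.asarray(x, dtype=float)
    total = np.zeros_like(x)
    for k in range(len(xp)):
        # basis polynomial L_k(x): product over all nodes except node k
        L_k = np.ones_like(x)
        for j in range(len(xp)):
            if j != k:
                L_k *= (x - xp[j]) / (xp[k] - xp[j])
        total += yp[k] * L_k
    return total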
def plotISVar():
    plt.figure()
    plt.title('Variance minimization problem (call).\nVertical lines mark the minima.')
    for K in [0.6, 0.8, 1.0, 1.2]:
        theta = np.linspace(-0.6, 2)
        var = [BS.exactCallVar(K * s0, theta) for theta in theta]
        minth = theta[np.argmin(var)]
        line, = plt.plot(theta, var, label=str(K))
        plt.axvline(minth, color=line.get_color())
    plt.xlabel(r'$\theta$')
    plt.ylabel('call variance')
    plt.legend(title=r'$K/s_0$', loc='upper left')
    plt.autoscale(tight=True)

    plt.figure()
    plt.title('Variance minimization problem (put).\nVertical lines mark the minima.')
    for K in [0.8, 1.0, 1.2, 1.4]:
        theta = np.linspace(-2, 0.5)
        var = [BS.exactPutVar(K * s0, theta) for theta in theta]
        minth = theta[np.argmin(var)]
        line, = plt.plot(theta, var, label=str(K))
        plt.axvline(minth, color=line.get_color())
    plt.xlabel(r'$\theta$')
    plt.ylabel('put variance')
    plt.legend(title=r'$K/s_0$', loc='upper left')
    plt.autoscale(tight=True)
def test_get_obs(self):
    plt.figure()
    ant_sigs = antennas.antennas_signal(self.ants, self.ant_models, self.sources, self.rad.timebase)
    rad_sig_full = self.rad.sampled_signal(ant_sigs[0, :], 0)
    obs_full = self.rad.get_full_obs(ant_sigs, self.utc_date, self.config)

    ant_sigs_simp = antennas.antennas_simplified_signal(self.ants, self.ant_models,
                                                        self.sources, self.rad.baseband_timebase,
                                                        self.rad.int_freq)
    obs_simp = self.rad.get_simplified_obs(ant_sigs_simp, self.utc_date, self.config)

    freqs, spec_full_before_obs = spectrum.plotSpectrum(rad_sig_full, self.rad.ref_freq,
                                                        label='full_before_obs_obj', c='blue')
    freqs, spec_full = spectrum.plotSpectrum(obs_full.get_antenna(1), self.rad.ref_freq,
                                             label='full', c='cyan')
    freqs, spec_simp = spectrum.plotSpectrum(obs_simp.get_antenna(1), self.rad.ref_freq,
                                             label='simp', c='red')
    plt.legend()

    self.assertTrue((spec_full_before_obs == spec_full).all(), True)

    plt.figure()
    plt.plot(freqs, (spec_simp - spec_full) / spec_full)
    plt.show()

    print(len(obs_full.get_antenna(1)), obs_full.get_antenna(1).mean())
    print(len(obs_simp.get_antenna(1)), obs_simp.get_antenna(1).mean())
def _plot(self, names, title, style, when=0, showLegend=True):
    if isinstance(names, str):
        names = [names]
    assert isinstance(names, list)

    legend = []
    for name in names:
        assert isinstance(name, str)
        legend.append(name)

        # if it's a differential state
        if name in self.xNames:
            index = self.xNames.index(name)
            ys = np.squeeze(self._log['x'])[:, index]
            ts = np.arange(len(ys)) * self.Ts
            plt.plot(ts, ys, style)

        if name in self.outputNames:
            index = self.outputNames.index(name)
            ys = np.squeeze(self._log['outputs'][name])
            ts = np.arange(len(ys)) * self.Ts
            plt.plot(ts, ys, style)

    if title is not None:
        assert isinstance(title, str), "title must be a string"
        plt.title(title)
    plt.xlabel('time [s]')
    if showLegend is True:
        plt.legend(legend)
    plt.grid()
# dashed_line_graph01.py
import matplotlib.pyplot as plt

x = [0, 1, 2, 3, 4, 5, 6]
y = [1, 4, 5, 8, 9, 5, 3]

# set the figure size
plt.figure(figsize=(10, 6))

# set the line color and style (the default is a solid blue line)
plt.plot(x, y, color='green', linestyle='dashed')
plt.show()
def printPlotResults(self, xAxis, yTrainErr, yValidErr, yTestErr, numUpdate,
                     currTrainDataShuffle, factorMean, factorCovariance, factorWeights):
    figureCount = 0  # TODO: Make global
    import matplotlib.pyplot as plt
    print("mean", factorMean)
    print("K: ", self.K)
    print("Iter: ", numUpdate)
    print("mean", factorMean)
    print("meanShape", factorMean.shape)
    print("CoVariance", factorCovariance)
    print("CoVarianceShape", factorCovariance.shape)
    print("Lowest TrainLoss", np.min(yTrainErr))
    print("Lowest ValidLoss", np.min(yValidErr))
    print("Lowest TestLoss", np.min(yTestErr))
    trainStr = "Train"
    validStr = "Valid"
    testStr = "Test"
    typeLossStr = "Loss"
    typeScatterStr = "Assignments"
    trainLossStr = trainStr + typeLossStr
    validLossStr = validStr + typeLossStr
    testLossStr = testStr + typeLossStr
    iterationStr = "Iteration"
    paramStr = "K" + str(self.K) + "Learn" + str(self.learningRate) + "NumEpoch" + str(self.numEpoch)

    # Train Loss
    figureCount = figureCount + 1
    plt.figure(figureCount)
    title = trainStr + typeLossStr + paramStr
    plt.title(title)
    plt.xlabel(iterationStr)
    plt.ylabel(typeLossStr)
    plt.plot(np.array(xAxis), np.array(yTrainErr), label=trainLossStr)
    plt.legend()
    plt.savefig(self.questionTitle + title + ".png")
    plt.close()
    plt.clf()

    # Valid Loss
    figureCount = figureCount + 1
    plt.figure(figureCount)
    title = validStr + typeLossStr + paramStr
    plt.title(title)
    plt.xlabel(iterationStr)
    plt.ylabel(typeLossStr)
    plt.plot(np.array(xAxis), np.array(yValidErr), label=validLossStr)
    plt.legend()
    plt.savefig(self.questionTitle + title + ".png")
    plt.close()
    plt.clf()

    # Test Loss
    figureCount = figureCount + 1
    plt.figure(figureCount)
    title = testStr + typeLossStr + paramStr
    plt.title(title)
    plt.xlabel(iterationStr)
    plt.ylabel(typeLossStr)
    plt.plot(np.array(xAxis), np.array(yTestErr), label=testLossStr)
    plt.legend()
    plt.savefig(self.questionTitle + title + ".png")
    plt.close()
    plt.clf()

    # Weight Images
    for i in range(self.K):  # xrange is Python 2 only
        imageTitle = (self.questionTitle + "WeightDim" + str(i) + "K" + str(self.K) +
                      "NumEpoch" + str(self.numEpoch))
        # print(factorWeights)
        print(factorWeights.shape)
        self.saveGrayscaleImage(factorWeights[:, i], 8, 8, imageTitle)
        self.saveGrayscaleImage(np.transpose(factorWeights)[i, :], 8, 8, imageTitle + "OTHER")
# mean signal
raw_signal_iso['Mean0R'] = roll_mean(raw_signal_iso["Unmarked Fiber0R"])
raw_signal_iso['Mean1R'] = roll_mean(raw_signal_iso["Marked Fiber1R"])
raw_signal_iso['Mean2G'] = roll_mean(raw_signal_iso["Unmarked Fiber2G"])
raw_signal_iso['Mean3G'] = roll_mean(raw_signal_iso["Marked Fiber3G"])
raw_signal_gcmp['Mean2G'] = roll_mean(raw_signal_gcmp["Unmarked Fiber2G"])
raw_signal_gcmp['Mean3G'] = roll_mean(raw_signal_gcmp["Marked Fiber3G"])
raw_signal_rcmp['Mean0R'] = roll_mean(raw_signal_rcmp["Unmarked Fiber0R"])
raw_signal_rcmp['Mean1R'] = roll_mean(raw_signal_rcmp["Marked Fiber1R"])

# Plotting an example mean to see how things are progressing - looks good!
plt.figure()
plt.plot(raw_signal_iso["Timestamp"], raw_signal_iso["Unmarked Fiber2G"], 'k',
         raw_signal_iso["Timestamp"], raw_signal_iso["Mean2G"], 'b',
         raw_signal_gcmp["Timestamp"], raw_signal_gcmp["Mean2G"], 'g')
plt.legend(("Raw Iso", "Mean Iso", "Mean GCaMP"))
plt.title("Unmarked Fiber, ROI 2G")
plt.savefig("Testing Means for 2G.pdf")

### Step 2 is baseline correction with airPLS, from Zhang et al. 2010.
# A python version of the functions is available on github; just need to
# understand how it takes in data and what it outputs!
lambda_ = 5e4  # SUPER IMPORTANT, controls flatness of baseline.
               # Current best value known: 1e9 (from MATLAB version trials)
               # Martianova's exp program used lambda = 5e4
porder = 1
itermax = 50  # These values recommended by exp prog
raw_signal_iso['BLC 0R'] = airPLS(raw_signal_iso['Mean0R'], lambda_, porder,
                                  itermax)  # the original call was truncated; closing it with the itermax defined above
A = np.array(np.vstack([np.ones(n), moc, moc**2, moc**3, moc**4, moc**5, moc**6]).T)
# [ 6.86201953e+05 -1.00845488e+04  6.01867466e+01 -1.86938703e-01
#   3.19466082e-04 -2.85541432e-07  1.04530523e-10]
# A = np.array(np.vstack([np.ones(n), temp, temp**3, temp**4, moc, moc**2, moc**3]).T)
print('mine:')
print(A)
a, b, c = poisci_parametre(A, y, np.ones(n))
print('test on degree-6 polynomial:')
print(a)
tocke = np.linalg.lstsq(A, y, rcond=None)[0]  # rcond=None avoids the FutureWarning in newer numpy
print(tocke)
print('end of degree-6 polynomial:')
plt.plot(y)
plt.plot(np.dot(A, a))
plt.plot(np.dot(A, tocke))
plt.legend(['measurements', 'fit', 'lstsq'])
plt.show()
print('lsqr:')
print('test')
y, x, stevilka = fitanje()
urejeno_stevka = []
urejeno_indeks = []
urejeno_hi = []
for i in range(len(x)):
df = df.reshape(-1, 1)
print(df.shape)
df[:5]

# Split data into training and test data
dataset_train = np.array(df[:int(df.shape[0] * 0.8)])
dataset_test = np.array(df[int(df.shape[0] * 0.8):])
dataset_test_orig = dataset_test
print(dataset_train.shape)
print(dataset_test.shape)
print(dataset_train[1])
print(dataset_test[1])

a = plt.figure(1)
plt.plot(dataset_train, linewidth=3, color='red', label='Training')
plt.plot(dataset_test, linewidth=1, color='blue', label='Test')
plt.plot(df, linewidth=1, color='black', label=ticker)
plt.legend(['Training', 'Test', ticker], loc='upper left')
a.show()

# Scale training data
scaler = MinMaxScaler(feature_range=(0, 1))
dataset_train = scaler.fit_transform(dataset_train)
dataset_train[:5]

# Scale test data
dataset_test = scaler.transform(dataset_test)
dataset_test[:5]
'''For example 1: drawing the TD(lambda) curve when lambda takes values between 0 and 1'''
temp_list = []
alter_list = []
for i in np.linspace(0, 1, num=50):
    alter_list.append(i)
    temp_list.append(cal_TD(probToState=0.81,
                            valueEstimates=[0.0, 4.0, 25.7, 0.0, 20.1, 12.2, 0.0],
                            rewards=[7.9, -5.1, 2.5, -7.2, 9.0, 0.0, 1.6],
                            lambd=i,
                            gamma=1))
plt.grid()
# ylim = (0, 1.1)
# plt.ylim(*ylim)
plt.plot(alter_list, temp_list, color="r")
plt.savefig('example1.png')
plt.gcf().clear()

# cal_TD(lambd=1,
#        probToState=0.81,
#        valueEstimates=[0.0, 4.0, 25.7, 0.0, 20.1, 12.2, 0.0],
#        rewards=[7.9, -5.1, 2.5, -7.2, 9.0, 0.0, 1.6],
#        gamma=1)

# '''example set 1'''
# # Use scipy.optimize.fsolve to calculate the numerical solution on what value can make cal_TD = 0.
# print("============start finding lambda to make TD(lambda) = TD(1)===============")
# result = fsolve(cal_TD,
def graph(x, y, xd, yd, xp, yp, color):
    pyplot.scatter(x, y, s=8)
    pyplot.scatter(xd, yd, s=64, c='g')
    pyplot.scatter(xp, yp, c=color)
    pyplot.plot(xp, yp, c='c')
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
                       'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
feature_columns = get_feature_columns(CATEGORICAL_COLUMNS, NUMERIC_COLUMNS, x_train)
train_input_fn = make_input_fn(x_train, y_train)
eval_input_fn = make_input_fn(x_eval, y_eval, shuffle=False, n_epochs=1)

est = tf.estimator.LinearClassifier(feature_columns)
est.train(train_input_fn, max_steps=100)
result = est.evaluate(eval_input_fn)
print(pd.Series(result))

est = tf.estimator.BoostedTreesClassifier(feature_columns, n_batches_per_layer=1)
est.train(train_input_fn, max_steps=100)
result = est.evaluate(eval_input_fn)
print(pd.Series(result))

pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
plt.show()

fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show()
def multiclass_classifier(X_train, X_test, y_train, y_test, model, list_of_classes, class_labels):
    # Binarize the output
    y_train, y_test = label_binarize(y_train, classes=list_of_classes), label_binarize(
        y_test, classes=list_of_classes)
    n_classes = len(class_labels)

    # Learn to predict each class against the other
    classifier = OneVsRestClassifier(model)
    y_score = classifier.fit(X_train, y_train).predict_proba(X_test)

    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    # Compute micro-average ROC curve and ROC area
    fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))

    # Then interpolate all ROC curves at these points
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += interp(all_fpr, fpr[i], tpr[i])

    # Finally average it and compute AUC
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

    # Plot all ROC curves
    plt.figure(figsize=(12, 12))
    plt.plot(fpr["micro"], tpr["micro"],
             label='micro-average ROC curve (area = {0:0.2f})'.format(roc_auc["micro"]),
             color='deeppink', linestyle=':', linewidth=4)
    plt.plot(fpr["macro"], tpr["macro"],
             label='macro-average ROC curve (area = {0:0.2f})'.format(roc_auc["macro"]),
             color='navy', linestyle=':', linewidth=4)
    colors = cycle([
        'aqua', 'darkorange', 'cornflowerblue', 'green', 'purple', 'red', 'blue'
    ])
    for i, color in zip(range(n_classes), colors):
        plt.plot(fpr[i], tpr[i], color=color,
                 label='ROC curve of class {0} (area = {1:0.2f})'.format(i + 1, roc_auc[i]))

    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Some extension of Receiver operating characteristic to multi-class')
    plt.legend(loc="lower right")
    figure = plt.show()

    y_prob = classifier.predict_proba(X_test)
    # macro_roc_auc_ovo = roc_auc_score(y_test, y_prob, multi_class="ovo", average="macro")
    # weighted_roc_auc_ovo = roc_auc_score(y_test, y_prob, multi_class="ovo", average="weighted")
    macro_roc_auc_ovr = roc_auc_score(y_test, y_prob, average="macro")
    weighted_roc_auc_ovr = roc_auc_score(y_test, y_prob, average="weighted")
    # print("One-vs-One ROC AUC scores:\n{:.6f} (macro),\n{:.6f} "
    #       "(weighted by prevalence)".format(macro_roc_auc_ovo, weighted_roc_auc_ovo))
    y_pred = classifier.predict(X_test)
    mcm = multilabel_confusion_matrix(y_test, y_pred, labels=class_labels)
    print("One-vs-Rest ROC AUC scores:\n{:.6f} (macro),\n{:.6f} "
          "(weighted by prevalence)".format(macro_roc_auc_ovr, weighted_roc_auc_ovr))
    print(figure)  # split from a comma-chained statement for clarity
    print(mcm)
    return classifier
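# Illustrative call for multiclass_classifier, assuming scikit-learn's iris
# data; the model choice (LogisticRegression) and labels are examples, not
# prescribed by the original.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

iris = load_iris()
X_tr, X_te, y_tr, y_te = train_test_split(iris.data, iris.target, random_state=0)
clf = multiclass_classifier(X_tr, X_te, y_tr, y_te,
                            LogisticRegression(max_iter=1000),
                            list_of_classes=[0, 1, 2],
                            class_labels=[0, 1, 2])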
def graph2(x, y, c='r'):
    pyplot.plot(x, y, c)
    pyplot.show()
def DrawFig(figureFile, distance, leftIden, rigthIden, aveIden, nr, aa, bb, test):
    fig = plt.figure(num=None, figsize=(16, 18), facecolor='w', edgecolor='k')
    plt.subplot(321)
    """
    from matplotlib.colors import LogNorm
    plt.hist2d(test[:,4], test[:,5], bins=50, norm=LogNorm())
    plt.plot(test[:,0], test[:,1], 'co')
    """
    plt.title('Distance distribution', fontsize=16)
    plt.plot(distance[:, 0], 100 * distance[:, 1] / np.sum(distance[:, 1]), 'ro-')
    plt.xlabel('The breakpoints of variants span on assembled sequence (%)', fontsize=16)
    plt.ylabel('% of Number', fontsize=16)

    plt.subplot(322)
    plt.title('Left Side', fontsize=16)
    plt.plot(leftIden[:, 0], leftIden[:, 2] / np.sum(leftIden[:, 1]), 'go-')
    plt.axis([0, 100, 0.0, 1.0])
    plt.xlabel('Left side identity of variants (<=%)', fontsize=16)
    plt.ylabel('% of Accumulate', fontsize=16)

    plt.subplot(323)
    plt.title('Right Side', fontsize=16)
    plt.plot(rigthIden[:, 0], rigthIden[:, 2] / np.sum(rigthIden[:, 1]), 'bo-')
    plt.axis([0, 100, 0.0, 1.0])
    plt.xlabel('Right side identity of variants (<=%)', fontsize=16)
    plt.ylabel('% of Accumulate', fontsize=16)

    plt.subplot(324)
    plt.title('Average', fontsize=16)
    plt.plot(aveIden[:, 0], aveIden[:, 2] / np.sum(aveIden[:, 1]), 'co-')
    plt.axis([0, 100, 0.0, 1.0])
    plt.xlabel('Average identity of variants (<=%)', fontsize=16)
    plt.ylabel('% of Accumulate', fontsize=16)

    plt.subplot(325)
    plt.title('N Ratio', fontsize=16)
    plt.plot(nr[:, 0], nr[:, 2] / np.sum(nr[:, 1]), 'yo-')
    plt.axis([0, 5, 0.0, 1.0])
    plt.xlabel("N ratio of variants' regions (>=%)", fontsize=16)
    plt.ylabel('% of Accumulate', fontsize=16)

    plt.subplot(6, 2, 10)
    plt.plot(aa[:, 0], aa[:, 2] / np.sum(aa[:, 1]), 'mo-')
    plt.axis([0, 100, 0.0, 1.0])
    plt.xlabel('Perfect Depth (<=)', fontsize=12)
    plt.ylabel('% of Accumulate', fontsize=16)

    plt.subplot(6, 2, 12)
    plt.plot(bb[:, 0], bb[:, 2] / np.sum(bb[:, 1]), 'ko-')
    plt.axis([0, 100, 0.0, 1.0])
    plt.xlabel('Both Imperfect Depth (<=)', fontsize=12)
    plt.ylabel('% of Accumulate', fontsize=16)

    fig.savefig(figureFile + '.png')
def drawObstacle(obstacles):
    for obs in obstacles:
        pyplot.plot(obs[0], obs[1], c='r')
from pylab import *
import math
import matplotlib.pyplot as plt
import scipy.signal as sp

# Obtaining f(t)
F_num = poly1d([1, 0.5])
F_denom = poly1d([1, 1, 2.5])
F = sp.lti(F_num, F_denom)
t_f, f = sp.impulse(F, None, linspace(0, 50, 1001))

# Plotting f(t)
figure(0)
plt.plot(t_f, f)
plt.title('Plot of f(t)')
plt.xlabel('t')
plt.ylabel('f(t)')
plt.show()

# Obtaining x(t)
X_num = F_num
X_denom = polymul([1, 0, 2.25], F_denom)
X = sp.lti(X_num, X_denom)
t_x, x = sp.impulse(X, None, linspace(0, 50, 1001))
x[0] = 0

# Plotting x(t)
            tf[compl] = 1
            frame_array[compl] = pl
            compl = compl + 1
        else:
            # clear reference bits until an evictable frame is found
            while tf[rp] != 0:
                tf[rp] = 0
                rp = rp + 1
                if rp == g:
                    rp = 0
            frame_array[rp] = pl
            tf[rp] = 1
        print("elements in frame_array : ")
        print(frame_array)
        c = c + 1
    print("Number of page_array faults " + str(c - 1))
    pages_falts_list.append(int(c - 1))
    frameno_list.append(a)

matlab_plots.plot(frameno_list, pages_falts_list, marker='p', color='green', label='Page Faults')
matlab_plots.legend(loc='upper left')
matlab_plots.xlabel('Total Number of Frames')
matlab_plots.ylabel('Page Faults')
matlab_plots.xticks(frameno_list, frameno_list)
matlab_plots.style.use('ggplot')
matlab_plots.show()
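# For reference, a compact, self-contained FIFO page-fault counter of the kind
# the fragment above generalizes; the function name and example reference
# string are illustrative only.
def fifo_page_faults(pages, n_frames):
    frames, faults = [], 0
    for p in pages:
        if p not in frames:
            faults += 1
            if len(frames) == n_frames:
                frames.pop(0)  # evict the oldest page
            frames.append(p)
    return faults

# e.g. fifo_page_faults([7, 0, 1, 2, 0, 3, 0, 4], 3) returns 7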
p = data.values
((p[1] - p[0])**2.).sum()**.5, ((p[2] - p[1])**2.).sum()**.5, ((p[3] - p[2])**2.).sum()**.5
((data.loc[11] - data.loc[0]).values**2).sum()
V0_1 = p[1] - p[0]
V0_11 = p[11] - p[0]
V0_1, V0_11
np.dot(V0_1, V0_11)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_aspect("equal")
plt.plot(data.tx[:10], data.ty[:10], "or-")
plt.grid()
plt.show()

data.tx
corners = np.array(corners)
data2 = pd.DataFrame({"px": corners[:, 0, 0, 1], "py": corners[:, 0, 0, 0]}, index=ids.flatten())
data2.sort_index(inplace=True)
data2
n0 = data2.loc[0]
n1 = data2.loc[1]
d01 = ((n0 - n1).values**2).sum()**.5
    plt.imshow(XB[ii].reshape(40, 45), 'Greys_r')

# Result of G_BA(X_B)
f, axes = plt.subplots(figsize=(7, 7), nrows=1, ncols=2, sharey=True, sharex=True)
for ii in range(2):
    plt.subplot(1, 2, ii + 1)
    plt.suptitle('Result of G_BA')
    plt.imshow(samples_A[ii].reshape(45, 40), 'Greys_r')

# Plot the discriminator and generator loss curves
fig, ax = plt.subplots(figsize=(7, 7))
losses = np.array(losses)
plt.plot(losses.T[0], label='DiscriminatorA')
plt.plot(losses.T[1], label='DiscriminatorB')
plt.plot(losses.T[2], label='Generator')
plt.title("Training Losses")
plt.legend()

# Images belonging to domain A
f, axes = plt.subplots(figsize=(7, 7), nrows=2, ncols=4, sharey=True, sharex=True)
f.tight_layout()
for ii in range(8):
    plt.subplot(2, 4, ii + 1)
    f.suptitle('Domain A')
network_architecture = \
    dict(n_hidden_recog_1=300,  # 1st layer encoder neurons
         n_hidden_gener_1=300,  # 1st layer decoder neurons
         # n_hidden_gener_2=500,  # 2nd layer decoder neurons
         n_input=784,  # MNIST data input (img shape: 28*28)
         n_z=15)  # dimensionality of latent space
# (the assignment target above was truncated in the source; `network_architecture`
# is the name used in the train() call below)

vae, new_cost = train(network_architecture, training_epochs=10)

x_sample = mnist.test.next_batch(100)[0]
x_reconstruct = vae.reconstruct(x_sample)
training_epochs = 10

# plotting the training cost
x = np.arange(0, training_epochs, 1)
plt.title("Cost Graph")
plt.plot(x, new_cost)
plt.show()

# plotting the images before and after reconstruction
plt.figure(figsize=(8, 12))
for i in range(5):
    plt.subplot(5, 2, 2 * i + 1)
    plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
    plt.title("Test input")
    plt.colorbar()
    plt.subplot(5, 2, 2 * i + 2)
    plt.imshow(x_reconstruct[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
    plt.title("Reconstruction")
    plt.colorbar()
file = open('./weights.txt', 'w')  # extract and save the trainable parameters
for v in model.trainable_variables:
    file.write(str(v.name) + '\n')
    file.write(str(v.shape) + '\n')
    file.write(str(v.numpy()) + '\n')
file.close()

###############################################    show   ###############################################

# plot the training and validation accuracy and loss curves
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
def load_and_plot_experiments(
    experiments,
    process_results,
    yaxis,
    xaxis,
    nsamples,
    color,
    marker,
    section="val",
    min_epoch=0,
    max_epoch=None,
    hollow_marker=False,
    show_labels=True,
    plot_fit=False,
):
    """
    Load results and plot list of experiments.

    Args:
        experiments: Dictionary {name: path}, see example above.
        yaxis: The metric to use as yaxis.
        xaxis: The metric to use as xaxis.
        nsamples: Number of samples to use for computing mean+-std.
        section: The section of the results to use.
        min_epoch, max_epoch: The min and max epochs to perform the fit.
    """
    x_all = []
    y_all = []

    # remove alpha for marker labels
    if len(color) == 4:
        text_color = color[:-1]
    else:
        text_color = color

    # In these cases the polynomial fails to capture the loss behaviour over the
    # full range of epochs, so we hardcode the range where the optimum is.
    for path, label in experiments:
        if path == "experiments/softlabels_inaturalist19_beta15":
            min_epoch = 0

        results, epochs, start, end = get_results(path, nsamples, min_epoch, max_epoch, plot_fit=plot_fit)

        # write list of epochs around minimum on dictionary
        if path not in experiment_to_best_epoch:
            experiment_to_best_epoch[path] = [epochs[e] for e in range(start, end + 1)]
        else:
            assert experiment_to_best_epoch[path] == [
                epochs[e] for e in range(start, end + 1)
            ], "Found two different best epochs for run '{}'".format(path)

        x_values = [process_results["x"](results[epochs[i]][xaxis]) for i in range(start, end + 1)]
        y_values = [process_results["y"](results[epochs[i]][yaxis]) for i in range(start, end + 1)]

        x_m = np.median(x_values)
        # x_e = np.std(x_values, ddof=1)
        y_m = np.median(y_values)
        # y_e = np.std(y_values, ddof=1)

        x_all.append(x_m)
        y_all.append(y_m)

        if not hollow_marker:
            plt.plot(x_m, y_m, color=color, marker=marker, zorder=100)
        else:
            plt.plot(x_m, y_m, color=color, marker=marker, zorder=100, markerfacecolor="w")
        if show_labels:
            plt.text(x_m * (1 - 0.01), y_m * (1 - 0.01), label, color=text_color, fontsize=8)

    plt.plot(x_all, y_all, "--", color="k", alpha=0.4, zorder=0, linewidth=1)
def loss_plot(history):
    plt.plot(history.history['loss'], label='training loss')
    plt.plot(history.history['val_loss'], label='validation loss')
    plt.legend()
    plt.savefig('reports/figures/loss_plot.png')
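# Hedged usage sketch for loss_plot: `history` is the object returned by a
# Keras model.fit call with validation data; the tiny model and random data
# below are illustrative stand-ins.
import os
import numpy as np
from tensorflow import keras

os.makedirs('reports/figures', exist_ok=True)  # loss_plot assumes this directory exists

x = np.random.rand(100, 4)
y = np.random.rand(100, 1)
model = keras.Sequential([keras.layers.Dense(8, activation='relu'),
                          keras.layers.Dense(1)])
model.compile(optimizer='adam', loss='mse')
history = model.fit(x, y, validation_split=0.2, epochs=5, verbose=0)
loss_plot(history)  # writes reports/figures/loss_plot.png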
'''
matplotlib.pyplot plotting examples
'''
import numpy as np
import matplotlib.pyplot as plt

plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # set the default (CJK-capable) font
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign from rendering as a box in saved figures

a = np.arange(1, 10, 0.2)
b = np.sin(a)
plt.figure(figsize=(10, 5))  # create the figure
plt.plot(a, b, 'ro')  # plot the data

plt.figure()
plt.plot(a, b, color='r', linestyle='--', linewidth=3.0, label='sin')
c = np.cos(a)
plt.plot(a, c, 'g-.', label='cos')
plt.legend()  # show the legend (labels for the different lines)
plt.xlabel('弧度')  # x-axis label ("radians")
plt.ylabel('正/余弦值')  # y-axis label ("sine/cosine value")

plt.figure()
plt.scatter(a, b)

plt.show()  # display
plt.ylabel('Mean Activation +/- 1 SE')
plt.savefig(os.path.join(args.save_dir, 'mean_activations_line.png'), bbox_inches='tight')
plt.close()

seaborn.boxplot(y=mean)
plt.ylabel('Mean Activation')
plt.savefig(os.path.join(args.save_dir, 'mean_activations_box.png'), bbox_inches='tight')
plt.close()

logging.info('PLOTTING LIFETIME SPARSITY')
lifetime = lifetime_sparsity(acts)
lifetime.sort()
plt.plot(lifetime[::-1])
plt.xlabel('Neuron Index')
plt.ylabel('Lifetime Sparsity')
plt.savefig(os.path.join(args.save_dir, 'lifetime_sparsity_line.png'), bbox_inches='tight')
plt.close()

seaborn.boxplot(y=lifetime)
plt.ylabel('Lifetime Sparsity')
plt.savefig(os.path.join(args.save_dir, 'lifetime_sparsity_box.png'), bbox_inches='tight')
plt.close()

logging.info('PLOTTING POPULATION SPARSITY')
population = population_sparsity(acts)
population.sort()
            a_per = 100 * (i + 1) / len(alphas)
            n_per = 100 * (j + 1) / N_trials
            update_str = 'Alphas done: {:.2f}%, Trials done: {:.2f}%'.format(a_per, n_per)
            print('\r' + update_str, end='')
    print('')

    # return the gradients
    return gradients


if __name__ == '__main__':
    K = 100
    N = 100
    p_true = np.random.dirichlet(np.ones(K))
    x = np.random.multinomial(n=N, pvals=p_true)
    alphas = np.linspace(1.01, 3.0, 100)
    grads = gamma_variance_test(x=x, alphas=alphas, alpha_prior=np.ones(K), N_trials=100)

    # take the variance across samples
    grad_var = np.var(grads, axis=1)
    plt.figure()
    plt.plot(alphas, grad_var[:, 0])
    plt.show()
# We now initialise each of our 50 Markov chains near the
# optimum reported by the minimize function.
nwalkers, ndim = 50, 3
pos = soln.x + 1e-4 * np.random.randn(nwalkers, ndim)

# We now use the emcee library to do the MCMC so that each
# Markov chain takes 4,000 steps (matching the run_mcmc call below;
# the original comment said 5,000).
import emcee

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, args=(x, y, yerr))
sampler.run_mcmc(pos, 4000)

# We can look at the chains by plotting them:
samples = sampler.get_chain()
plt.suptitle("Plotting MCMC chains of the parameters")
plt.subplot(3, 1, 1)
plt.plot(samples[:, :, 0])
plt.xlabel("Step number")
plt.ylabel("MCMC chains of a")
plt.tight_layout()
plt.subplot(3, 1, 2)
plt.plot(samples[:, :, 1])
plt.xlabel("Step number")
plt.ylabel("MCMC chains of b")
plt.tight_layout()
plt.subplot(3, 1, 3)
plt.plot(samples[:, :, 2])
plt.xlabel("Step number")
plt.ylabel("MCMC chains of c")
plt.tight_layout()
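# Follow-up sketch: after burn-in, the chains are usually flattened before
# summarizing the posterior; the discard/thin values below are illustrative
# choices, not prescribed by the original.
flat_samples = sampler.get_chain(discard=500, thin=15, flat=True)
print(flat_samples.shape)  # (n_kept_steps * nwalkers, ndim)
for i, name in enumerate(("a", "b", "c")):
    lo, med, hi = np.percentile(flat_samples[:, i], [16, 50, 84])
    print(name, med, "+{:.3f}/-{:.3f}".format(hi - med, med - lo))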
##import sys
##sys.path.append('numpy_path')
import matplotlib.pyplot as plt
import numpy as np
#import pylab as pl

##plt.plot([1,2,3,4])
##plt.ylabel('some numbers')
##plt.show()

x = [1, 2, 3, 4, 5]
y = [1, 4, 9, 16, 25]
#plt.title('Plot of y vs. x')
plt.ylabel('square')
plt.plot(x, y)
plt.show()
reg.fit(feature_train, target_train)
print(reg.score(feature_train, target_train))
print(reg.score(feature_test, target_test))
print(reg.coef_)
print(reg.intercept_)

# draw the scatterplot, with color-coded training and testing points
for feature, target in zip(feature_test, target_test):
    plt.scatter(feature, target, color=test_color)
for feature, target in zip(feature_train, target_train):
    plt.scatter(feature, target, color=train_color)

# labels for the legend
plt.scatter(feature_test[0], target_test[0], color=test_color, label="test")
plt.scatter(feature_train[0], target_train[0], color=train_color, label="train")  # was re-plotting a test point

# draw the regression line, once it's coded
try:
    plt.plot(feature_test, reg.predict(feature_test))
except NameError:
    pass

plt.xlabel(features_list[1])
plt.ylabel(features_list[0])

reg.fit(feature_test, target_test)
plt.plot(feature_train, reg.predict(feature_train), color="r")
plt.legend()
plt.show()