def jump_afteraxes(current_data):
    # Plot position of jump on plot
    mpl.hold(True)
    mpl.plot([0.5, 0.5], [-10.0, 10.0], 'k--')
    mpl.plot([0.0, 1.0], [0.0, 0.0], 'k--')
    mpl.hold(False)
    mpl.title('')
def compare_chebhist(dname, mylambda, c, Nbin=25):

    if mylambda == 'Do not exist':
        print('--!!Warning: eig file does not exist, cannot display comparison histogram')
    else:
        mylambda = 1 - mylambda
        lmin = max(min(mylambda), -1)
        lmax = min(max(mylambda), 1)

        cheb_file_content = '\n'.join([str(st) for st in c])

        x = np.linspace(lmin, lmax, Nbin + 1)
        y = plot_chebint(c, x)
        u = (x[1:] + x[:-1]) / 2
        v = y[1:] - y[:-1]

        plt.clf()
        plt.hold(True)
        plt.hist(mylambda, Nbin)
        plt.plot(u, v, "r.", markersize=10)
        plt.hold(False)
        plt.show()
        filename = 'data/' + dname + '.png'
        plt.savefig(filename)

        cheb_filename = 'data/' + dname + '.cheb'
        with open(cheb_filename, 'w+') as f:
            f.write(cheb_file_content)
def test_manual(self):
    import numpy as np
    import matplotlib.pyplot as plt
    from os import path

    test_dir = path.dirname(path.abspath(__file__)) + '/data/'
    dm = DirectionMap(test_dir + 'test.dm', fg=[0, 0, 0, 1], bg=[1, 1, 1, 1])
    lfm = LowFlowMap(test_dir + 'test.lfm', fg=[1, 0, 0, 1])
    hcm = HighCurvatureMap(test_dir + 'test.hcm', fg=[0, 0, 1, 1])
    qm = QualityMap(test_dir + 'test.qm', fg=[0, 1, 0, 1])
    dm.create()
    lfm.create()
    hcm.create()
    qm.create()

    fig = plt.figure()
    ax = plt.gca()
    ax.set_frame_on(False)
    ax.set_axis_off()
    plt.hold(True)
    dm.plot(ax=ax)
    lfm.plot(ax=ax, alpha=.5)
    hcm.plot(ax=ax, alpha=.5)
    qm.plot(ax=ax, alpha=.3)
    plt.show()
def marking_init(enhanced_img, mep, mbp):
    # Mark initially extracted minutiae points
    img_thin = np.array(enhanced_img[:])  # convert image to array for marking points
    fig = plt.figure(figsize=(10, 8), dpi=300)
    num1 = len(mep)
    num2 = len(mbp)
    plt.imshow(img_thin, cmap=cm.Greys_r)
    plt.title('mark extracted points')
    plt.hold(True)
    for i in range(num1):
        xy = mep[i, :]
        u, v = xy[0], xy[1]
        if (u != 0.0) & (v != 0.0):
            plt.plot(v, u, 'r.', markersize=7)
    plt.hold(True)
    for i in range(num2):
        xy = mbp[i, :]
        u, v = xy[0], xy[1]
        if (u != 0.0) & (v != 0.0):
            plt.plot(v, u, 'c+', markersize=7)
    plt.show()
    cv2.imwrite("initial_extraction.png", img_thin)
def visualize(u1, t1, u2, t2, U, omega):
    plt.figure(1)
    plt.plot(t1, u1, 'r--o')
    t_fine = np.linspace(0, t1[-1], 1001)  # fine mesh for the exact solution
    u_e = u_exact(t_fine, U, omega)
    plt.hold('on')
    plt.plot(t_fine, u_e, 'b-')
    plt.legend([u'approximate', u'exact'], loc='upper left')
    plt.xlabel('$t$')
    plt.ylabel('$u$')
    tau = t1[1] - t1[0]
    plt.title('$\\tau = $ %g' % tau)
    umin = 1.2 * u1.min(); umax = -umin
    plt.axis([t1[0], t1[-1], umin, umax])
    plt.savefig('tmp1.png'); plt.savefig('tmp1.pdf')

    plt.figure(2)
    plt.plot(t2, u2, 'r--o')
    t_fine = np.linspace(0, t2[-1], 1001)  # fine mesh for the exact solution
    u_e = u_exact(t_fine, U, omega)
    plt.hold('on')
    plt.plot(t_fine, u_e, 'b-')
    plt.legend([u'approximate', u'exact'], loc='upper left')
    plt.xlabel('$t$')
    plt.ylabel('$u$')
    tau = t2[1] - t2[0]
    plt.title('$\\tau = $ %g' % tau)
    umin = 1.2 * u2.min(); umax = -umin
    plt.axis([t2[0], t2[-1], umin, umax])
    plt.savefig('tmp2.png'); plt.savefig('tmp2.pdf')
def plot_times(bag, cloud_topics=None, markersize=2):
    if cloud_topics is None:
        cloud_topics = ['/camera1/depth_registered/points',
                        '/camera2/depth_registered/points']
    times = []
    ros_times = []
    topic_times = {1: [], 2: []}
    topic_rostimes = {1: [], 2: []}
    for (topic, cloud, time) in bag.read_messages(topics=cloud_topics):
        # assert cloud.header.stamp.to_nsec() == time.to_nsec()
        ct = cloud.header.stamp.to_sec()
        rt = time.to_sec()
        times.append(ct)
        ros_times.append(rt)
        if topic.find('1') >= 0:
            topic_times[1].append(ct)
            topic_rostimes[1].append(rt)
        else:
            topic_times[2].append(ct)
            topic_rostimes[2].append(rt)
    ppl.plot(topic_times[1], 's', markersize=markersize)
    ppl.hold(True)
    ppl.plot(topic_times[2], 'ro', markersize=markersize)
    ppl.show()
def plotFit(min_x, max_x, mu, sigma, theta, p):
    #PLOTFIT Plots a learned polynomial regression fit over an existing figure.
    #Also works with linear regression.
    #   PLOTFIT(min_x, max_x, mu, sigma, theta, p) plots the learned polynomial
    #   fit with power p and feature normalization (mu, sigma).

    # Hold on to the current figure
    plt.hold(True)

    # We plot a range slightly bigger than the min and max values to get
    # an idea of how the fit will vary outside the range of the data points
    x = np.arange(min_x - 15, max_x + 25, 0.05)  # 1D vector

    # Map the X values
    X_poly = pf.polyFeatures(x, p)
    X_poly = (X_poly - mu) / sigma

    # Add ones
    X_poly = np.column_stack((np.ones((x.shape[0], 1)), X_poly))

    # Plot
    plt.plot(x, np.dot(X_poly, theta), '--', linewidth=2)

    # Hold off the current figure
    plt.hold(False)
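# Usage sketch for plotFit (added illustration, not from the original source).
# The module name polyFeatures and the zero theta are assumptions; in practice
# theta would come from a trained linear-regression model.
import numpy as np
import matplotlib.pyplot as plt
import polyFeatures as pf  # hypothetical module providing polyFeatures(x, p)

X = np.array([1.0, 2.5, 4.0, 5.5])   # toy training inputs
y = np.array([0.5, 1.8, 2.2, 3.9])   # toy targets
p = 3
X_poly = pf.polyFeatures(X, p)
mu, sigma = X_poly.mean(axis=0), X_poly.std(axis=0)
theta = np.zeros(p + 1)              # stand-in for a learned parameter vector
plt.plot(X, y, 'rx', markersize=10)  # data points the fit is drawn over
plotFit(X.min(), X.max(), mu, sigma, theta, p)
plt.show()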
def make_intens_all(w1, w2):
    fig = plt.figure(figsize=(6., 6.))
    gs = gridspec.GridSpec(1, 1)
    gs.update(left=0.13, right=0.985, bottom=0.13, top=0.988)
    ax = plt.subplot(gs[0])
    plt.minorticks_on()
    make_contours()
    labels = ["A", "B", "C", "D"]
    for i, field in enumerate(fields):
        os.chdir(os.path.join(data_dir, "combined_{0}".format(field)))
        image = "collapsed_w{0}_{1}.fits".format(w1, w2)
        intens = pf.getdata(image, verify=False)
        extent = calc_extent(image)
        extent = offset_extent(extent, field)
        plt.imshow(intens, cmap="bone", origin="bottom", extent=extent,
                   vmin=-20, vmax=80)
        verts = calc_verts(intens, extent)
        path = Path(verts, [Path.MOVETO, Path.LINETO, Path.LINETO,
                            Path.LINETO, Path.CLOSEPOLY])
        patch = patches.PathPatch(path, facecolor='none', lw=2, edgecolor="r")
        ax.add_patch(patch)
        xtext, ytext = np.mean(verts[:-1], axis=0)
        plt.text(xtext - 8, ytext + 8, labels[i], color="r", fontsize=35,
                 fontweight='bold', va='top')
        plt.hold(True)
    plt.xlim(26, -38)
    plt.ylim(-32, 32)
    plt.xlabel("X [kpc]")
    plt.ylabel("Y [kpc]")
    # plt.show()
    plt.savefig(os.path.join(plots_dir, "muse_fields.eps"), dpi=60, format="eps")
    plt.savefig(os.path.join(plots_dir, "muse_fields.png"), dpi=200)
    return
def test_prop(self):
    N = 800.0
    V = linspace(5.0, 51.0, 50)
    rho = 1.2255
    beta = 45.0
    J = list()
    CT = list()
    CP = list()
    effy = list()
    for v in V:
        data = self.analyze_prop(beta, N, v, rho)
        J.append(data[2])
        CT.append(data[3])
        CP.append(data[4])
        effy.append(data[5])
    plt.figure(1)
    plt.grid(True)
    plt.hold(True)
    plt.plot(J, CT, 'o-')
    plt.xlabel('J')
    plt.plot(J, CP, 'ro-')
    plt.axis([0, 2.5, 0, 0.15])
    plt.figure(2)
    plt.plot(J, effy, 'gs-')
    plt.hold(True)
    plt.grid(True)
    plt.axis([0, 2.5, 0, 1.0])
    plt.xlabel('advance ratio')
    plt.ylabel('efficiency')
    plt.show()
def arrowsSequence(X, Y, color=None):
    '''Draws an arrows sequence from 2d coordinates'''
    if color is None:
        color = ['b', 'g', 'k']
    if isinstance(X[0], list):
        plt.figure()
        plt.hold(True)
        for i in range(len(X)):
            x = np.array(X[i])
            y = np.array(Y[i])
            plt.plot(x[0], y[0], 'ro')
            plt.plot(x, y, color=color[i])
            plt.quiver(x[:-1], y[:-1], x[1:] - x[:-1], y[1:] - y[:-1],
                       scale_units='xy', angles='xy', scale=1)
        plt.axis((0, 1020, 0, 800))
        plt.show()
    else:
        x = np.array(X)
        y = np.array(Y)
        plt.figure()
        plt.hold(True)
        plt.plot(x[0], y[0], 'r')
        plt.plot(x, y, 'b')
        plt.quiver(x[:-1], y[:-1], x[1:] - x[:-1], y[1:] - y[:-1],
                   scale_units='xy', angles='xy', scale=1)
        plt.axis((0, 1020, 0, 800))
        plt.show()
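# Usage sketch for arrowsSequence (added illustration, not original code),
# assuming numpy and pyplot are imported as np/plt as in the function above.
# A single flat trajectory takes the 'else' branch; a list of lists would
# take the multi-trajectory branch.
X = [100, 300, 500, 700]
Y = [100, 400, 200, 600]
arrowsSequence(X, Y)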
def smooth_demo():
    Smoother = SmoothClass()
    xfile = ArrayClass("DatasetX")
    xn = np.array(xfile.array)

    plt.subplot(211)
    #plt.plot(np.ones(ws))
    windows = ['blackman']
    #windows = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
    plt.hold(True)
    plt.axis([0, 30, 0, 1.1])
    plt.legend(windows)
    plt.title("The smoothing windows")

    plt.subplot(212)
    plt.plot(xn)
    plt.plot(Smoother.smooth(xn, 10, 'blackman'))
def gauge_after_axes(cd):
    if cd.gaugeno in [1, 2, 3, 4]:
        axes = plt.gca()

        # # Add Kennedy gauge data
        # kennedy_gauge = kennedy_gauges[gauge_name_trans[cd.gaugeno]]
        # axes.plot(kennedy_gauge['t'] - seconds2days(date2seconds(gauge_landfall[0])),
        #           kennedy_gauge['mean_water'] + kennedy_gauge['depth'], 'k-',
        #           label='Gauge Data')

        # Add GeoClaw gauge data
        geoclaw_gauge = cd.gaugesoln
        axes.plot(seconds2days(geoclaw_gauge.t - date2seconds(gauge_landfall[1])),
                  geoclaw_gauge.q[3, :] + gauge_surface_offset[0], 'b--',
                  label="GeoClaw")

        # # Add ADCIRC gauge data
        # ADCIRC_gauge = ADCIRC_gauges[kennedy_gauge['gauge_no']]
        # axes.plot(seconds2days(ADCIRC_gauge[:, 0] - gauge_landfall[2]),
        #           ADCIRC_gauge[:, 1] + gauge_surface_offset[1], 'r-.',
        #           label="ADCIRC")

        # Fix up plot
        axes.set_title('Station %s' % cd.gaugeno)
        axes.set_xlabel('Days relative to landfall')
        axes.set_ylabel('Surface (m)')
        axes.set_xlim([-2, 1])
        axes.set_ylim([-1, 5])
        axes.set_xticks([-2, -1, 0, 1])
        axes.set_xticklabels([r"$-2$", r"$-1$", r"$0$", r"$1$"])
        axes.grid(True)
        axes.legend()

    plt.hold(False)
def createResponsePlot(dataframe, plotdir):
    mag = dataframe['MAGPDE'].values
    response = dataframe['TFIRSTPUB'].values / 60.0
    response[response > 60] = 60  # anything over 60 minutes capped at 60 minutes
    imag5 = (mag >= 5.0).nonzero()[0]
    imag55 = (mag >= 5.5).nonzero()[0]
    fig = plt.figure(figsize=(8, 6))
    n, bins, patches = plt.hist(response[imag5], color='g', bins=60, range=(0, 60))
    plt.hold(True)
    plt.hist(response[imag55], color='b', bins=60, range=(0, 60))
    plt.xlabel('Response Time (min)')
    plt.ylabel('Number of earthquakes')
    plt.xticks(np.arange(0, 65, 5))
    ymax = text.ceilToNearest(max(n), 10)
    yinc = ymax / 10
    plt.yticks(np.arange(0, ymax + yinc, yinc))
    plt.grid(True, which='both')
    plt.hold(True)
    x = [20, 20]
    y = [0, ymax]
    plt.plot(x, y, 'r', linewidth=2, zorder=10)
    s1 = 'Magnitude 5.0, Events = %i' % (len(imag5))
    s2 = 'Magnitude 5.5, Events = %i' % (len(imag55))
    plt.text(35, .85 * ymax, s1, color='g')
    plt.text(35, .75 * ymax, s2, color='b')
    plt.savefig(os.path.join(plotdir, 'response.pdf'))
    plt.savefig(os.path.join(plotdir, 'response.png'))
    plt.close()
    print('Saving response.pdf')
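# Usage sketch for createResponsePlot (added illustration, not original code):
# a synthetic catalog with the two columns the function reads (MAGPDE and
# TFIRSTPUB in seconds). Assumes the module's own helpers (the `text` module
# with ceilToNearest, plus os/np/plt imports) are available as above.
import tempfile
import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
df = pd.DataFrame({'MAGPDE': rng.uniform(4.5, 7.0, 500),
                   'TFIRSTPUB': rng.uniform(0, 5400, 500)})  # 0-90 minutes
createResponsePlot(df, tempfile.mkdtemp())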
def update_figures(self):
    plt.figure(self.figure.number)
    x = np.arange(0, 256, 0.1)  # artificial x-axis
    # self.figure.gca().cla()  # clearing the figure, just to be sure
    # plt.subplot(411)
    plt.plot(self.bins, self.hist, 'k')
    plt.hold(True)
    if self.rv_healthy and self.rv_hypo and self.rv_hyper:
        healthy_y = self.rv_healthy.pdf(x)
        if self.win.params['unaries_as_cdf']:
            hypo_y = (1 - self.rv_hypo.cdf(x)) * self.rv_healthy.pdf(self.rv_healthy.mean())
            hyper_y = self.rv_hyper.cdf(x) * self.rv_healthy.pdf(self.rv_healthy.mean())
        else:
            hypo_y = self.rv_hypo.pdf(x)
            hyper_y = self.rv_hyper.pdf(x)
        y_max = max(healthy_y.max(), hypo_y.max(), hyper_y.max())
        fac = self.hist.max() / y_max
        plt.plot(x, fac * healthy_y, 'g', linewidth=2)
        plt.plot(x, fac * hypo_y, 'b', linewidth=2)
        plt.plot(x, fac * hyper_y, 'r', linewidth=2)
    plt.title('all PDFs')
    ax = plt.axis()
    plt.axis([0, 256, ax[2], ax[3]])
    plt.hold(False)
    self.canvas.draw()
def plot_categorical_scatter_with_mean(vals, categoryLabels, jitter=True,
                                       colours=None, xlabel=None, ylabel=None,
                                       title=None):
    import matplotlib.colors
    import scipy.stats
    import pdb

    numCategories = len(vals)
    plt.hold(True)
    if colours is None:
        colours = plt.cm.gist_rainbow(np.linspace(0, 1, numCategories))
    for category in range(numCategories):
        edgeColour = matplotlib.colors.colorConverter.to_rgba(colours[category],
                                                              alpha=0.5)
        xval = (category + 1) * np.ones(len(vals[category]))
        if jitter:
            jitterAmt = np.random.random(len(xval))
            xval = xval + (0.3 * jitterAmt) - 0.15
        #pdb.set_trace()
        plt.plot(xval, vals[category], 'o', mec=edgeColour, mew=4,
                 mfc='none', ms=16)
        mean = np.mean(vals[category])
        sem = scipy.stats.sem(vals[category])
        print(mean, sem)
        plt.plot(category + 1, mean, 'o', color='k', mec=colours[category], ms=20)
        plt.errorbar(category + 1, mean, yerr=sem, color=colours[category])
    plt.xlim(0, numCategories + 1)
    plt.ylim(0, 1)
    ax = plt.gca()
    ax.set_xticks(range(1, numCategories + 1))
    ax.set_xticklabels(categoryLabels, fontsize=16)
    if xlabel is not None:
        plt.xlabel(xlabel, fontsize=20)
    if ylabel is not None:
        plt.ylabel(ylabel, fontsize=20)
    if title is not None:
        plt.title(title)
    plt.show()
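# Usage sketch (added illustration, not original code): two categories of
# proportions in [0, 1], matching the function's hard-coded ylim of (0, 1).
import numpy as np
vals = [np.random.uniform(0.2, 0.6, 20),   # e.g. control group
        np.random.uniform(0.5, 0.9, 20)]   # e.g. treatment group
plot_categorical_scatter_with_mean(vals, ['control', 'treatment'],
                                   xlabel='group',
                                   ylabel='proportion correct')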
def plot(self, pth='', points=True, line=True):
    # get predicted data
    numpnts = 1000
    x = np.linspace(0., 1., num=numpnts)
    y = self._fun(self.params, x)

    # add noise to data to increase visibility
    l = len(self.x)
    yrnd = np.random.uniform(-0.05, 0.05, l)

    # plot setup
    plt.hold(True)  # just in case (matlab habit)
    plt.grid(True)

    # plot line
    if line:
        plt.plot(x, y, zorder=1)
    if points:
        plt.scatter(self.x, self.orig_y + yrnd, alpha=0.6, lw=0,
                    c=[0.3, 0.3, 0.3])

    # aesthetics
    maxval = np.max(self.x)
    uplim = min([1.0, np.round(maxval + 0.5, decimals=1)])
    plt.xlim([0.0, uplim])
    plt.ylim([-0.1, 1.1])
    plt.xlabel('stimulus intensity')
    plt.ylabel('correctness')

    # save figure
    if pth:
        tempfname = os.path.join(pth, 'weibull_fit_temp.png')
        plt.savefig(tempfname, dpi=120)
        plt.close()
        return tempfname
def internal_surf_afteraxes(cd):
    plt.hold(True)
    plt.title('')
    plt.ylabel('m')
    plt.subplots_adjust(hspace=0.05)
    plt.plot([multilayer_data.bathy_location, multilayer_data.bathy_location],
             bottom_surf_zoomed, '--k')
    plt.hold(False)
def run_Lagrange_interp_abs_Cheb(N, ymin=None, ymax=None):
    f = sp.Abs(1 - 2*x)
    fn = sp.lambdify([x], f)
    psi, points = Lagrange_polynomials(x, N, [0, 1],
                                       point_distribution='Chebyshev')
    u = interpolation(f, psi, points)
    comparison_plot(f, u, Omega=[0, 1],
                    filename='Lagrange_interp_abs_Cheb_%d' % (N+1),
                    plot_title='Interpolation by Lagrange polynomials '
                               'of degree %d' % N,
                    ymin=ymin, ymax=ymax)
    print('Interpolation points:', points)

    # Make figures of Lagrange polynomials (psi)
    plt.figure()
    xcoor = np.linspace(0, 1, 1001)
    legends = []
    for i in (2, (N+1)//2 + 1):
        fn = sp.lambdify([x], psi[i])
        ycoor = fn(xcoor)
        plt.plot(xcoor, ycoor)
        legends.append(r'$\psi_%d$' % i)
        plt.hold('on')
    plt.legend(legends)
    plt.plot(points, [0]*len(points), 'ro')
    #if ymin is not None and ymax is not None:
    #    axis([xcoor[0], xcoor[-1], ymin, ymax])
    plt.savefig('Lagrange_basis_Cheb_%d.pdf' % (N+1))
    plt.savefig('Lagrange_basis_Cheb_%d.png' % (N+1))
def update_figures(self):
    plt.figure(self.figure.number)
    x = np.arange(self.data.min(), self.data.max())  # artificial x-axis
    # self.figure.gca().cla()  # clearing the figure, just to be sure
    # plt.subplot(411)
    plt.plot(self.bins, self.hist, 'k')
    plt.hold(True)
    # if self.rv_heal is not None and self.rv_hypo is not None and self.rv_hyper is not None:
    if self.models is not None:
        healthy_y = self.rv_heal.pdf(x)
        if self.unaries_as_cdf:
            hypo_y = (1 - self.rv_hypo.cdf(x)) * self.rv_heal.pdf(self.rv_heal.mean())
            hyper_y = self.rv_hyper.cdf(x) * self.rv_heal.pdf(self.rv_heal.mean())
        else:
            hypo_y = self.rv_hypo.pdf(x)
            hyper_y = self.rv_hyper.pdf(x)
        y_max = max(healthy_y.max(), hypo_y.max(), hyper_y.max())
        fac = self.hist.max() / y_max
        plt.plot(x, fac * healthy_y, 'g', linewidth=2)
        plt.plot(x, fac * hypo_y, 'b', linewidth=2)
        plt.plot(x, fac * hyper_y, 'r', linewidth=2)

    if self.params and 'win_level' in self.params and 'win_width' in self.params:
        ax = plt.axis()
        border = 5
        xmin = self.params['win_level'] - self.params['win_width'] / 2 - border
        xmax = self.params['win_level'] + self.params['win_width'] / 2 + border
        plt.axis([xmin, xmax, ax[2], ax[3]])
    plt.gca().tick_params(direction='in', pad=1)
    plt.hold(False)
    # plt.grid(True)
    self.canvas.draw()
def PlotEDepSummary(gFiles, nFiles, figureName='EDepSummary.png',
                    tParse=GetThickness, histKey='eDepHist'):
    """ PlotEDepSummary
    Plots the energy deposition summary
    """
    # Extracting the average values
    gT = list()
    gDep = list()
    gDepError = list()
    nT = list()
    nDep = list()
    nDepError = list()
    for fname in gFiles:
        f = TFile(fname, 'r')
        hist = f.Get(histKey)
        gT.append(GetThickness(fname))
        gDep.append(hist.GetMean())
        gDepError.append(hist.GetMeanError())
    for fname in nFiles:
        f = TFile(fname, 'r')
        hist = f.Get(histKey)
        nT.append(GetThickness(fname))
        nDep.append(hist.GetMean())
        nDepError.append(hist.GetMeanError())
    # Plotting
    plt.errorbar(gT, gDep, yerr=gDepError, fmt='r+')
    plt.hold(True)
    plt.errorbar(nT, nDep, yerr=nDepError, fmt='go')
    plt.xlabel("Thickness (mm)")
    plt.ylabel("Average Energy Deposition (MeV)")
    plt.legend(["Co-60", "Cf-252"])
    plt.xscale("log")
    plt.yscale("log")
    plt.grid(True)
    plt.savefig(figureName)
def plot(self, nrows=1, ncols=1, iplot=1, ttitle=''):
    """ Plots the rooted tree.

        *INPUT*: (optional)
            * nrows, ncols -- number of rows and columns of subplots
              in the figure
            * iplot        -- index of the subplot in which to plot
              this tree

        These are only necessary if plotting more than one tree
        in a single figure using subplot.

        *OUTPUT*: None.

        The plot is created recursively by plotting the root, parsing the
        subtrees, plotting the subtrees' roots, and calling _plot_subtree
        on each child.
    """
    import matplotlib.pyplot as pl
    if iplot == 1:
        pl.clf()
    pl.subplot(nrows, ncols, iplot)
    pl.hold(True)
    pl.scatter([0], [0])
    if self != 'T':
        self._plot_subtree(0, 0, 1.)

    fs = int(np.ceil(30. / nrows))
    pl.title(ttitle, {'fontsize': fs})
    pl.xticks([])
    pl.yticks([])
    pl.hold(False)
    pl.axis('off')
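# Usage sketch (added illustration, not original code), assuming this method
# lives on a rooted-tree class constructed from a bracket string, as in e.g.
# the nodepy package's RootedTree; the import path is an assumption.
#
#   from nodepy.rooted_trees import RootedTree
#   for i, s in enumerate(('{T}', '{{T}}', '{{T}T}', '{{{T}}}')):
#       RootedTree(s).plot(nrows=2, ncols=2, iplot=i + 1, ttitle=s)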
def plot_conv(all_JSD, all_JSDs, rest_type):
    fold = len(all_JSD)
    rounds = len(all_JSDs[0])
    n_rest = len(rest_type)
    new_JSD = [[] for i in range(n_rest)]
    for i in range(len(all_JSD)):
        for j in range(n_rest):
            new_JSD[j].append(all_JSD[i][j])
    JSD_dist = [[] for i in range(n_rest)]
    JSD_std = [[] for i in range(n_rest)]
    for rest in range(n_rest):
        for f in range(fold):
            temp_JSD = all_JSDs[f][:, rest]
            JSD_dist[rest].append(np.mean(temp_JSD))
            JSD_std[rest].append(np.std(temp_JSD))
    plt.figure(figsize=(10, 5 * n_rest))
    x = np.arange(100. / fold, 101., fold)
    colors = ['red', 'blue', 'green', 'black', 'magenta', 'gold', 'navy']
    for i in range(n_rest):
        plt.subplot(n_rest, 1, i + 1)
        plt.plot(x, new_JSD[i], 'o-', color=colors[i], label=rest_type[i])
        plt.hold(True)
        plt.plot(x, JSD_dist[i], 'o', color=colors[i], label=rest_type[i])
        plt.fill_between(x,
                         np.array(JSD_dist[i]) + np.array(JSD_std[i]),
                         np.array(JSD_dist[i]) - np.array(JSD_std[i]),
                         color=colors[i], alpha=0.2)
        plt.xlabel('dataset (%)')
        plt.ylabel('JSD')
        plt.legend(loc='best')
    plt.tight_layout()
    plt.savefig('convergence.pdf')
def plot_meth_and_twobeds(coverage, methylated, mod):
    l1 = len(mod.bed_list_gt)
    l2 = len(mod.bed_list_h)
    n_cells = np.shape(coverage)[0]
    plt.figure()
    # Get current size
    fig_size_temp = plt.rcParams["figure.figsize"]
    fig_size = fig_size_temp
    fig_size[0] = 500
    fig_size[1] = 40
    plt.rcParams["figure.figsize"] = fig_size
    fig, axarr = plt.subplots(n_cells + l1 + l2 + 1, 1, sharex=True)
    plt.hold(True)
    plot_meth(axarr[:n_cells], coverage, methylated)
    for i in range(0, l1):
        axn = n_cells + i
        plot_bed([axarr[axn]], [mod.bed_list_gt[i]])
        axarr[axn].set_ylabel(mod.state_name_gt[i])
    for i in range(0, l2):
        axn = n_cells + l1 + 1 + i
        plot_bed([axarr[axn]], [mod.bed_list_h[i]])
        axarr[axn].set_ylabel(mod.state_name_h[i])
    fig.savefig(mod.path_name + mod.bed_title
                + 'l1 = ' + str(l1) + 'l2 = ' + str(l2)
                + 'n_cells = ' + str(n_cells)
                + '_l=' + str(mod.l) + '_l_test=' + str(mod.l_test) + '.pdf')
    plt.hold(False)
    plt.rcParams["figure.figsize"] = fig_size_temp
    plt.close(fig)
def plot(x, dict_res, plot_func):
    colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3']
    plt.hold(True)
    k = 0
    for (alg, dtype) in dict_res.keys():
        for comp in dict_res[(alg, dtype)]:
            label = '{0:4s}'.format(alg.upper()) + ' - '
            if comp:
                label += '{0:5s} - '.format('comp.')
                linestyle = '-'
            else:
                label += '{0:5s} - '.format('QR')
                linestyle = '--'
            label += dtype
            y = dict_res[(alg, dtype)][comp]
            valid = np.isfinite(y).flatten()
            if not np.any(valid):
                continue
            plot_func(x[valid], y[valid], label=label, linestyle=linestyle,
                      linewidth=2, marker='o', markeredgecolor='none',
                      color=colors[k])
        k += 1
    plt.hold(False)
def band_select(spikeTimeStamps, eventOnsetTimes, amplitudes, bandwidths,
                timeRange, fullRange=[0.0, 2.0]):
    numBands = np.unique(bandwidths)
    numAmps = np.unique(amplitudes)
    spikeArray = np.zeros((len(numBands), len(numAmps)))
    errorArray = np.zeros_like(spikeArray)
    trialsEachCond = behavioranalysis.find_trials_each_combination(
        bandwidths, numBands, amplitudes, numAmps)
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = \
        spikesanalysis.eventlocked_spiketimes(spikeTimeStamps, eventOnsetTimes,
                                              fullRange)
    spikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, timeRange)
    baseTimeRange = [timeRange[1] + 0.5, fullRange[1]]
    baseSpikeCountMat = spikesanalysis.spiketimes_to_spikecounts(
        spikeTimesFromEventOnset, indexLimitsEachTrial, baseTimeRange)
    baselineSpikeRate = np.mean(baseSpikeCountMat) / (baseTimeRange[1] - baseTimeRange[0])
    plt.hold(True)
    for amp in range(len(numAmps)):
        trialsThisAmp = trialsEachCond[:, :, amp]
        for band in range(len(numBands)):
            trialsThisBand = trialsThisAmp[:, band]
            if spikeCountMat.shape[0] != len(trialsThisBand):
                spikeCountMat = spikeCountMat[:-1, :]
                print("FIXME: Using bad hack to make event onset times equal number of trials")
            thisBandCounts = spikeCountMat[trialsThisBand].flatten()
            spikeArray[band, amp] = np.mean(thisBandCounts)
            errorArray[band, amp] = stats.sem(thisBandCounts)
    return spikeArray, errorArray, baselineSpikeRate
def iterer_los_plot(x0, max_error, max_iter, f):
    # To visualize the iteration process we store the points along the way
    X = array(x0)
    F = array(0.0)
    # Set the initial error to 1.0 so the while loop is guaranteed to start
    error = 1.0
    j = 0
    while error > max_error and j < max_iter:
        x1 = f(x0)
        X = append(X, [x0, x0])
        F = append(F, [x0, x1])
        error = abs(x1 - x0)
        x0 = x1
        j = j + 1

    # Report if we have used up the maximum number of iterations
    # without the error dropping below max_error
    if j == max_iter:
        print('Maximum number of iterations exhausted.\n')
    else:
        print('Solution found: x=%3.12f' % (x0))

    # Plot x, f(x) and the iteration process (dashed)
    x = arange(0, 1, 0.001)
    figure('Iterations')
    hold(True)
    plot(x, f(x), 'b', label=r'$f(x)$')
    plot(X, F, 'k--', label='iterations')
    plot(x, x, 'g', label=r'$y=x$')
    legend(loc='best')
    show()

    # Return the solution
    return x0
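# Usage sketch (added illustration, not original code): fixed-point iteration
# for x = cos(x) on [0, 1], which converges to x ~ 0.739085. Assumes the
# pylab-style names used above (array, append, figure, hold, plot, ...) are
# in scope, e.g. via `from pylab import *`.
from numpy import cos
root = iterer_los_plot(x0=0.5, max_error=1e-10, max_iter=100, f=cos)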
def showResults(challenger_data, model):
    ''' Show the original data, and the resulting logit-fit'''
    temperature = challenger_data[:, 0]
    failures = challenger_data[:, 1]

    # First plot the original data
    plt.figure()
    setFonts()
    sns.set_style('darkgrid')
    np.set_printoptions(precision=3, suppress=True)
    plt.scatter(temperature, failures, s=200, color="k", alpha=0.5)
    plt.yticks([0, 1])
    plt.ylabel("Damage Incident?")
    plt.xlabel("Outside Temperature [F]")
    plt.title("Defects of the Space Shuttle O-Rings vs temperature")
    plt.tight_layout()

    # Plot the fit
    x = np.arange(50, 85)
    alpha = model.params[0]
    beta = model.params[1]
    y = logistic(x, beta, alpha)
    plt.hold(True)
    plt.plot(x, y, 'r')
    plt.xlim([50, 85])

    outFile = 'ChallengerPlain.png'
    showData(outFile)
def plot_filter_characteristics(self):
    w, h = freqz(self.freq_filter.num, self.freq_filter.denom)
    plt.figure(1)
    plt.subplot(2, 1, 1)
    plt.hold(True)
    powa = plt.plot((self.filter_parameters.sample_rate * 0.5 / pi) * w,
                    abs(h), 'b-', label='Magnitude response')
    plt.title('Filter characteristics')
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('Amplitude')
    plt.twinx(ax=None)
    angles = unwrap(angle(h))
    znie = plt.plot((self.filter_parameters.sample_rate * 0.5 / pi) * w,
                    angles, 'g-', label='Phase response')
    plt.ylabel('Phase')
    plt.grid()
    tekst = powa + znie
    wybierz = [l.get_label() for l in tekst]
    plt.legend(tekst, wybierz, loc='best')

    plt.subplot(2, 1, 2)
    w2, gd = group_delay((self.freq_filter.num, self.freq_filter.denom))
    plt.plot((self.filter_parameters.sample_rate * 0.5 / pi) * w2, gd)
    plt.grid()
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('Group delay [samples]')
    plt.title('Filter group delay')
    plt.show()
def write_final(newmep, newmbp, enhanced_img):
    eminutiae_array = removezero(newmep)
    bminutiae_array = removezero(newmbp)
    num1 = len(eminutiae_array)
    num2 = len(bminutiae_array)
    img_thin = np.array(enhanced_img[:])
    fig = plt.figure(figsize=(15, 12), dpi=300)
    plt.imshow(img_thin, cmap=cm.Greys_r)
    plt.title('minutiae marking')
    plt.hold(True)
    for i in range(num1):
        xy = eminutiae_array[i, :]
        u, v = xy[0], xy[1]
        plt.plot(v, u, 'r.', markersize=10)
    plt.hold(True)
    for i in range(num2):
        xy = bminutiae_array[i, :]
        u, v = xy[0], xy[1]
        plt.plot(v, u, 'c+', markersize=15)
    plt.show()
    cv2.imwrite("final_minutiae_image.png", img_thin)
def plotPaper(appeared, citedBy, pubLabel):
    hold(True)
    yrange = range(appeared, curDate + 1)
    months = len(yrange)
    cites = [0] * months
    citeVenues = {}
    for citation in citedBy:
        (venue, date) = citation
        if venue in citeVenues:
            citeVenues[venue] += 1
        else:
            citeVenues[venue] = 1
        for i in range(date - appeared, months):
            cites[i] += 1
        for i in range(date - startDate, curDate - startDate + 1):
            citations[i] += 1
    citeAx.plot(yrange, cites, label=pubLabel)
    logCiteAx.semilogy(yrange, cites, label=pubLabel)
    venues[pubLabel] = citeVenues
    articleCitations[pubLabel] = (appeared, cites)
def get_text_placement_mask(xyz, mask, plane, pad=2, viz=False):
    """
    Returns a binary mask in which text can be placed.
    Also returns a homography from original image
    to this rectified mask.

    XYZ   : (HxWx3) image xyz coordinates
    MASK  : (HxW) : non-zero pixels mark the object mask
    PLANE : plane parameters (from the TextRegions.get_regions output)
    PAD   : number of pixels to pad the placement-mask by
    """
    contour, hier = cv2.findContours(mask.copy().astype('uint8'),
                                     mode=cv2.RETR_CCOMP,
                                     method=cv2.CHAIN_APPROX_SIMPLE)
    contour = [np.squeeze(c).astype('float') for c in contour]
    #plane = np.array([plane[1],plane[0],plane[2],plane[3]])
    H, W = mask.shape[:2]

    # bring the contour 3d points to fronto-parallel config:
    pts, pts_fp = [], []
    center = np.array([W, H]) / 2
    n_front = np.array([0.0, 0.0, -1.0])
    for i in range(len(contour)):
        cnt_ij = contour[i]
        xyz = su.DepthCamera.plane2xyz(center, cnt_ij, plane)
        R = su.rot3d(plane[:3], n_front)
        xyz = xyz.dot(R.T)
        pts_fp.append(xyz[:, :2])
        pts.append(cnt_ij)

    # unrotate in 2D plane:
    rect = cv2.minAreaRect(pts_fp[0].copy().astype('float32'))
    box = np.array(cv2.boxPoints(rect))
    R2d = su.unrotate2d(box.copy())
    box = np.vstack([box, box[0, :]])  # close the box for visualization

    mu = np.median(pts_fp[0], axis=0)
    pts_tmp = (pts_fp[0] - mu[None, :]).dot(R2d.T) + mu[None, :]
    boxR = (box - mu[None, :]).dot(R2d.T) + mu[None, :]

    # rescale the unrotated 2d points to approximately
    # the same scale as the target region:
    s = rescale_frontoparallel(pts_tmp, boxR, pts[0])
    boxR *= s
    for i in range(len(pts_fp)):
        pts_fp[i] = s * ((pts_fp[i] - mu[None, :]).dot(R2d.T) + mu[None, :])

    # paint the unrotated contour points:
    minxy = -np.min(boxR, axis=0) + pad // 2
    ROW = np.max(ssd.pdist(np.atleast_2d(boxR[:, 0]).T))
    COL = np.max(ssd.pdist(np.atleast_2d(boxR[:, 1]).T))

    place_mask = 255 * np.ones((int(np.ceil(COL)) + pad,
                                int(np.ceil(ROW)) + pad), 'uint8')

    pts_fp_i32 = [(pts_fp[i] + minxy[None, :]).astype('int32')
                  for i in range(len(pts_fp))]
    cv2.drawContours(place_mask, pts_fp_i32, -1, 0,
                     thickness=cv2.FILLED, lineType=8, hierarchy=hier)

    if not TextRegions.filter_rectified((~place_mask).astype('float') / 255):
        return

    # calculate the homography
    H, _ = cv2.findHomography(pts[0].astype('float32').copy(),
                              pts_fp_i32[0].astype('float32').copy(),
                              method=0)
    Hinv, _ = cv2.findHomography(pts_fp_i32[0].astype('float32').copy(),
                                 pts[0].astype('float32').copy(),
                                 method=0)
    if viz:
        plt.subplot(1, 2, 1)
        plt.imshow(mask)
        plt.subplot(1, 2, 2)
        plt.imshow(~place_mask)
        plt.hold(True)
        for i in range(len(pts_fp_i32)):
            plt.scatter(pts_fp_i32[i][:, 0], pts_fp_i32[i][:, 1],
                        edgecolors='none', facecolor='g', alpha=0.5)
        plt.show()

    return place_mask, H, Hinv
            ) / 2.  # average of the bottom and second-to-bottom rows, shifted up to the bottom row
        where_unsat = np.where(col_dat[2, 0, xPos, :] == 0)[0]
        print(where_unsat)
        if len(where_unsat) == 0:
            wtd[i, q] = 0.
        elif where_unsat[0] == 0:
            wtd[i, q] = z_surf[xPos] - z_bott[xPos]
        else:
            wtd[i, q] = z_surf[xPos] - (col_dat[1, :, xPos, where_unsat[0]]
                                        + col_dat[1, :, xPos, where_unsat[0] - 1]) / 2

    plt.scatter(np.ones([8, 1], 'd') * bct_lo[q], -wtd[0:8, q], c=col[q], marker='s')
    plt.hold(True)
    plt.scatter(np.ones([8, 1], 'd') * bct_hi[q], -wtd[8:16, q], c=col[q], marker='s')
    plt.hold(True)
    plt.scatter(np.ones([8, 1], 'd') * bct_lo[q], -wtd[16:24, q], c=col[q])
    plt.hold(True)
    plt.scatter(np.ones([8, 1], 'd') * bct_hi[q], -wtd[24:32, q], c=col[q])
    plt.hold(True)

plt.title('Active Layer Thickness vs. Acrotelm Thickness, Grouped by Catotelm Thickness')
plt.xlabel('Catotelm Thickness [m]')
plt.ylabel('Active Layer Thickness [m]')
def plot_polygons(polygons_points, style=None, figname=None, label=None,
                  alpha=None):
    """ Take list of polygons and plot.

    Inputs:

    polygons - list of polygons
    style    - style list corresponding to each polygon
               - for a polygon, use 'line'
               - for points falling outside a polygon, use 'outside'
               - style can also be user defined as in normal pylab plot.
    figname  - name to save figure to
    label    - title for plot
    alpha    - transparency of polygon fill, 0.0=none, 1.0=solid;
               if not supplied, no fill.

    Outputs:

    - plot of polygons
    """

    try:
        import matplotlib
        matplotlib.use('Agg')
        from matplotlib.pyplot import (hold, plot, savefig, xlabel,
                                       ylabel, title, close, fill)
    except ImportError:
        return

    #ion()
    hold(True)

    if label is None:
        label = ''

    # clamp alpha to sensible range
    if alpha:
        try:
            alpha = float(alpha)
        except ValueError:
            alpha = None
        else:
            alpha = max(0.0, min(1.0, alpha))

    num_points = len(polygons_points)
    colour = []
    if style is None:
        style_type = 'line'
        style = []
        for i in range(num_points):
            style.append(style_type)
            colour.append('b-')
    else:
        for style_name in style:
            if style_name == 'line':
                colour.append('b-')
            if style_name == 'outside':
                colour.append('r.')
            if style_name == 'point':
                colour.append('g.')
            if style_name not in ['line', 'outside', 'point']:
                colour.append(style_name)

    for i, item in enumerate(polygons_points):
        pt_x, pt_y = _poly_xy(item)
        plot(pt_x, pt_y, colour[i])
        if alpha:
            fill(pt_x, pt_y, colour[i], alpha=alpha)

    xlabel('x')
    ylabel('y')
    title(label)

    if figname is not None:
        savefig(figname)
    else:
        savefig('test_image')

    #ioff()
    hold(False)
    close('all')
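# Usage sketch (added illustration, not original code): one triangle drawn as
# a line and one point set marked as 'outside'. Assumes the module's _poly_xy
# helper accepts a list of [x, y] pairs and returns the x and y sequences.
triangle = [[0, 0], [4, 0], [2, 3]]
outside_pts = [[5, 5], [6, 4]]
plot_polygons([triangle, outside_pts], style=['line', 'outside'],
              figname='polygons_demo', label='demo', alpha=0.3)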
def train(config, FIGURE=True):

    assert config.model_type in ('RNN', 'LSTM')

    # Initialize the device which to run the model on
    device = torch.device(config.device)

    # Initialize the model that we are going to use
    if config.model_type == 'RNN':
        model = VanillaRNN(config.input_length, config.input_dim,
                           config.num_hidden, config.num_classes,
                           config.batch_size, device).cuda()
    else:
        model = LSTM(config.input_length, config.input_dim,
                     config.num_hidden, config.num_classes,
                     config.batch_size, device).cuda()

    # Initialize the dataset and data loader (note the +1)
    dataset = PalindromeDataset(config.input_length + 1)
    data_loader = DataLoader(dataset, config.batch_size, num_workers=1)

    # Setup the loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.RMSprop(model.parameters(), lr=config.learning_rate)

    accuracy_list = []

    for step, (batch_inputs, batch_targets) in enumerate(data_loader):

        # Only for time measurement of step through network
        t1 = time.time()

        optimizer.zero_grad()
        batch_inputs = batch_inputs.to(device)
        batch_targets = batch_targets.to(device)
        out = model(batch_inputs)
        loss_criterion = criterion(out, batch_targets)
        loss_criterion.backward()

        ############################################################################
        # QUESTION: what happens here and why?
        # Gradient clipping rescales the gradients so their norm never exceeds
        # max_norm, preventing exploding gradients in recurrent networks.
        ############################################################################
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=config.max_norm)
        ############################################################################

        optimizer.step()

        values, indices = torch.max(out, 1)
        loss = loss_criterion.item()
        accuracy = indices[indices == batch_targets].size(0) / config.batch_size

        # Just for time measurement
        t2 = time.time()
        examples_per_second = config.batch_size / float(t2 - t1)

        if step % 10 == 0:
            print("[{}] Train Step {:04d}/{:04d}, Batch Size = {}, "
                  "Examples/Sec = {:.2f}, Accuracy = {:.2f}, Loss = {:.3f}".format(
                      datetime.now().strftime("%Y-%m-%d %H:%M"), step,
                      config.train_steps, config.batch_size,
                      examples_per_second, accuracy, loss))
            accuracy_list += [accuracy]
            if len(accuracy_list) > 5:
                if check_converge(accuracy, accuracy_list):
                    break

        if step == config.train_steps:
            # If you receive a PyTorch data-loader error, check this bug report:
            # https://github.com/pytorch/pytorch/pull/9655
            break

    if FIGURE:
        plt.figure(1)
        print('Done training.')
        x = np.arange(len(accuracy_list)) * 10
        plt.plot(x, accuracy_list, 'r')
        plt.title('Accuracy of VanillaRNN with T={:d}'.format(config.input_length))
        plt.xlabel('Steps')
        plt.ylabel('Accuracy')
        plt.hold(True)
        plt.show()

    return accuracy_list
def plotTrackingLow(trackResults, settings):
    fig = []
    if settings.plotTrackingNumPts > len(trackResults[0].I_P):
        x_pts = [i * 0.001 for i in range(len(trackResults[0].I_P))]
    else:
        x_pts = [i * 0.001 for i in range(settings.plotTrackingNumPts)]
    for channelNr in settings.plotTrackingLowInds:
        fig.append([])
        if settings.plotTrackingLowDisc:
            fig[channelNr].append(plt.figure())
            fig[channelNr][-1].clf()
            plt.figtext(0.02, 0.95,
                        "Channel %d (PRN %d) Tracking Results : I/Q Diagram, PLL Disc, and DLL Disc"
                        % (channelNr, trackResults[channelNr].PRN))
            fig[channelNr][-1].add_subplot(3, 2, 1)
            plt.plot(trackResults[channelNr].I_P[0:len(x_pts)],
                     trackResults[channelNr].Q_P[0:len(x_pts)], '.')
            plt.ylabel('IQ Diagram\nQuadrature')
            plt.xlabel('In-phase')
            fig[channelNr][-1].add_subplot(3, 2, 3)
            plt.plot(x_pts, trackResults[channelNr].pllDiscr[0:len(x_pts)], 'b.')
            plt.ylabel('PLL Discriminant')
            plt.xlabel('Time')
            fig[channelNr][-1].add_subplot(3, 2, 4)
            plt.plot(x_pts, trackResults[channelNr].pllDiscrFilt[0:len(x_pts)], 'r.')
            plt.ylabel('Filtered PLL Discriminant')
            plt.xlabel('Time')
            fig[channelNr][-1].add_subplot(3, 2, 5)
            plt.plot(x_pts, trackResults[channelNr].dllDiscr[0:len(x_pts)], 'b.')
            plt.ylabel('DLL Discriminant')
            plt.xlabel('Time')
            fig[channelNr][-1].add_subplot(3, 2, 6)
            plt.plot(x_pts, trackResults[channelNr].dllDiscrFilt[0:len(x_pts)], 'r.')
            plt.ylabel('Filtered DLL Discriminant')
            plt.xlabel('Time')
        if settings.plotTrackingLowCorr:
            fig[channelNr].append(plt.figure())
            fig[channelNr][-1].clf()
            plt.figtext(0.02, 0.95,
                        "Channel %d (PRN %d) Tracking Results : Correlations"
                        % (channelNr, trackResults[channelNr].PRN))
            fig[channelNr][-1].add_subplot(2, 1, 1)
            plt.plot(x_pts, trackResults[channelNr].I_P[0:len(x_pts)], 'b')
            plt.ylabel('IP Correlation')
            plt.xlabel('Time')
            fig[channelNr][-1].add_subplot(2, 1, 2)
            plt.hold(True)
            plt.plot(x_pts,
                     np.sqrt(np.square(trackResults[channelNr].I_E[0:len(x_pts)])
                             + np.square(trackResults[channelNr].Q_E[0:len(x_pts)])), 'y')
            plt.plot(x_pts,
                     np.sqrt(np.square(trackResults[channelNr].I_P[0:len(x_pts)])
                             + np.square(trackResults[channelNr].Q_P[0:len(x_pts)])), 'b')
            plt.plot(x_pts,
                     np.sqrt(np.square(trackResults[channelNr].I_L[0:len(x_pts)])
                             + np.square(trackResults[channelNr].Q_L[0:len(x_pts)])), 'r')
            plt.ylabel('Early / Prompt / Late Power')
            plt.xlabel('Time')
            plt.hold(False)
    return fig
def unpaired_data():
    ''' Then some unpaired comparison: 24 hour total energy expenditure (MJ/day),
    in groups of lean and obese women'''

    # Get the data: energy expenditure in MJ and stature (0=obese, 1=lean)
    inFile = 'altman_94.txt'
    energ = np.genfromtxt(inFile, delimiter=',')

    # Group them
    group1 = energ[:, 1] == 0
    group1 = energ[group1][:, 0]
    group2 = energ[:, 1] == 1
    group2 = energ[group2][:, 0]

    np.mean(group1)
    np.mean(group2)

    # --- >>> START stats <<< ---
    # two-sample t-test
    # null hypothesis: the two groups have the same mean
    # this test assumes the two groups have the same variance...
    # (can be checked with tests for equal variance)
    # independent groups: e.g., how boys and girls fare at an exam
    # dependent groups: e.g., how the same class fare at 2 different exams
    t_statistic, p_value = stats.ttest_ind(group1, group2)

    # p_value < 0.05 => alternative hypothesis:
    # they don't have the same mean at the 5% significance level
    print(("two-sample t-test", p_value))

    # For non-normally distributed data, perform the two-sample Wilcoxon test,
    # a.k.a. Mann-Whitney U.
    # Watch out: the keyword "alternative" was introduced in scipy 0.17,
    # with default "two-sided";
    # before scipy 0.17, the keyword "use_continuity" was in use, with the
    # default "use_continuity=True", corresponding to "alternative='greater'"!
    if int(sp.__version__.split('.')[1]) > 16:
        u, p_value = stats.mannwhitneyu(group1, group2, alternative='two-sided')
    else:
        u, p_value = stats.mannwhitneyu(group1, group2, use_continuity=True)
        p_value *= 2  # because the default was a one-sided p-value
    print(("Mann-Whitney test", p_value))
    # --- >>> STOP stats <<< ---

    # Plot the data
    plt.plot(group1, 'bx', label='obese')
    plt.hold(True)
    plt.plot(group2, 'ro', label='lean')
    plt.legend(loc=0)
    plt.show()

    # The same calculations, but implemented with pandas, would be:
    #import pandas as pd
    #df = pd.DataFrame(energ, columns=['energy', 'weightClass'])
    #grouped = df.groupby('weightClass')
    #grouped.mean()
    #t_statistic, p_value = stats.ttest_ind(grouped.get_group(0).energy,
    #                                       grouped.get_group(1).energy)
    #grouped.energy.plot(marker='o', lw=0)
    #plt.legend(['obese', 'lean'])
    #plt.show()

    return p_value  # should be 0.0021216133858800489
def plot_results(td, Z_prob, last_Z_prob, tau_params, beta_params, gamma_params,
                 m_h_tau, m_h_gamma, A_corr_count, tau_autocorrelation):
    [trial_num, K] = gamma_params.shape
    Z_true = td['Z']
    sample_num = len(tau_autocorrelation)

    plt.figure(figsize=(12, 9))

    # Plot beta
    plt.subplot(2, 2, 1)
    beta_mean = np.divide(beta_params[:, 0], np.sum(beta_params, axis=1))
    beta_std = np.sqrt(np.divide(
        np.multiply(beta_mean, beta_params[:, 1]),
        np.multiply(np.sum(beta_params, axis=1),
                    np.sum(beta_params, axis=1) + 1)))
    plt.plot(beta_mean, 'r-')
    plt.hold(True)
    plt.fill_between(np.arange(trial_num), beta_mean - beta_std,
                     beta_mean + beta_std,
                     facecolor=[1, .5, .5], color=[1, .5, .5])
    plt.axis([0, trial_num - 1, 0, 1])
    switch_trials = np.where(td['B'])[0]
    plt.plot([0, trial_num], [td['beta'], td['beta']], 'r--', linewidth=2)
    plt.hold(False)
    plt.ylabel('Estimated beta parameters')

    # Plot tau
    plt.subplot(2, 2, 3)
    tau_mean = np.divide(tau_params[:, 0], np.sum(tau_params, axis=1))
    tau_std = np.sqrt(np.divide(
        np.multiply(tau_mean, tau_params[:, 1]),
        np.multiply(np.sum(tau_params, axis=1),
                    np.sum(tau_params, axis=1) + 1)))
    plt.plot(tau_mean, 'b-')
    plt.hold(True)
    plt.fill_between(np.arange(trial_num), tau_mean - tau_std,
                     tau_mean + tau_std,
                     facecolor=[.5, .5, 1], color=[.5, .5, 1])
    # Mark switch and trap trials
    plt.axis([0, trial_num - 1, 0, 1])
    plt.plot([0, trial_num], [td['tau'], td['tau']], 'b--', linewidth=2)
    plt.hold(False)
    plt.ylabel('Estimated tau parameters')

    # Plot gamma parameters
    plt.subplot(2, 2, 4)
    plt.imshow(gamma_params.T)
    plt.hold(True)
    plt.plot(Z_true, 'k--', linewidth=1)
    plt.axis([0, trial_num - 1, 0, K - 1])
    plt.xlabel('trials')
    plt.hold(False)
    plt.ylabel('Estimated gamma parameters')

    # Plot state probability
    plt.subplot(2, 2, 2)
    plt.imshow(Z_prob.T)
    plt.hold(True)
    plt.plot(Z_true, 'w--')
    plt.axis([0, trial_num - 1, 0, K - 1])
    plt.xlabel('trials')
    plt.hold(False)
    plt.ylabel('p(TS|past) at decision time')
    plt.draw()

    # Plot performances
    plt.figure(figsize=(12, 9))

    # plot final performance
    plt.subplot(2, 2, 1)
    plt.plot(np.divide(A_corr_count, np.arange(trial_num) + 1), 'k-', linewidth=2)
    plt.hold(True)
    plt.axis([0, trial_num - 1, 0, 1])
    plt.hold(False)
    plt.xlabel('trials')
    plt.ylabel('proportion correct answers')

    # plot final Z estimate
    plt.subplot(2, 2, 3)
    plt.imshow(last_Z_prob.T)
    plt.hold(True)
    plt.plot(Z_true, 'w--')
    plt.axis([0, trial_num - 1, 0, K - 1])  # For speed
    plt.hold(False)
    plt.xlabel('trials')
    plt.ylabel('p(TS|past) at current time')

    # plot gamma/tau Metropolis-Hastings acceptance rates
    plt.subplot(2, 2, 2)
    plt.plot(m_h_gamma, 'g-')
    plt.hold(True)
    plt.plot(m_h_tau, 'b-')
    plt.axis([0, trial_num - 1, 0, 1])
    plt.hold(False)
    plt.xlabel('trials')
    plt.ylabel('gamma(green)/tau(blue) acceptance rates')

    # plot Gibbs autocorrelation function
    plt.subplot(2, 2, 4)
    plt.plot(tau_autocorrelation, 'k-')
    plt.hold(True)
    plt.axis([0, sample_num - 1, 0, 1])
    plt.hold(False)
    plt.xlabel('trials')
    plt.ylabel('Gibbs sampler autocorrelation')
    plt.draw()

    return 'plot ok'
          validation_split=.25,
          batch_size=128,
          verbose=2,
          nb_epoch=nb_epoch)

# compute final accuracy on training and test sets
pred_tr = model.predict([x_train[:, 0], x_train[:, 1]])
pred_ts = model.predict([x_test[:, 0], x_test[:, 1]])

# auc and other things
fpr, tpr, _ = roc_curve(y_test, pred_ts)
roc_auc = auc(fpr, tpr)

plt.figure(1)
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.hold(True)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.hold(False)
plt.savefig('roc_curve_face.png')

thresh = .35
tr_acc = accuracy_score(y_train, (pred_tr < thresh).astype('float32'))
te_acc = accuracy_score(y_test, (pred_ts < thresh).astype('float32'))

print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
def write_field(field, step):
    plt.hold(False)
    plt.imshow(field)
    plt.axis('off')
    plt.savefig('heat_{0:03d}.png'.format(step))
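# Usage sketch (added illustration, not original code): dump a toy Gaussian
# "temperature" field to heat_000.png, assuming np/plt imports as above.
import numpy as np
xx, yy = np.meshgrid(np.linspace(-3, 3, 64), np.linspace(-3, 3, 64))
write_field(np.exp(-(xx**2 + yy**2)), step=0)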
pmh2.runSampler(smc, data, sys, thSys, par, "PMH2")

# Uncomment to export data to file
#pmh0.writeToFile("results/lgss-bpf/pmh0.csv", par)
#pmh1.writeToFile("results/lgss-bpf/pmh1.csv", par)
#pmh2.writeToFile("results/lgss-bpf/pmh2.csv", par)

########################################################################
# Plot the results
########################################################################

plt.subplot(3, 1, 1)
plt.plot(pmh0.th[:, 0], pmh0.th[:, 1], 'k')
plt.xlabel("th0")
plt.ylabel("th1")
plt.hold("on")
plt.plot(pmh0.th[:, 0], pmh0.th[:, 1], 'k.')
plt.plot([sys.par[0], sys.par[0]], [0, 2], 'k:')
plt.plot([0, 1], [sys.par[1], sys.par[1]], 'k:')
plt.hold("off")
plt.axis([0, 1, 0, 2])
plt.title("PMH0")

plt.subplot(3, 1, 2)
plt.plot(pmh1.th[:, 0], pmh1.th[:, 1], 'r')
plt.xlabel("th0")
plt.ylabel("th1")
plt.hold("on")
plt.plot(pmh1.th[:, 0], pmh1.th[:, 1], 'r.')
plt.plot([sys.par[0], sys.par[0]], [0, 2], 'k:')
plt.plot([0, 1], [sys.par[1], sys.par[1]], 'k:')
f = 1.0
mag = 5.0
randMag = 0.1 * mag
deltaT = 0.01
startTime = 0
stopTime = 10
outFileClean = "OpenFOAM4x/forces_synthetic_clean.dat"
outFileNoise = "OpenFOAM4x/forces_synthetic_noise.dat"

timeSteps = np.linspace(startTime, stopTime,
                        num=int((stopTime - startTime) / deltaT + 1),
                        endpoint=True, dtype='f')
dataClean = mag * np.sin(2 * np.pi * f * timeSteps)
dataNoise = mag * np.sin(2 * np.pi * f * timeSteps) \
    + np.random.normal(scale=randMag, size=len(timeSteps))

plt.figure(1)
plt.plot(timeSteps, dataClean, label="clean data")
plt.plot(timeSteps, dataNoise,
         label="noisy data, noise mag = {}".format(randMag))
plt.grid()
plt.legend(loc="best")
plt.hold()
plt.show()

writeFile(outFileClean, timeSteps, dataClean)
writeFile(outFileNoise, timeSteps, dataNoise)
        # stats = reps(env=env, featurizer=rbf_featurizer, policy_fn=policy_fn,
        #              dual_fn=dual_fn, num_episodes=num_episodes,
        #              num_steps=num_steps, num_samples=num_samples,
        #              eta=eta, v=v, epsilon=epsilon, discounted_factor=0.95)
        mean_rewards[i_trails, :, i_epsilon] = stats.rewards

fig = plt.figure()
plt.hold("True")
ax = fig.add_subplot(111)
ax.set_xlabel("Iteration")
ax.set_ylabel("Average reward")
c = ['b', 'm', 'r']
for l in range(len(epsilon_coeffs)):
    r_mean = np.mean(mean_rewards[:, :, l], axis=0)
    r_std = np.std(mean_rewards[:, :, l], axis=0)
    plt.fill_between(range(num_episodes), r_mean - r_std, r_mean + r_std,
                     alpha=0.3, color=c[l])
    plt.plot(range(num_episodes), r_mean,
@author: Meowasaurus
"""
from matplotlib.pyplot import (figure, hold, plot, title, xlabel, ylabel,
                               colorbar, imshow, xticks, yticks, show)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix

import Porject2_KNN as KNN

bestIndex = KNN.bestIndex
print(bestIndex)

figure(1)
hold(True)
styles = ['.b', '.r', '.g', '.y']
for c in range(C):
    class_mask = (y_train == c)
    plot(X_train[class_mask, 0], X_train[class_mask, 1], styles[c])

# K-nearest neighbors
K = bestIndex

# Distance metric (corresponds to 2nd norm, euclidean distance).
# You can set dist=1 to obtain manhattan distance (cityblock distance).
dist = 2

# Fit classifier and classify the test points
knclassifier = KNeighborsClassifier(n_neighbors=K, p=dist)
def evaluateFit(A, B, Q, C, d, R, y, u, Ext, Extxt, Extxtm1, LLs):
    """ TO BE EXTENDED """
    T = y.shape[1]
    Trial = y.shape[2]
    xDim = A.shape[0]
    yDim = C.shape[0]
    uDim = u.shape[0]

    Pi_h = np.array([sp.linalg.solve_discrete_lyapunov(A, Q)])[0, :, :]
    Pi_t_h = np.dot(A.transpose(), Pi_h)

    dataCov = np.cov(y[:, 0:T - 1, 0], y[:, 1:T, 0])
    covyy = dataCov[np.ix_(np.arange(0, yDim), np.arange(0, yDim))]
    covyy_m1 = dataCov[np.ix_(np.arange(0, yDim), np.arange(yDim, 2 * yDim))]

    plt.figure(1)
    cmap = matplotlib.cm.get_cmap('brg')
    clrs = [cmap(i) for i in np.linspace(0, 1, xDim)]
    for i in range(xDim):
        plt.subplot(xDim, 1, i + 1)
        plt.plot(x[i, :, 0], color=clrs[i])
        plt.hold(True)
        # plot the latent estimate with the sign that matches the truth best
        if (np.mean(np.square(x[i, :, 0] - Ext_h[i, :, 0]))
                < np.mean(np.square(x[i, :, 0] + Ext_h[i, :, 0]))):
            plt.plot(Ext_h[i, :, 0], color=clrs[i], ls=':')
        else:
            plt.plot(-Ext_h[i, :, 0], color=clrs[i], ls=':')

    m = np.min([Pi_h.min(), covyy.min()])
    M = np.max([Pi_h.max(), covyy.max()])
    plt.figure(2)
    plt.subplot(1, 3, 1)
    plt.imshow(np.dot(np.dot(C_h, Pi_h), C_h.transpose()) + R_h,
               interpolation='none')
    plt.title('cov_hat(y_t,y_t)')
    plt.clim(m, M)
    plt.subplot(1, 3, 2)
    plt.imshow(covyy, interpolation='none')
    plt.title('cov_emp(y_t,y_t)')
    plt.clim(m, M)
    plt.subplot(1, 3, 3)
    plt.imshow(np.dot(np.dot(C, Pi), C.transpose()) + R, interpolation='none')
    plt.title('cov_true(y_t,y_t)')
    plt.clim(m, M)

    plt.figure(3)
    m = np.min([covyy_m1.min(), Pi_t_h.min()])
    M = np.max([covyy_m1.max(), Pi_t_h.max()])
    plt.subplot(1, 3, 1)
    plt.imshow(np.dot(np.dot(C_h, Pi_t_h), C_h.transpose()),
               interpolation='none')
    plt.title('cov_hat(y_t,y_{t-1})')
    plt.clim(m, M)
    plt.subplot(1, 3, 2)
    plt.imshow(covyy_m1, interpolation='none')
    plt.title('cov(y_t,y_{t-1})')
    plt.clim(m, M)
    plt.subplot(1, 3, 3)
    plt.imshow(np.dot(np.dot(C, Pi_t), C.transpose()), interpolation='none')
    plt.title('cov_true(y_t,y_{t-1})')
    plt.clim(m, M)

    plt.figure(4)
    plt.plot(np.sort(np.linalg.eig(A)[0]), 'r')
    plt.hold(True)
    plt.plot(np.sort(np.linalg.eig(A_h)[0]), 'b')
    plt.legend(['true', 'est'])
def SMC2(td, show_progress=True, numberOfStateSamples=1000,
         numberOfThetaSamples=1000, coefficient=.5, beta_softmax=None):

    print('Constant Volatility Model')
    print('number of theta samples ' + str(numberOfThetaSamples))
    print('\n')

    # Start timer
    start_time_multi = time.time()

    # Extract parameters from task description
    stimuli = td['S']                   # Sequence of Stimuli
    Z_true = td['Z']                    # Sequence of Task Sets
    numberOfActions = td['action_num']  # Number of Actions possible
    numberOfStimuli = td['state_num']   # Number of states or stimuli
    K = np.prod(np.arange(numberOfActions + 1)[-numberOfStimuli:])  # Number of possible Task Sets
    numberOfTrials = len(Z_true)        # Number of Trials

    # Sampling and prior settings
    betaPrior = np.array([1, 1])   # Prior on Beta, the feedback noise parameter
    tauPrior = np.array([1, 1])    # Prior on Tau, the volatility parameter
    gammaPrior = np.ones(K)        # Prior on Gamma, the Dirichlet parameter

    # Mapping from task set to correct action per stimulus
    mapping = get_mapping.Get_TaskSet_Stimulus_Mapping(
        state_num=numberOfStimuli, action_num=numberOfActions).T

    # Probabilities of every action, updated at every time step -> used to take the decision
    actionLikelihood = np.zeros(numberOfActions)  # For 1 observation, likelihood of the action. Requires a marginalisation over all task sets
    actions = np.zeros(numberOfTrials) - 1
    rewards = np.zeros(numberOfTrials, dtype=bool)

    # Keep track of probability correct/exploration after switches
    countPerformance = np.zeros(numberOfTrials)  # Number of correct actions after i trials
    countExploration = np.zeros(numberOfTrials)  # Number of exploratory actions after i trials
    correct_before_switch = np.empty(0)          # The correct task set before switch
    tsProbability = np.zeros([numberOfTrials, K])
    acceptanceProba = 0.
    volTracking = np.zeros(numberOfTrials)
    volStdTracking = np.zeros(numberOfTrials)
    betaTracking = np.zeros(numberOfTrials)
    betaStdTracking = np.zeros(numberOfTrials)
    time_list = [start_time_multi]

    # SMC particles initialisation
    betaSamples = np.random.beta(betaPrior[0], betaPrior[1], numberOfThetaSamples)
    tauSamples = np.random.beta(tauPrior[0], tauPrior[1], numberOfThetaSamples)
    gammaSamples = np.random.dirichlet(gammaPrior, numberOfThetaSamples)
    logThetaWeights = np.zeros(numberOfThetaSamples)
    currentSamples = np.zeros([numberOfThetaSamples, numberOfStateSamples], dtype=np.intc)
    ancestorSamples = np.zeros([numberOfThetaSamples, numberOfStateSamples], dtype=np.intc)
    weightsList = np.ones([numberOfThetaSamples, numberOfStateSamples]) / numberOfStateSamples
    essList = np.zeros(numberOfTrials)
    tasksetLikelihood = np.zeros(K)

    # variables for speed-up
    ancestorsIndexes = np.zeros(numberOfStateSamples, dtype=np.intc)
    gammaAdaptedProba = np.zeros(K)
    likelihoods = np.zeros(K)
    positiveStates = np.zeros(K, dtype=np.intc)

    # Guided SMC variables
    dirichletParamCandidates = np.zeros(K)

    # Plot progress
    if show_progress:
        plt.figure(figsize=(12, 9))
        plt.ion()

    # Loop over trials
    for T in range(numberOfTrials):

        # Print progress
        if (T + 1) % 10 == 0:
            sys.stdout.write(' ' + str(T + 1))
            sys.stdout.flush()
            time_list.append(time.time() - start_time_multi)
        if (T + 1) % 100 == 0:
            print('\n')

        smc_c.bootstrapUpdateStep_c(currentSamples, logThetaWeights, gammaSamples,
                                    betaSamples / 2. + 1 / 2., tauSamples / 2., T,
                                    ancestorSamples, weightsList,
                                    np.ascontiguousarray(mapping), stimuli[T - 1],
                                    actions[T - 1], rewards[T - 1],
                                    ancestorsIndexes, gammaAdaptedProba,
                                    likelihoods, positiveStates, 0)
        ancestorSamples[:] = np.array(currentSamples)

        # Degeneracy criterion
        logEss = 2 * useful_functions.log_sum(logThetaWeights) \
            - useful_functions.log_sum(2 * logThetaWeights)
        essList[T] = np.exp(logEss)

        # Move step
        normalisedThetaWeights = useful_functions.to_normalized_weights(logThetaWeights)
        if essList[T] < coefficient * numberOfThetaSamples:
            acceptanceProba = 0.
            tauMu = np.sum(normalisedThetaWeights * tauSamples)
            tauVar = np.sum(normalisedThetaWeights * (tauSamples - tauMu)**2)
            tauAlpha = ((1 - tauMu) / tauVar - 1 / tauMu) * tauMu**2
            tauBeta = tauAlpha * (1 / tauMu - 1)
            assert(tauAlpha > 0)
            assert(tauBeta > 0)
            betaMu = np.sum(normalisedThetaWeights * betaSamples)
            betaVar = np.sum(normalisedThetaWeights * (betaSamples - betaMu)**2)
            betaAlpha = ((1 - betaMu) / betaVar - 1 / betaMu) * betaMu**2
            betaBeta = betaAlpha * (1 / betaMu - 1)
            assert(betaAlpha > 0)
            assert(betaBeta > 0)
            dirichletMeans = np.sum(normalisedThetaWeights * gammaSamples.T, axis=1)
            dirichletVar = np.sum(normalisedThetaWeights * (gammaSamples**2).T, axis=1) \
                - dirichletMeans**2
            dirichletPrecision = np.sum(dirichletMeans - dirichletMeans**2) \
                / np.sum(dirichletVar) - 1
            dirichletParamCandidates = np.maximum(dirichletMeans * dirichletPrecision, 1.)
            assert((dirichletParamCandidates > 0).all())
            tauSamples = np.random.beta(tauAlpha, tauBeta, numberOfThetaSamples)
            betaSamples = np.random.beta(betaAlpha, betaBeta, numberOfThetaSamples)
            gammaSamples = np.random.dirichlet(dirichletParamCandidates, numberOfThetaSamples)
            logThetaWeights[:] = 0
            normalisedThetaWeights = useful_functions.to_normalized_weights(logThetaWeights)

        # Take decision
        for ts_idx in range(K):
            tsProbability[T, ts_idx] = np.sum(
                normalisedThetaWeights * np.sum(currentSamples == ts_idx, axis=1))

        if beta_softmax is None:
            # Compute action likelihood
            for action_idx in range(numberOfActions):
                actionLikelihood[action_idx] = np.sum(
                    tsProbability[T, mapping[stimuli[T]] == action_idx])
            # Select action
            actions[T] = np.argmax(actionLikelihood)
        else:
            # Compute action likelihood
            tsProbability[T] /= sum(tsProbability[T])
            for action_idx in range(numberOfActions):
                actionLikelihood[action_idx] = np.exp(
                    np.log(np.sum(tsProbability[T, mapping[stimuli[T].astype(int)] == action_idx]))
                    * beta_softmax)
            actionLikelihood /= sum(actionLikelihood)
            # Select action
            actions[T] = np.where(np.random.multinomial(1, actionLikelihood, size=1)[0])[0][0]

        # Select action and compute volatility estimate
        volTracking[T] = np.sum(normalisedThetaWeights * tauSamples)
        volStdTracking[T] = np.sum(normalisedThetaWeights * (tauSamples - volTracking[T])**2)
        betaTracking[T] = np.sum(normalisedThetaWeights * betaSamples)
        betaStdTracking[T] = np.sum(normalisedThetaWeights * (betaSamples - betaTracking[T])**2)

        # Update performance
        if K == 2:
            assert(mapping[stimuli[T].astype(int), Z_true[T].astype(int)] == Z_true[T])
        if (K == 2) and (actions[T] == mapping[stimuli[T].astype(int), Z_true[T].astype(int)]):
            rewards[T] = not td['trap'][T]
            countPerformance[T:] += 1
        elif (K == 24) and (actions[T] == td['A_correct'][T]):
            rewards[T] = not td['trap'][T]
            countPerformance[T:] += 1
        else:
            rewards[T] = td['trap'][T]

        if show_progress:
            plt.subplot(3, 2, 1)
            plt.imshow(tsProbability[:T].T, aspect='auto')
            plt.hold(True)
            plt.plot(Z_true[:T], 'w--')
            plt.axis([0, T - 1, 0, K - 1])
            plt.hold(False)
            plt.xlabel('trials')
            plt.ylabel('p(TS|past) at current time')

            plt.subplot(3, 2, 2)
            plt.plot(volTracking[:T], 'b')
            plt.hold(True)
            plt.fill_between(np.arange(T),
                             volTracking[:T] - volStdTracking[:T],
                             volTracking[:T] + volStdTracking[:T],
                             facecolor=[.5, .5, 1], color=[.5, .5, 1])
            plt.plot(td['tau'], 'b--', linewidth=2)
            plt.axis([0, T - 1, 0, .5])
            plt.hold(False)
            plt.xlabel('trials')
            plt.ylabel('Volatility')

            plt.subplot(3, 2, 3)
            x = np.linspace(0.01, .99, 100)
            plt.plot(x, normlib.pdf(x, betaTracking[T], betaStdTracking[T]), 'r')
            plt.hold(True)
            plt.plot([betaTracking[T], betaTracking[T]], plt.gca().get_ylim(),
                     'r', linewidth=2)
            plt.plot([td['beta'], td['beta']], plt.gca().get_ylim(),
                     'r--', linewidth=2)
            plt.hold(False)
            plt.xlabel('Parameters')
            plt.ylabel('Gaussian pdf')

            plt.subplot(3, 2, 4)
            plt.plot(np.arange(T) + 1, essList[:T], 'g', linewidth=2)
            plt.hold(True)
            plt.plot(plt.gca().get_xlim(),
                     [coefficient * numberOfThetaSamples,
                      coefficient * numberOfThetaSamples], 'g--', linewidth=2)
            plt.axis([0, T - 1, 0, numberOfThetaSamples])
            plt.hold(False)
            plt.xlabel('trials')
            plt.ylabel('ESS')

            plt.subplot(3, 2, 5)
            plt.plot(np.divide(countPerformance[:T], np.arange(T) + 1),
                     'k--', linewidth=2)
            plt.hold(True)
            plt.axis([0, T - 1, 0, 1])
            plt.hold(False)
            plt.xlabel('Trials')
            plt.ylabel('Performance')

            plt.draw()
            plt.show()
            plt.pause(0.1)

    elapsed_time = time.time() - start_time_multi

    return [td, tauSamples, volTracking, volStdTracking, betaSamples,
            betaTracking, betaStdTracking, gammaSamples, tsProbability,
            countPerformance, actions, essList, time_list, elapsed_time]
    exp_mat *= A_discrete
    U, E, VH = np.linalg.svd(M_sqrt * exp_mat * inv_M_sqrt)
    if max_sing_val < E[0]:
        max_sing_val = E[0]
        optimal_dist = np.mat(VH).H[:, 0]
    #print i, E[0], max_sing_val

B = -inv_M_sqrt * optimal_dist
C = np.mat(np.exp(-((x - x_s) / s)**2)) * M

A_adj = inv_M * A.H * M
C_adj = inv_M * C.H

# Plot spatial distributions of B and C
if plots:
    PLT.figure()
    PLT.hold(True)
    PLT.plot(x, B.real, 'b')
    PLT.plot(x, B.imag, 'r')
    PLT.xlabel('x')
    PLT.ylabel('B')
    PLT.legend(['Real', 'Imag'])
    PLT.xlim([-20, 20])
    PLT.grid(True)

    PLT.figure()
    PLT.hold(True)
    PLT.plot(x, C.T.real, 'b')
    PLT.plot(x, C.T.imag, 'r')
    PLT.xlabel('x')
    PLT.ylabel('C')
    PLT.legend(['Real', 'Imag'])
def plot(self, frame, true_pos=None, true_angle=None, fname=None, fig=None,
         verbose=False):
    """Predict position and gaze angle for a frame and visualize the result."""
    x, y, angle, angle_w, cam = self.predict(frame, verbose=verbose)
    if fig is None:
        fig = plt.figure(frameon=False)
    ax = fig.add_axes([0, 0, 1, 1])
    ax.imshow(frame)
    im_h, im_w = frame.shape[:2]
    plt.hold(True)
    ax.imshow(cam, cmap=plt.cm.jet, alpha=0.3, interpolation='bilinear')

    if not np.isnan(angle):
        ax.plot(x, y, 'o', ms=5, mec=[1, 0.6, 0.3], mfc='none', mew=1)
        ax.plot(x, y, 'o', ms=20, mec=[1, 0.6, 0.3], mfc='none', mew=1)
        x1, y1 = get_max_gaze_line(angle, x, y, im_w, im_h, units='deg')
        ax.plot([x, x1], [y, y1], '-', color=[1, 0.6, 0.2], lw=2, label='argmax')
        x1, y1 = get_max_gaze_line(angle_w, x, y, im_w, im_h, units='deg')
        ax.plot([x, x1], [y, y1], '-', color=[1, 0.3, 0.0], lw=2, label='weighted')
    else:
        ax.plot(x, y, 'o', ms=20, mfc='w', mec='w', lw=2)

    if true_pos is not None:
        # Maximum possible error given x, y
        max_xerr, max_yerr = max(x, im_w - x), max(y, im_h - y)
        max_err = np.sqrt(max_xerr**2 + max_yerr**2)
        error = im_h * np.sqrt((x - true_pos['x'])**2
                               + (y - true_pos['y'])**2) / max_err
        # Note that x, y gets replaced so that true_angle will be drawn
        # starting at true_pos instead of predicted pos.
        x, y = true_pos['x'], true_pos['y']
        ax.plot(x, y, 'o', ms=5, mec='g', mfc='none', mew=1)
        ax.plot(x, y, 'o', ms=20, mec='g', mfc='none', mew=1)
        # draw position error as a bar to the right
        ax.plot([im_w - 4, im_w - 4], [0, error], '-', c='r', lw=4)

    if true_angle is not None:
        x1, y1 = get_max_gaze_line(true_angle, x, y, im_w, im_h, units='deg')
        ax.plot([x, x1], [y, y1], '-', color=[.3, 1., 0.], lw=2, label='True')
        error_w = im_h * np.abs(anglediff(true_angle, angle_w, 'deg')) / 180
        error = im_h * np.abs(anglediff(true_angle, angle, 'deg')) / 180
        # Draw orientation error as a bar to the left
        ax.plot([4, 4], [0, error], '-', c=[1, .6, .2], lw=4)
        ax.plot([11, 11], [0, error_w], '-', c=[1, .3, 0.], lw=4)

    ax.set_xlim([0, im_w])
    ax.set_ylim([0, im_h])
    ax.set_xticks([])
    ax.set_yticks([])
    #ax.legend()
    if fname is not None:
        fig.savefig(fname)
        plt.close(fig)
buffer_idx = 0
# Collect 32 packets to fill the window; signal_buffer is assumed to be a
# preallocated array of length 512 (32 packets * 16 samples).
for n in range(32):
    # Read the next packet from the network. The original assigned
    # random.getrandbits(24), an int, which cannot be indexed byte-by-byte;
    # here the socket read is stubbed out with 48 random bytes
    # (16 samples * 3 bytes), e.g. data = s.recv(48) in the real pipeline.
    data = bytes(random.getrandbits(8) for _ in range(48))
    # Extract 16 channel 1 samples from the packet
    for m in range(16):
        offset = m * 3  # 3 bytes per sample (the original's m * 3 * 8 indexed bits, not bytes)
        # The 3 bytes of each sample arrive in reverse (little-endian) order.
        # Indexing a bytes object already yields ints in Python 3, so no ord().
        sample = (data[offset + 2] << 16)
        sample += (data[offset + 1] << 8)
        sample += data[offset]
        # Store sample to signal buffer
        signal_buffer[buffer_idx] = sample
        buffer_idx += 1
# Calculate DFT ("sp" stands for spectrum)
sp = numpy.fft.fft(signal_buffer)
sp[0] = 0  # eliminate DC component
# Plot spectrum
print("Plotting data")
plt.plot(sp.real)
plt.hold(False)
plt.show()
# Close socket
s.close()
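# The per-byte shifts above reassemble a 24-bit little-endian sample; in
# Python 3 the same decoding can be written with int.from_bytes:
packet = bytes([0x01, 0x02, 0x03])                   # one 3-byte sample
sample = int.from_bytes(packet, byteorder='little')  # == 0x030201
assert sample == (packet[2] << 16) + (packet[1] << 8) + packet[0]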
def run_main():
    np.set_printoptions(linewidth=150)
    filename = '/home/kroegert/local/Results/AccGPSFuse/mystream_6_10_12_54_5.csv'
    #filename = '/home/till/zinc/local/Results/AccGPSFuse/mystream_6_10_12_54_5.csv'
    #filename = '/home/kroegert/local/Results/AccGPSFuse/mystream_6_11_10_42_32.csv'
    #filename = '/home/till/zinc/local/Results/AccGPSFuse/mystream_6_11_10_55_10_phoneonback.csv'
    #filename = '/home/till/zinc/local/Results/AccGPSFuse/mystream_6_11_10_57_31_phoneonfront.csv'
    #filename = '/home/kroegert/local/Results/AccGPSFuse/mystream_6_11_11_29_28_phoneturn.csv'
    #filename = '/home/kroegert/local/Results/AccGPSFuse/mystream_6_15_10_59_5_walkcircle.csv'
    #filename = '/home/kroegert/local/Results/AccGPSFuse/mystream_6_15_11_24_41_phonevertcircle.csv'
    #filename = '/home/kroegert/local/Results/AccGPSFuse/mystream_6_15_16_12_46_phonesidewaysslide.csv'

    data_gps, data_accel, data_gyro, data_orient, data_linacc, data_rotvec, data_grav = func_parse_imugps_cvs(filename)

    # Android coordinate system: when a device is held in its default orientation,
    # the X axis is horizontal and points to the right, the Y axis is vertical and
    # points up, and the Z axis points toward the outside of the screen face. In
    # this system, coordinates behind the screen have negative Z values.

    # *** Determine start and end of valid lin.accel and gyroscope data
    t_start_track = np.maximum(data_linacc[0][0], data_gyro[0][0])
    # End of track must use the last timestamps; the original repeated the
    # first elements ([0]), which just reproduced the start time.
    t_end_track = np.minimum(data_linacc[0][-1], data_gyro[0][-1])

    # *** GPS to cartesian
    # TODO: Note, Android world system spanned by vectors: gravity + direction to
    # magnetic north; GPScartesian: world axis and arbitrary x/y plane
    testpt = data_gps[1][:, 0:3]
    # testpt[:, 1]: longitude, should be x axis in ref. system
    # testpt[:, 0]: latitude, should be y axis in ref. system
    # Spherical-Earth conversion. The original used sin(lat) in the x and z
    # terms, which collapses the equator to a point and is presumably a typo
    # for cos(lat); see the reference sketch below this function.
    GPS_xyz = np.vstack(
        (np.cos(np.radians(testpt[:, 0])) * np.sin(np.radians(testpt[:, 1])) * (testpt[:, 2] + 6371000),
         np.sin(np.radians(testpt[:, 0])) * (testpt[:, 2] + 6371000),
         np.cos(np.radians(testpt[:, 0])) * np.cos(np.radians(testpt[:, 1])) * (testpt[:, 2] + 6371000))).transpose()
    GPS_xyz -= GPS_xyz[testpt.shape[0] // 2, :][None, :]  # center on the middle sample

    # *** normalize gravity vector to unit norm
    data_grav = (data_grav[0], data_grav[1] / np.linalg.norm(data_grav[1], axis=1)[:, None])

    # *** prepare camera orientation from rotvec data
    rotgt_org = np.zeros((len(data_rotvec[0]), 3, 3), dtype=float)
    for i in xrange(len(data_rotvec[0])):
        # absolute orientation estimation (directly from sensor)
        rotgt_org[i, :, :] = func_android_rotM_from_rotvec(data_rotvec[1][i, :], True)

    # *** estimate camera orientation from rotational acceleration data
    rotestim = func_android_rotM_from_gyroscope(data_gyro[0], data_gyro[1], dosvd=True)
    #rotestim, p = func_spline_orientation_smooth(data_gyro[0], rotestim)  # smooth orientation
    #rotestim_ = func_spline_orientation_interpolate(data_gyro[0][0:-1:40], rotestim[0:-1:40,:,:], data_gyro[0])
    #[func_comp_rot(rotestim[i,:,:], rotestim_new[i,:,:]) for i in xrange(rotestim_new.shape[0])]

    # *** compute camera trajectory from lin.accel and gyroscopic data
    rotestim_linacc = func_spline_orientation_interpolate(
        data_gyro[0], rotestim, data_linacc[0])  # fit gyroscopic orientation data to lin.accel data
    # fit GT camera orientation to lin.acc. data
    rotgt = func_spline_orientation_interpolate(data_rotvec[0], rotgt_org, data_linacc[0])

    linaccabs = np.zeros((data_linacc[0].shape[0], 3), dtype=float)  # linear acceleration in world frame
    for i in xrange(data_linacc[0].shape[0]):
        linaccabs[i, :] = np.dot(np.linalg.inv(rotgt[i, :, :]), data_linacc[1][i, :])  # use fused rotation data

    tck = scpint.splrep(data_linacc[0], linaccabs[:, 0], s=0.0)  # x acceleration spline
    tck = scpint.splantider(tck, 2)  # double integral -> x displacement
    x = scpint.splev(data_linacc[0], tck)
    tck = scpint.splrep(data_linacc[0], linaccabs[:, 1], s=0.0)  # y acceleration spline
    tck = scpint.splantider(tck, 2)  # double integral -> y displacement
    y = scpint.splev(data_linacc[0], tck)
    tck = scpint.splrep(data_linacc[0], linaccabs[:, 2], s=0.0)  # z acceleration spline
    tck = scpint.splantider(tck, 2)  # double integral -> z displacement
    z = scpint.splev(data_linacc[0], tck)
    posaccum = np.stack((x, y, z), axis=1)

    # posaccum = np.zeros((rotestim_linacc.shape[0],3), dtype=float)
    # R0 = func_android_rotM_from_rotvec(data_rotvec[1][0,:], True)
    # for i in xrange(data_linacc[0].shape[0]-2):
    #     #R = np.dot(R0, rotestim_linacc[i,:,:])  # use GT location from first frame to align initial camera
    #     R = func_android_rotM_from_rotvec(data_rotvec[1][i,:], True)  # use absolute orientation data to add linear acceleration
    #     #posaccum[i+1,:] = posaccum[i,:] + np.dot(np.linalg.inv(R), data_linacc[1][i,:])

    # *** estimate camera path from linear acceleration data
    # tck, u = scpint.splprep(data_linacc[1].transpose().tolist(), u=data_linacc[0], s=0.0)  # interpolation spline over actual sensor data
    #
    # #p = func_smoothing_spline_crossvalp(data_linacc[0][1000:1500], data_linacc[1][1000:1500,:], crossvalperc=0.1, crrounds=100, verbosity=1)
    # #linaccsm, LL, p = func_smoothing_spline(data_linacc[0], data_linacc[1], p)  # smooth acceleration
    # #tck, u = scpint.splprep(linaccsm.transpose().tolist(), u=data_linacc[0], s=0.0)  # interpolation spline over smoothed sensor data
    # #linaccsm = np.array(scpint.splev(data_gps[0], tck)).transpose()

    ### Plot camera path
    wh = np.array([1280.0, 960.0])
    fc = np.array([900.0, 900.0])
    cc = np.array([0.0, 0.0])
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot(posaccum[5:, 0], posaccum[5:, 1], posaccum[5:, 2],
            color=np.array([0.0, 1.0, 0.0]), linewidth=2.0)
    func_set_axes_equal(ax)
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    plt.show()

    ### Plot camera orientation path, and gravity direction
    wh = np.array([1280.0, 960.0])
    fc = np.array([900.0, 900.0])
    cc = np.array([0.0, 0.0])
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    t = np.array([1.0, 10.0, 100.0])
    for i in xrange(0, len(data_rotvec[0]), 50):
        # Plot cameras from absolute orientation estimation (directly from sensor)
        R = rotgt_org[i, :, :]  # func_android_rotM_from_rotvec(data_rotvec[1][i,:], True)
        tc = -np.dot(R, t)  # position in camera centric coordinate frame
        func_plot_cameras(ax, fc, cc, wh, R, tc,
                          rgbface=np.array([1.0, 0.0, 0.0]), camerascaling=2.0, lw=2.0)

    # for i in xrange(0, len(data_grav[0]), 40):  # Plot gravity vectors
    #     R = func_android_rotM_from_rotvec(data_rotvec[1][i,:], True)
    #     g_vec = data_grav[1][i,:]  # gravity vector (in camera reference frame)
    #     g_vec_rot = np.dot(R, g_vec[:])  # gravity vector (in world ref. frame, uses estimated camera orientation)
    #
    #     ax.plot([t[0], t[0]+g_vec[0]], [t[1], t[1]+g_vec[1]], [t[2], t[2]+g_vec[2]], color=np.array([0.0, 0.0, 1.0]), linewidth=2.0)
    #     ax.plot([t[0], t[0]+g_vec_rot[0]], [t[1], t[1]+g_vec_rot[1]], [t[2], t[2]+g_vec_rot[2]], color=np.array([0.0, 1.0, 0.0]), linewidth=2.0)

    # for i in xrange(0, data_gyro[0].shape[0], 10):  # Plot cameras from tracked orientation (accumulated gyroscope data)
    #     R0 = func_android_rotM_from_rotvec(data_rotvec[1][0,:], True)
    #     R = np.dot(R0, rotestim[i,:,:])  # use GT location from first frame to align initial camera
    #     tc = -np.dot(R, t)  # position in camera centric coordinate frame
    #     func_plot_cameras(ax, fc, cc, wh, R, tc, rgbface=np.array([0.0, 0.0, 1.0]), camerascaling=2.0, lw=4.0)

    #g_vec = data_grav[1][0,:]
    #ax.plot([t[0], t[0]+g_vec[0]], [t[1], t[1]+g_vec[1]], [t[2], t[2]+g_vec[2]], color=np.array([0.0, 1.0, 0.0]), linewidth=4.0)
    #g_vec = data_grav[1][-1,:]
    #ax.plot([t[0], t[0]+g_vec[0]], [t[1], t[1]+g_vec[1]], [t[2], t[2]+g_vec[2]], color=np.array([1.0, 0.0, 1.0]), linewidth=4.0)

    func_set_axes_equal(ax)
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    plt.show()

    ### Plot GPS path
    m = data_gps[0].shape[0]
    testpt = data_gps[1][:, 0:3]
    #p = func_smoothing_spline_crossvalp(data_gps[0], data_gps[1][:, 0:2], crossvalperc=0.1, crrounds=10, verbosity=1)
    # NOTE: p is only defined when the cross-validation line above is enabled;
    # as written, the call below assumes p is supplied elsewhere.
    testptsm, LL, p = func_smoothing_spline(data_gps[0], data_gps[1][:, 0:3], p)
    tck, u = scpint.splprep(testptsm.transpose().tolist(), u=data_gps[0], s=0.0)
    # new homogeneous samples
    t_new = np.linspace(data_gps[0][0], data_gps[0][-1], 500)
    testptsm = np.array(scpint.splev(t_new, tck)).transpose()
    plt.scatter(testpt[:, 1], testpt[:, 0], 24, color='blue')
    plt.scatter(testpt[0, 1], testpt[0, 0], 24, color='green')
    # The original passed a stray size argument (24) to plot(); unlike
    # scatter(), plot() takes no size parameter.
    plt.plot(testpt[:, 1], testpt[:, 0], '-', color='b')
    plt.plot(testptsm[:, 1], testptsm[:, 0], '-', color='r', markersize=24)
    plt.hold(True)
    plt.axis([np.min(testpt[:, 1]), np.max(testpt[:, 1]),
              np.min(testpt[:, 0]), np.max(testpt[:, 0])])
    plt.show()

    # 3D, display cartesian
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(GPS_xyz[:, 0], GPS_xyz[:, 1], GPS_xyz[:, 2],
               color=np.array([0.0, 0.0, 1.0]), linewidth=2.0)
    ax.scatter(GPS_xyz[0, 0], GPS_xyz[0, 1], GPS_xyz[0, 2],
               color=np.array([0.0, 1.0, 0.0]), linewidth=2.0)
    func_set_axes_equal(ax)
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    plt.show()
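# For reference, the textbook spherical-Earth conversion from (lat, lon, alt)
# to Cartesian coordinates used above -- an idealization with a single Earth
# radius; production code would typically go through a WGS84 library such as
# pyproj instead:
import numpy as np

def geodetic_to_cartesian_spherical(lat_deg, lon_deg, alt_m, R=6371000.0):
    lat, lon = np.radians(lat_deg), np.radians(lon_deg)
    r = R + alt_m
    x = r * np.cos(lat) * np.sin(lon)  # in the equator plane, toward lon = 90 deg
    y = r * np.sin(lat)                # toward the north pole
    z = r * np.cos(lat) * np.cos(lon)  # toward lat = 0, lon = 0
    return x, y, z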
from pylearn2.utils import serial

path = 'test_err_comp.pkl'
d = serial.load(path)
x = d['x']
ys = d['ys']
ys = zip(*ys)
model_paths = d['model_paths']
model_paths = [path.split('/')[-1].replace('.pkl', '') for path in model_paths]

from matplotlib import pyplot
pyplot.hold(True)
for y, path in zip(ys, model_paths):
    pyplot.plot(x, y, label=path)
pyplot.legend(loc='upper left')
pyplot.xlabel('probability of dropping an input unit')
pyplot.ylabel('misclass')
print 'showing'
pyplot.show()
def ncross2(a, b):
    # Header reconstructed: the snippet originally began mid-function with
    # this return statement; ncross2 (squared cross product) is what the
    # circumcenter formula below calls.
    return sq2(a) * sq2(b) - dot2(a, b) ** 2

def sq2(u):
    return dot2(u, u)

cc = cross2(sq2(a) * b - sq2(b) * a, a, b) / (2 * ncross2(a, b)) + C

# Grab the Voronoi edges
vc = cc[:, tri.neighbors]
vc[:, tri.neighbors == -1] = np.nan  # edges at infinity, plotting those would need more work...

lines = []
lines.extend(zip(cc.T, vc[:, :, 0].T))
lines.extend(zip(cc.T, vc[:, :, 1].T))
lines.extend(zip(cc.T, vc[:, :, 2].T))

# Plot it
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

lines = LineCollection(lines, edgecolor='k')

plt.hold(1)
plt.plot(points[:, 0], points[:, 1], '.')
plt.plot(cc[0], cc[1], '*')
plt.gca().add_collection(lines)
plt.axis('equal')
plt.xlim(-0.1, 1.1)
plt.ylim(-0.1, 1.1)
plt.show()
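# Usage sketch for the circumcenter computation above, assuming tri comes from
# scipy.spatial.Delaunay and points is an (N, 2) array; A, B, C are the
# (2, ntri) corner arrays the snippet operates on. (SciPy also ships
# scipy.spatial.Voronoi, which handles the infinite edges marked NaN above.)
import numpy as np
from scipy.spatial import Delaunay

points = np.random.rand(30, 2)
tri = Delaunay(points)
P = points[tri.simplices]                  # (ntri, 3, 2) triangle corners
A, B, C = (P[:, i, :].T for i in range(3))
a, b = A - C, B - C                        # edge vectors fed to cross2/ncross2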
New_train_X.append(line1) ############################################################################# #print X #y = np.sin(X).ravel() #print y ############################################################################### # Add noise to targets #y[::5] += 3 * (0.5 - np.random.rand(8)) #print y ############################################################################### # Fit regression model svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1) #svr_lin = SVR(kernel='linear', C=1e3) #svr_poly = SVR(kernel='poly', C=1e3, degree=2) y_rbf = svr_rbf.fit(parent, target).predict(New_train_X) #y_lin = svr_lin.fit(X, y).predict(X) #y_poly = svr_poly.fit(X, y).predict(X) ############################################################################### # look at the results plt.scatter(parent, target, c='k', label='data') plt.hold('on') plt.plot(New_train_X, y_rbf, c='g', label='RBF model') #plt.plot(X, y_lin, c='r', label='Linear model') #plt.plot(X, y_poly, c='b', label='Polynomial model') plt.xlabel('data') plt.ylabel('target') plt.title('Support Vector Regression') plt.legend() plt.show()
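# scikit-learn expects 2-D feature arrays, so parent and New_train_X above
# must have shape (n_samples, n_features). A self-contained version of the
# RBF fit on synthetic data:
import numpy as np
from sklearn.svm import SVR

X = np.sort(5 * np.random.rand(40, 1), axis=0)   # (n_samples, 1)
y = np.sin(X).ravel()
y_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1).fit(X, y).predict(X)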
pmedian = allpval[(len(allpval)-1)/2] else: pmedian = 0.5*(allpval[len(allpval)/2-1]+allpval[len(allpval)/2]) # lambda is a reserved word in python gclambda = stats.chi2.isf(pow(10, -pmedian), 1)/stats.chi2.isf(0.5, 1) logging.info('Median P = ' + ("%.4f" % pow(10, -pmedian)) + ' lambda = ' + ("%.4f" % gclambda)) logging.info('Generating QQ plot [ ' + options.output + '/qq.png' + ' ]') timebegin = datetime.datetime.now() plt.figure(figsize = [6, 6]) plt.axis([0, math.ceil(pmax + 0.5), 0, math.ceil(pmax + 0.5)]) #plt.setp(plt.gca(), 'xticks', midpt.values()) #plt.setp(plt.gca(), 'xticklabels', midpt.keys()) plt.tick_params(bottom = 'on', left = 'on', top = 'off', right = 'off', direction='out') plt.hold(1) # do we need this? plt.plot([0, math.ceil(pmax + 0.5)], [0, math.ceil(pmax + 0.5)], 'r-') plt.text(0.5*math.ceil(pmax + 0.5), 0.1, 'lambda = ' + ("%.4f" % gclambda), ha = 'center', va = 'baseline') this = plt.plot(-np.log10((np.array(range(len(allpval))) + 0.5)/len(allpval)), allpval[::-1], '.') # stride -1 to reverse plt.setp(this, 'color', [0, 0, 0], 'marker', '.', 'markeredgewidth', 0) plt.xlabel('Expected -log10(p)') plt.ylabel('Observed -log10(p)') plt.savefig(options.output + '/qq.png', dpi=200, bbox_inches = 'tight') timeend = datetime.datetime.now() logging.info('Generating QQ plot took ' + str((timeend-timebegin).seconds) + ' seconds') logging.info('Generating Manhattan plot [ ' + options.output + '/manhattan.png' + ' ]') timebegin = datetime.datetime.now()
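# The genomic-control inflation factor above compares the median observed
# test statistic to its null expectation. A compact equivalent on raw
# p-values (the snippet stores -log10(p), hence its pow(10, -pmedian)):
import numpy as np
from scipy import stats

pvals = np.random.uniform(size=10001)     # null p-values, so lambda ~ 1
chisq = stats.chi2.isf(pvals, 1)          # back to 1-df chi-square statistics
gc_lambda = np.median(chisq) / stats.chi2.isf(0.5, 1)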
def make_plots_for_paper_script(plot_type, **plot_options): """ Makes plots for the core shell nanowire paper""" #file_list=os.listdir(XRAY_DIRECTORY) # First we want to plot all of the diffractions before and after overcoating # For at least one sample. diffraction_list = ['002', '004', '006', '104', '105', '205'] before_sample = 'B982' after_sample = 'GN97' #Values computed using compare fit script, need to check GN84 (used GN84+GN84b) # Should this be the normalized difference in GN84 and GN84b? # Error has now been changed to 2 sigma or 120ppm which ever is bigger before_after = {'B982': 'GN97', 'B738': 'GN84', 'C023': 'GN169'} c_lattice = { 'B982': .518459, 'GN97': .51846, 'B738': .518432, 'GN84': .51844, 'GN84b': .51844, 'C023': .518494, 'GN169': .518536 } c_lattice_error = { 'B982': .00003, 'GN97': .00003, 'B738': .00003, 'GN84': .00003, 'GN84b': .00005, 'C023': .00003, 'GN169': .00007 } a_lattice = { 'B982': .31888, 'GN97': .31893, 'B738': .318984, 'GN84': .319177, 'GN84b': .318933, 'C023': .31905, 'GN169': .31888 } a_lattice_error = { 'B982': .00004, 'GN97': .00004, 'B738': .00003, 'GN84': .0002, 'GN84b': .0003, 'C023': .0001, 'GN169': .00007 } #c Lattice plot if plot_type in ['c_lattice_plot', 'c']: defalt_size = 48 axis_label_size = 48 params = { 'axes.labelsize': defalt_size, 'text.fontsize': defalt_size, 'legend.fontsize': defalt_size, 'xtick.labelsize': defalt_size, 'ytick.labelsize': defalt_size } matplotlib.rcParams.update(params) f = plt.figure(1) plot_axes = plt.axes() plt.xlabel(r"Sample", fontsize=axis_label_size) plt.ylabel(r"$\mathit{c}$ Lattice (nm)", fontsize=axis_label_size) plt.axhspan(0.51845, 0.51865, facecolor='0.25', alpha=0.25) plt.hold(1) def x_name(i): if (i + 1) % 2 is 0: return 'Core-Shell' else: return 'Core' i = 0 x_names = [] for key, value in before_after.iteritems(): i = 2 * before_after.keys().index(key) + 1 x = [i, i + 1] x_names.insert(i, key) x_names.insert(i + 1, value) print key, x y = [c_lattice[key], c_lattice[value]] error = [c_lattice_error[key], c_lattice_error[value]] a = plt.errorbar(x, y, error, fmt='s', lw=4) plot_axes.yaxis.set_major_formatter( ticker.ScalarFormatter(useOffset=False)) abc = 'a a b b c c'.split(' ') #x_labels=[x_name(i)+' ('+abc+')' for i,abc in enumerate(abc)] x_labels = x_names x_labels.insert(0, '') plot_axes.set_xticklabels(x_labels, fontsize=axis_label_size) plt.show() if plot_type in ['a_lattice_plot', 'a']: defalt_size = 32 axis_label_size = 32 params = { 'axes.labelsize': defalt_size, 'text.fontsize': defalt_size, 'legend.fontsize': defalt_size, 'xtick.labelsize': defalt_size, 'ytick.labelsize': defalt_size } matplotlib.rcParams.update(params) f = plt.figure(1) plot_axes = plt.axes() plt.xlabel(r"Sample", fontsize=axis_label_size) plt.ylabel(r"$\mathit{a}$ Lattice (nm)", fontsize=axis_label_size) # reference rectangle -- Porowski JCG 1998, Lo plt.axhspan(0.31876, 0.31894, facecolor='0.25', alpha=0.25) plt.hold(1) def x_name(i): if (i + 1) % 2 is 0: return 'Core-Shell' else: return 'Core' i = 0 x_names = [] for key, value in before_after.iteritems(): i = 2 * before_after.keys().index(key) + 1 x = [i, i + 1] x_names.insert(i, key) x_names.insert(i + 1, value) print key, x y = [a_lattice[key], a_lattice[value]] error = [a_lattice_error[key], a_lattice_error[value]] a = plt.errorbar(x, y, error, fmt='s', lw=4) plot_axes.yaxis.set_major_formatter( ticker.ScalarFormatter(useOffset=False)) abc = 'a a b b c c'.split(' ') #x_labels=[x_name(i)+' ('+abc+')' for i,abc in enumerate(abc)] x_labels = x_names 
x_labels.insert(0, '') plot_axes.set_xticklabels(x_labels, fontsize=axis_label_size) plt.show() if plot_type in ['before_after_two_theta', 'two_theta']: params = { 'axes.labelsize': 32, 'text.fontsize': 32, 'legend.fontsize': 32, 'xtick.labelsize': 32, 'ytick.labelsize': 32 } matplotlib.rcParams.update(params) f = plt.figure(1) plot_axes = plt.axes() plt.xlabel(r"2 $\theta-\omega$", fontsize=38) plt.ylabel("Normalized Intensity (Arb.)", fontsize=38) try: before_data = XrayData(plot_options['before']) after_data = XrayData(plot_options['after']) except: raise plt.plot(before_data.angle, before_data.normalized_counts, lw='4', color='b') plt.hold(1) plt.plot(after_data.angle, after_data.normalized_counts, lw='4', color='r') h = before_data.h k = before_data.k l = before_data.l sample_1 = before_data.sample sample_2 = after_data.sample #plt.title('Diffraction (%s %s %s) of Samples: %s (blue), %s (red)'%(h,k,l,sample_1,sample_2), #fontsize=32) plot_axes.xaxis.set_major_formatter( ticker.ScalarFormatter(useOffset=False)) try: plt.xlim((before_data.peak() - plot_options['angle_span'], before_data.peak() + plot_options['angle_span'])) plt.ylim((-.01, 1.01)) except: raise plt.show()
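# Note: the x_name helpers above test (i + 1) % 2 is 0; identity checks on
# small integers only pass by CPython implementation accident. Equality is
# the correct test:
def x_name(i):
    return 'Core-Shell' if (i + 1) % 2 == 0 else 'Core'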
def plot_fn(self, itr): # The rest is some example plotting code. # Plotting code is useful for visualizing trajectories across a few different tasks. if True and itr in PLOT_ITRS and self.env.observation_space.shape[ 0] == 2: # point-mass logger.log("Saving visualization of paths") for ind in range(min(5, self.meta_batch_size)): plt.clf() plt.plot(self.goals_to_use_dict[itr][ind][0], self.goals_to_use_dict[itr][ind][1], 'k*', markersize=10) plt.hold(True) preupdate_paths = all_paths_for_plotting[0] postupdate_paths = all_paths_for_plotting[-1] pre_points = preupdate_paths[ind][0]['observations'] post_points = postupdate_paths[ind][0]['observations'] plt.plot(pre_points[:, 0], pre_points[:, 1], '-r', linewidth=2) plt.plot(post_points[:, 0], post_points[:, 1], '-b', linewidth=1) pre_points = preupdate_paths[ind][1]['observations'] post_points = postupdate_paths[ind][1]['observations'] plt.plot(pre_points[:, 0], pre_points[:, 1], '--r', linewidth=2) plt.plot(post_points[:, 0], post_points[:, 1], '--b', linewidth=1) pre_points = preupdate_paths[ind][2]['observations'] post_points = postupdate_paths[ind][2]['observations'] plt.plot(pre_points[:, 0], pre_points[:, 1], '-.r', linewidth=2) plt.plot(post_points[:, 0], post_points[:, 1], '-.b', linewidth=1) plt.plot(0, 0, 'k.', markersize=5) plt.xlim([-0.8, 0.8]) plt.ylim([-0.8, 0.8]) plt.legend(['goal', 'preupdate path', 'postupdate path']) plt.savefig( osp.join( logger.get_snapshot_dir(), 'prepost_path' + str(ind) + '_' + str(itr) + '.png')) print( osp.join( logger.get_snapshot_dir(), 'prepost_path' + str(ind) + '_' + str(itr) + '.png')) elif True and itr in PLOT_ITRS and self.env.observation_space.shape[ 0] == 8: # 2D reacher logger.log("Saving visualization of paths") # def fingertip(env): # while 'get_body_com' not in dir(env): # env = env.wrapped_env # return env.get_body_com('fingertip') for ind in range(min(5, self.meta_batch_size)): plt.clf() print("debug13,", itr, ind) a = self.goals_to_use_dict[itr][ind] plt.plot(self.goals_to_use_dict[itr][ind][0], self.goals_to_use_dict[itr][ind][1], 'k*', markersize=10) plt.hold(True) preupdate_paths = all_paths_for_plotting[0] postupdate_paths = all_paths_for_plotting[-1] pre_points = np.array([ obs[6:8] for obs in preupdate_paths[ind][0]['observations'] ]) post_points = np.array([ obs[6:8] for obs in postupdate_paths[ind][0]['observations'] ]) plt.plot(pre_points[:, 0], pre_points[:, 1], '-r', linewidth=2) plt.plot(post_points[:, 0], post_points[:, 1], '-b', linewidth=1) pre_points = np.array([ obs[6:8] for obs in preupdate_paths[ind][1]['observations'] ]) post_points = np.array([ obs[6:8] for obs in postupdate_paths[ind][1]['observations'] ]) plt.plot(pre_points[:, 0], pre_points[:, 1], '--r', linewidth=2) plt.plot(post_points[:, 0], post_points[:, 1], '--b', linewidth=1) pre_points = np.array([ obs[6:8] for obs in preupdate_paths[ind][2]['observations'] ]) post_points = np.array([ obs[6:8] for obs in postupdate_paths[ind][2]['observations'] ]) plt.plot(pre_points[:, 0], pre_points[:, 1], '-.r', linewidth=2) plt.plot(post_points[:, 0], post_points[:, 1], '-.b', linewidth=1) plt.plot(0, 0, 'k.', markersize=5) plt.xlim([-0.25, 0.25]) plt.ylim([-0.25, 0.25]) plt.legend(['goal', 'preupdate path', 'postupdate path']) plt.savefig( osp.join( logger.get_snapshot_dir(), 'prepost_path' + str(ind) + '_' + str(itr) + '.png')) print( osp.join( logger.get_snapshot_dir(), 'prepost_path' + str(ind) + '_' + str(itr) + '.png')) if self.make_video and itr in VIDEO_ITRS: logger.log("Saving videos...") 
self.env.reset(reset_args=self.goals_to_use_dict[itr][ind]) video_filename = osp.join( logger.get_snapshot_dir(), 'post_path_%s_%s.gif' % (ind, itr)) rollout(env=self.env, agent=self.policy, max_path_length=self.max_path_length, animated=True, speedup=2, save_video=True, video_filename=video_filename, reset_arg=self.goals_to_use_dict[itr][ind], use_maml=True, maml_task_index=ind, maml_num_tasks=self.meta_batch_size) elif self.make_video and itr in VIDEO_ITRS: for ind in range(min(2, self.meta_batch_size)): logger.log("Saving videos...") self.env.reset(reset_args=self.goals_to_use_dict[itr][ind]) video_filename = osp.join(logger.get_snapshot_dir(), 'post_path_%s_%s.gif' % (ind, itr)) rollout(env=self.env, agent=self.policy, max_path_length=self.max_path_length, animated=True, speedup=2, save_video=True, video_filename=video_filename, reset_arg=self.goals_to_use_dict[itr][ind], use_maml=True, maml_task_index=ind, maml_num_tasks=self.meta_batch_size, extra_input_dim=self.extra_input_dim) self.policy.switch_to_init_dist() for ind in range(min(2, self.meta_batch_size)): logger.log("Saving videos...") self.env.reset(reset_args=self.goals_to_use_dict[itr][ind]) video_filename = osp.join(logger.get_snapshot_dir(), 'pre_path_%s_%s.gif' % (ind, itr)) rollout( env=self.env, agent=self.policy, max_path_length=self.max_path_length, animated=True, speedup=2, save_video=True, video_filename=video_filename, reset_arg=self.goals_to_use_dict[itr][ind], use_maml=False, extra_input_dim=self.extra_input_dim, # maml_task_index=ind, # maml_num_tasks=self.meta_batch_size ) elif False and itr in PLOT_ITRS: # swimmer or cheetah logger.log("Saving visualization of paths") for ind in range(min(5, self.meta_batch_size)): plt.clf() goal_vel = self.goals_to_use_dict[itr][ind] plt.title('Swimmer paths, goal vel=' + str(goal_vel)) plt.hold(True) prepathobs = all_paths_for_plotting[0][ind][0]['observations'] postpathobs = all_paths_for_plotting[-1][ind][0][ 'observations'] plt.plot(prepathobs[:, 0], prepathobs[:, 1], '-r', linewidth=2) plt.plot(postpathobs[:, 0], postpathobs[:, 1], '--b', linewidth=1) plt.plot(prepathobs[-1, 0], prepathobs[-1, 1], 'r*', markersize=10) plt.plot(postpathobs[-1, 0], postpathobs[-1, 1], 'b*', markersize=10) plt.xlim([-1.0, 5.0]) plt.ylim([-1.0, 1.0]) plt.legend(['preupdate path', 'postupdate path'], loc=2) plt.savefig( osp.join( logger.get_snapshot_dir(), 'swim1d_prepost_itr' + str(itr) + '_id' + str(ind) + '.pdf'))
def perfprof(data, linespecs=None, linewidth=2.0, thmax=None,
             thlabel=r'$\theta$', plabel='$p$', tol=np.double(1e-8),
             legendnames=None, legendpos=0, fontsize=24, tickfontsize=18,
             legendfontsize=18, ppfix=False, ppfixmin=np.double(1e-18),
             ppfixmax=np.finfo(np.double).eps / 2, usetex=None):
    r"""
    Plot a performance profile.

    Make a performance profile of the array *data*. A performance profile is
    an alternative to a scatter plot when we compare multiple series of data
    and are looking for the set of values which is smallest on average.
    Typically this would be comparing alternative algorithms for a problem
    in terms of their runtime or relative error over a set of test cases.

    The x-axis represents a tolerance factor, whilst the y-axis is a
    proportion. If a line passes through the point (2, 0.8) then the
    corresponding data set was within a factor 2 of the smallest observed
    value on 80% of the test cases. If the line first reaches y=1 at the
    point (10.5, 1) then this data set was always within a factor 10.5 of
    the smallest value observed in each case. See the references for
    additional detail.

    This code is based upon perfprof from the MATLAB Guide by D. J. Higham
    and N. J. Higham. The original code can be downloaded here:
    http://www.maths.man.ac.uk/~higham/mg/m/perfprof.m

    Parameters
    ----------
    data - Array of timings/errors to plot. The rows of the array must be
        the different test cases whilst the columns are the different
        algorithms to compare.
    linespecs - List of line specifications, e.g. ['r-', 'k:']
    linewidth - Width of the lines.
    thmax - Maximum value of theta shown on the x-axis. If None then thmax
        defaults to the point where all algorithms reach 1 on the y-axis.
    thlabel - Rename theta on the x-axis label.
    plabel - Rename p on the y-axis label.
    tol - Tolerance on the x-coordinates to ensure plots fit on the graph.
    legendnames - Labels for the lines, used to create a legend. If None
        the legend is not created.
    legendpos - Position of the legend.
    fontsize - Font size for the x and y-axis labels.
    tickfontsize - Font size for the x and y-axis tick labels.
    legendfontsize - Font size for the legend text.
    ppfix - Modify the data to avoid tiny results skewing the performance
        profile. Performs a linear interpolation on small data points to
        avoid skewing the results. Useful for plotting performance profiles
        of relative errors: see reference [2].
    ppfixmin - The smallest possible data point after manipulation by ppfix.
    ppfixmax - Data points below this value will be modified by ppfix.
    usetex - Use LaTeX for all the labels in the plot.

    References
    ----------
    [1] E. D. Dolan, and J. J. More, Benchmarking Optimization Software
        with Performance Profiles. Math. Programming, 91:201-213, 2002.
    [2] N. J. Dingle, and N. J. Higham, Reducing the Influence of Tiny
        Normwise Relative Errors on Performance Profiles. ACM Trans.
        Math. Software, 39(4):24:1-24:11, 2013.
""" usetexorig = plt.rcParams['text.usetex'] if usetex is not None: try: plt.rc('text', usetex=usetex) except e: print('Problem changing use of LaTeX.') data = np.asarray(data).astype(np.double) if ppfix: data = np.array(data >= ppfixmax, dtype=np.int) * data + \ np.array(data < ppfixmax, dtype=np.int) * \ (ppfixmin + data*(ppfixmax - ppfixmin)/ppfixmax) minvals = np.min(data, axis=1) if thmax is None: thmax = np.max(np.max(data, axis=1) / minvals) m, n = data.shape # m tests cases, n alternatives if len(linespecs) != n: raise ValueError("Length of argument linespecs must equal " "number of columns in the input data.") if legendnames is not None: if len(legendnames) != n: raise ValueError("Length of argument legendnames must " "equal number of columns in input data.") plt.figure() for alt in range(n): # for each alternative col = data[:, alt] / minvals # performance ratio col = col[~np.isnan(col)] # remove nans if len(col) == 0: continue theta = np.unique(col) #theta = np.sort(col) r = len(theta) myarray = np.repeat(col, r).reshape(len(col), r) <= \ np.repeat(theta, len(col)).reshape((len(col), r), order='F') myarray = np.array(myarray, dtype=np.double) prob = np.sum(myarray, axis=0) / m # Get points to print staircase plot k = np.array(np.floor(np.arange(0, r, 0.5)), dtype=np.int) x = theta[k[1:]] y = prob[k[0:-1]] # check endpoints if x[0] >= 1 + tol: x = np.append([1, x[0]], x) y = np.append([0, 0], y) if x[-1] < thmax - tol: x = np.append(x, thmax) y = np.append(y, y[-1]) # plot current line plt.hold('on') if legendnames is None: plt.plot(x, y, linespecs[alt], linewidth=linewidth) else: plt.plot(x, y, linespecs[alt], linewidth=linewidth, label=legendnames[alt]) # set labels and ticks plt.xlabel(thlabel, fontsize=fontsize) plt.ylabel(plabel, fontsize=fontsize) plt.tick_params(labelsize=tickfontsize) # create legend plt.legend(loc=legendpos, fontsize=legendfontsize) # set xlim plt.xlim([1, thmax]) plt.ylim([0, 1.01]) plt.tight_layout() plt.hold('off') plt.draw() if usetex is not None: try: plt.rc('text', usetex=usetexorig) except e: print('Problem changing use of LaTeX.')
def main(): '''Define the main function. ''' # Create a sine-wave t = np.arange(0, 10, 0.1) x = np.sin(t) # Save the data in a text-file, in column form # The formatting is a bit clumsy: data are by default row variables; so to # get a matrix, you stack the two rows above each other, and then transpose # the matrix outFile = 'test.txt' np.savetxt(outFile, np.vstack([t, x]).T) # Read the data into a different variable inData = np.loadtxt(outFile) t2 = inData[:, 0] # Note that Python starts at "0"! x2 = inData[:, 1] # Plot the data, and wait for the user to click plt.show() plt.plot(t2, x2) plt.title('Hit any key to continue') plt.waitforbuttonpress() # Generate a noisy line t = np.arange(-100, 100) # use a Python "dictionary" for named variables par = {'offset': 100, 'slope': 0.5, 'noiseAmp': 4} x = par['offset'] + par['slope'] * t + par['noiseAmp'] * sp.randn(len(t)) # Select "late" values, i.e. with t>10 xHigh = x[t > 10] tHigh = t[t > 10] # Plot the "late" data plt.close() plt.plot(tHigh, xHigh) # Determine the best-fit line # To do so, you have to generate a matrix with "time" in the first # column, and a column of "1" in the second column: xMat = np.vstack((tHigh, np.ones(len(tHigh)))).T slope, intercept = np.linalg.lstsq(xMat, xHigh)[0] # Show and plot the fit, and save it to a PNG-file with a medium resolution. # The "modern" way of Python-formatting is used plt.hold(True) plt.plot(tHigh, intercept + slope * tHigh, 'r') plt.title('Hit any key to continue') plt.savefig('linefit.png', dpi=200) plt.waitforbuttonpress() plt.close() print(('Fit line: intercept = {0:5.3f}, and slope = {1:5.3f}'.format( intercept, slope))) #raw_input('Thanks for using programs by Thomas!') # If you want to know confidence intervals, best switch to "pandas" # Note that this is an advanced topic, and requires new data structures # such ad "DataFrames" and "ordinary-least-squares" or "ols-models". import pandas myDict = {'x': tHigh, 'y': xHigh} df = pandas.DataFrame(myDict) model = pandas.ols(y=df['y'], x=df['x']) print(model)
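# The design-matrix fit above ([t, 1] against x) is a degree-1 polynomial
# fit, so np.polyfit gives the same line in one call. Note also that
# pandas.ols was removed from pandas long ago; statsmodels is the usual
# replacement for a fitted OLS summary:
import numpy as np

t = np.arange(-100, 100).astype(float)
x = 100 + 0.5 * t + 4 * np.random.randn(len(t))
slope, intercept = np.polyfit(t, x, 1)   # same solution as the lstsq fit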
left015Freqs = summary_tuning['d1pi015_left'] left016Freqs = summary_tuning['d1pi016_left'] right014Freqs = summary_tuning['d1pi014_right'] right015Freqs = summary_tuning['d1pi015_right'] right016Freqs = summary_tuning['d1pi016_right'] left014FreqsSessions = summary_tuning['d1pi014_left_sessions'] left015FreqsSessions = summary_tuning['d1pi015_left_sessions'] left016FreqsSessions = summary_tuning['d1pi016_left_sessions'] right014FreqsSessions = summary_tuning['d1pi014_right_sessions'] right015FreqsSessions = summary_tuning['d1pi015_right_sessions'] right016FreqsSessions = summary_tuning['d1pi016_right_sessions'] allLeftFreqs = np.concatenate([left014Freqs, left015Freqs]) allRightFreqs = np.concatenate([right015Freqs, right016Freqs]) plt.hold('True') randOffset = 0.3 * (np.random.rand(len(allLeftFreqs)) - 0.5) #ax1.plot(1+randOffset, allLeftFreqs, 'o', mec=PHOTOSTIMCOLORS['laser_left'], mfc='None') ax1.plot(1 + randOffset, allLeftFreqs, 'o', mec='k', mfc='None') randOffset = 0.3 * (np.random.rand(len(allRightFreqs)) - 0.5) #ax1.plot(2+randOffset, allRightFreqs, 'o', mec=PHOTOSTIMCOLORS['laser_right'], mfc='None') ax1.plot(2 + randOffset, allRightFreqs, 'o', mec='k', mfc='None') meanLeftFreq = np.mean(allLeftFreqs) meanRightFreq = np.mean(allRightFreqs) #ax1.plot(0.3*np.array([-1,1])+1, 100*np.tile(meanLeftFreq,2), lw=3, color=PHOTOSTIMCOLORS['laser_left']) ax1.plot(0.3 * np.array([-1, 1]) + 1, 100 * np.tile(meanLeftFreq, 2), lw=3, color='k') #ax1.plot(0.3*np.array([-1,1])+2, 100*np.tile(meanRightFreq,2), lw=3, color=PHOTOSTIMCOLORS['laser_right'])
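# The random offsets above implement a jittered categorical scatter so that
# overlapping points stay visible. Minimal standalone version with a group
# mean bar:
import numpy as np
import matplotlib.pyplot as plt

vals = np.random.randn(50)
jitter = 0.3 * (np.random.rand(len(vals)) - 0.5)   # uniform in [-0.15, 0.15)
plt.plot(1 + jitter, vals, 'o', mec='k', mfc='None')
plt.plot([0.85, 1.15], [vals.mean()] * 2, lw=3, color='k')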
def plotsta(ich, sta_ich, neighborpixels_pos, spkcnt_ich, dt_sta_graphics, time_bins,figpath,spksrc,\ figformat,maxsta_idx,dt_sta,dur_sta, no_neighbpix_x,locc, locc_, minc, locr, locr_, minr, popt_on,mappedrf_on,\ minsta_idx, popt_off, mappedrf_off): fntsz = 12 darkyellow = [255./255,200./255,0./255]# [238./255,238./255,0] x = np.linspace(0, no_neighbpix_x-1, no_neighbpix_x) y = np.linspace(0, no_neighbpix_x-1, no_neighbpix_x) x, y = np.meshgrid(x, y) plt.figure(figsize=(8,6)) plt.subplot(221) plt.plot(sta_ich[:,neighborpixels_pos], color='grey') STASHAPES = sta_ich[:,neighborpixels_pos] #sure? plt.hold('on') # plt.plot(sta_ich[:,maxsta_idx[1]], color='r') # plt.title('STA - '+ 'spkcnt ' + str(spkcnt_ich) , fontsize=10) plt.xlabel('Time (s)', fontsize=fntsz) plt.ylabel('Pixel brightness', fontsize=fntsz) plt.xticks(np.arange(0, len(sta_ich[:,0]), 1/dt_sta_graphics), time_bins) plt.ylim([0,1]) polish_fig(plt.gca(),xlabcoord=-.15,vis_rightaxis=True, vis_leftaxis=False, vis_topaxis=False, vis_bottomaxis=True, xtick_pos='bottom', ytick_pos='right',xlabel_pos='bottom', ylabel_pos='right',boldvline=False) plt.subplots_adjust(hspace=.4) # Plotting firing rate******* #TODO plt.subplot(222) plt.axis('off') plt.subplot(223) img_on = sta_ich[maxsta_idx[0],neighborpixels_pos] # sta_f = scipy.io.loadmat('/Users/sahar/Documents/spktanlz/data/retina/P38_06Mar14/ret2/Luiz/neuron_789_maxsta_SA2.mat', squeeze_me=True) # img_on = np.hstack(sta_f['mean_sta'].transpose().reshape([27*27,1])) plt.title('Positive peak (t = '+ str(int((maxsta_idx[0]*dt_sta - dur_sta)*1000)) + ' ms)' , fontsize=10, color='k') plt.imshow(np.reshape(img_on, [no_neighbpix_x, no_neighbpix_x]), cmap=pylab.cm.gray, interpolation='none') POSFIG = np.reshape(img_on, [no_neighbpix_x, no_neighbpix_x]) POSPEAK = int((maxsta_idx[0]*dt_sta - dur_sta)*1000) POSCENTRE = [locc-minc-.5, locr-minr-.5] plt.text(locc-minc-.5, locr-minr-.5,'+',color='r',horizontalalignment='center',verticalalignment='center', fontsize=fntsz) if popt_on.any(): data_fitted_on = Gaussian2D_bivariate((x, y), *popt_on) if data_fitted_on.any(): plt.contour(x, y, data_fitted_on.reshape(no_neighbpix_x, no_neighbpix_x), [Gaussian2D_bivariate((popt_on[0]+popt_on[3], popt_on[1]+popt_on[4]), *popt_on)], colors='k', linewidths=2) plt.text(popt_on[0],popt_on[1],'+',color='k',horizontalalignment='center',verticalalignment='center', fontsize=fntsz) if mappedrf_on: plt.title('Positive peak (t = '+ str(int((maxsta_idx[0]*dt_sta - dur_sta)*1000)) + ' ms)' , fontsize=fntsz, color=darkyellow) plt.contour(x, y, data_fitted_on.reshape(no_neighbpix_x, no_neighbpix_x), [Gaussian2D_bivariate((popt_on[0]+popt_on[3], popt_on[1]+popt_on[4]), *popt_on)], colors='orange', linewidths=2) plt.text(popt_on[0],popt_on[1],'+',color='orange',horizontalalignment='center',verticalalignment='center', fontsize=fntsz) plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off') plt.tick_params(axis='y', which='both', right='off', left='off', labelleft='off') plt.subplot(224) img_off = sta_ich[minsta_idx[0],neighborpixels_pos] plt.title('Negative peak (t = '+ str(int((minsta_idx[0]*dt_sta - dur_sta)*1000)) + ' ms)' , fontsize=fntsz) plt.imshow(np.reshape(img_off, [no_neighbpix_x, no_neighbpix_x]), cmap=pylab.cm.gray, interpolation='none') NEGFIG = np.reshape(img_off, [no_neighbpix_x, no_neighbpix_x]) NEGPEAK = int((minsta_idx[0]*dt_sta - dur_sta)*1000) NEGCENTRE = [locc-minc-.5, locr-minr-.5] plt.text(locc-minc-.5, 
locr-minr-.5,'+',color='r',horizontalalignment='center',verticalalignment='center', fontsize=fntsz) if popt_off.any(): data_fitted_off = Gaussian2D_bivariate((x, y), *popt_off) if data_fitted_off.any(): plt.contour(x, y, data_fitted_off.reshape(no_neighbpix_x, no_neighbpix_x), [Gaussian2D_bivariate((popt_off[0]+popt_off[3], popt_off[1]+popt_off[4]), *popt_off)], colors='k', linewidths=2) plt.text(popt_off[0], popt_off[1],'+',color='k',horizontalalignment='center',verticalalignment='center', fontsize=fntsz) # pdb.set_trace() if mappedrf_off: plt.title('Negative peak (t = '+ str(int((minsta_idx[0]*dt_sta - dur_sta)*1000)) + ' ms)' , color=tableau[20],fontsize=fntsz) plt.contour(x, y, data_fitted_off.reshape(no_neighbpix_x, no_neighbpix_x), [Gaussian2D_bivariate((popt_off[0]+popt_off[3], popt_off[1]+popt_off[4]), *popt_off)], colors='b', linewidths=2) plt.text(popt_off[0], popt_off[1],'+',color=tableau[20],horizontalalignment='center',verticalalignment='center', fontsize=fntsz) plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off') plt.tick_params(axis='y', which='both', right='off', left='off', labelleft='off') plt.suptitle(spksrc+ str(ich)+ ' - loc (' + "%0.1f" % locr_ + ',' + "%0.1f" % locc_ + ')', fontsize=fntsz, y=.99) figname = os.path.join(figpath,'sta_'+spksrc+str(ich)+'_spkcnt'+str(spkcnt_ich)+'dur'+str(dur_sta)+'dt'+str(dt_sta)+figformat) plt.savefig(figname, bbox_inches='tight') print 'figure saved to:', figname plt.close() filename = os.path.join(figpath,'sta_'+spksrc+str(ich)+'_spkcnt'+str(spkcnt_ich)+'dur'+str(dur_sta)+'dt'+str(dt_sta)+'.hdf5') FL = h5py.File(filename) FL.create_dataset('negfig',data=NEGFIG) FL.create_dataset('posfig',data=POSFIG) FL.create_dataset('negpeak',data=NEGPEAK) FL.create_dataset('pospeak',data=POSPEAK) FL.create_dataset('poscentre',data=POSCENTRE) FL.create_dataset('negcentre',data=NEGCENTRE) FL.create_dataset('sta',data=STASHAPES) FL.close()
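# Round-trip sketch for the HDF5 datasets written above (dataset names follow
# the snippet; the filename and values here are dummies):
import h5py
import numpy as np

with h5py.File('sta_example.hdf5', 'w') as f:      # hypothetical filename
    f.create_dataset('posfig', data=np.zeros((27, 27)))
    f.create_dataset('pospeak', data=-120)
with h5py.File('sta_example.hdf5', 'r') as f:
    posfig = f['posfig'][...]                      # read back as ndarray
    pospeak = int(f['pospeak'][()])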
def run(self): measurementset = self.input.v('measurementset') data = self.input.getKeysOf('experimentdata') calib_param = ny.toList(self.input.v('calibration_parameter')) label = ny.toList(self.input.v('label')) unit_temp = ny.toList(self.input.v('unit')) unit = [] for u in unit_temp: if u == '-': unit.append('') else: unit.append('($' + u + '$)') if len(calib_param) == 1: param_range = [[]] elif len(calib_param) == 2: param_range = [[], []] else: raise KnownError( 'ManualCalibration not implemented for calibration with more than 3 parameters.' ) # inital loop to determine the parameter ranges for i, dat in enumerate(data): dc = self.input.v('experimentdata', dat) for j in range(0, len(calib_param)): param_range[j].append(dc.v(calib_param[j])) for j in range(0, len(calib_param)): param_range[j] = sorted(list(set(param_range[j]))) # second loop to determine the values per parameter setting cost_range = np.nan * np.zeros([len(l) for l in param_range] + [2]) for dat in data: dc = self.input.v('experimentdata', dat) index = [ l.index(dc.v(calib_param[i])) for i, l in enumerate(param_range) ] L = dc.v('grid', 'high', 'x') H0 = dc.n('grid', 'high', 'z', x=0) dc.merge(self.input.slice(measurementset)) x_obs = dc.v(measurementset, 'x_waterlevel') / L x_ext = np.zeros(len(x_obs) + 2) x_ext[1:-1] = x_obs x_ext[0] = 0 x_ext[-1] = 1 zeta_obs = dc.v(measurementset, 'zeta', x=x_obs, z=0, f=[1, 2]) # Start fix # zeta_obs = dc.data[measurementset]['zeta'].im_self.dataContainer.data['value'][:,1:3] #fix # from copy import deepcopy #fix # newgrid = deepcopy(dc.data['zeta0']['tide'].im_self.dataContainer.data['grid']['outputgrid']) #fix # i = 0 # while i is not None: # try: # keys = dc.data['zeta'+str(i)].keys() # for key in keys: # if i<2: # dc.data['zeta'+str(i)][key].im_self.dataContainer.data['grid'] = newgrid # else: # keys2 =dc.data['zeta'+str(i)][key].keys() # for key2 in keys2: # dc.data['zeta'+str(i)][key][key2].im_self.dataContainer.data['grid'] = newgrid # i += 1 # except: # i = None # End fix zeta_mod = 0 i = 0 while True: if dc.v('zeta' + str(i), x=x_obs, z=0, f=[1, 2]) is not None: zeta_mod += dc.v('zeta' + str(i), x=x_obs, z=0, f=[1, 2]) i += 1 else: break if i > 3: break cost_range[tuple(index) + (0, )] = cost_function_DJ96( x_ext, zeta_obs[:, 0], zeta_mod[:, 0]) cost_range[tuple(index) + (1, )] = cost_function_DJ96( x_ext, zeta_obs[:, 1], zeta_mod[:, 1]) st.configure() # 1D plots if len(calib_param) == 1: try: minlocM2 = [ param_range[0][np.where( cost_range[:, 0] == np.min(cost_range[:, 0]))[0]] ] minlocM4 = [ param_range[0][np.where( cost_range[:, 1] == np.min(cost_range[:, 1]))[0]] ] except: minlocM2 = [np.nan] minlocM4 = [np.nan] print 'Minumim $M_2$: ' print calib_param[0] + ' ' + str(minlocM2[0]) print 'Minumim $M_4$: ' print calib_param[0] + ' ' + str(minlocM4[0]) if self.input.v('axis') == 'log': axis = np.log10(param_range[0]) label = '$log_{10}$($' + label[0] + '$)' + unit[0] minlocM2 = np.log10(minlocM2) minlocM4 = np.log10(minlocM4) else: axis = param_range[0] label = '$' + calib_param[0] + '$' + unit[0] plt.figure(1, figsize=(1, 1)) plt.plot(axis, cost_range[:, 0], 'k.') plt.plot(minlocM2[0], np.min(cost_range[:, 0]), 'ro') plt.xlabel(label) plt.ylabel('Cost $M_2$') plt.yticks([], []) plt.ylim(0, max(cost_range[:, 0])) plt.figure(2, figsize=(1, 1)) plt.plot(axis, cost_range[:, 1], 'k.') plt.plot(minlocM4[0], np.min(cost_range[:, 1]), 'ro') plt.xlabel(label) plt.ylabel('Cost $M_4$') plt.yticks([], []) plt.ylim(0, max(cost_range[:, 1])) # 2D plots elif len(calib_param) == 2: try: 
minlocM2 = [ param_range[0][np.where( cost_range[:, :, 0] == np.min(cost_range[:, :, 0]))[0]], param_range[1][np.where( cost_range[:, :, 0] == np.min(cost_range[:, :, 0]))[1]] ] minlocM4 = [ param_range[0][np.where( cost_range[:, :, 1] == np.min(cost_range[:, :, 1]))[0]], param_range[1][np.where( cost_range[:, :, 1] == np.min(cost_range[:, :, 1]))[1]] ] except: minlocM2 = [np.nan, np.nan] minlocM4 = [np.nan, np.nan] print 'Minumim $M_2$: ' print calib_param[0] + ' ' + str(minlocM2[0]) print calib_param[1] + ' ' + str(minlocM2[1]) print 'Minumim $M_4$: ' print calib_param[0] + ' ' + str(minlocM4[0]) print calib_param[1] + ' ' + str(minlocM4[1]) if self.input.v('axis') == 'log': axis1 = np.log10(param_range[0]) axis2 = np.log10(param_range[1]) label1 = '$log_{10}$($' + label[0] + '$)' + unit[0] label2 = '$log_{10}$($' + label[1] + '$)' + unit[1] minlocM2 = [np.log10(i) for i in minlocM2] minlocM4 = [np.log10(i) for i in minlocM4] else: axis1 = param_range[0] axis2 = param_range[1] label1 = '$' + label[0] + '$' + unit[0] label2 = '$' + label[1] + '$' + unit[1] plt.figure(1, figsize=(1, 1)) plt.hold(True) plt.contourf(axis1, axis2, np.transpose(cost_range[:, :, 0]), 30) plt.plot(minlocM2[0], minlocM2[1], 'ro') # plt.plot(axis1, np.log10(0.5*10**axis2*H0), 'r') plt.xlim(min(axis1), max(axis1)) plt.ylim(min(axis2), max(axis2)) # plt.plot(np.log10(0.003), np.log10(0.061), 'yo') # best Scheldt calibration # plt.plot(np.log10(0.098), np.log10(0.019), 'yo') # best Ems1981 calibration plt.title('Cost $M_2$') plt.xlabel(label1) plt.ylabel(label2) #plt.colorbar() plt.figure(2, figsize=(1, 1)) plt.hold(True) plt.plot(minlocM4[0], minlocM4[1], 'ro') plt.contourf(axis1, axis2, np.transpose(cost_range[:, :, 1]), 30) # plt.plot(axis1, np.log10(0.5*10**axis2*H0), 'r') plt.xlim(min(axis1), max(axis1)) plt.ylim(min(axis2), max(axis2)) plt.title('Cost $M_4$') plt.xlabel(label1) plt.ylabel(label2) #plt.colorbar() st.show() d = {} return d
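# The nested np.where lookups above recover the (row, col) of the cost
# minimum; np.unravel_index expresses this directly and avoids the
# bare-except fallback:
import numpy as np

cost = np.random.rand(11, 13)   # stand-in for cost_range[:, :, 0]
i, j = np.unravel_index(np.argmin(cost), cost.shape)
assert cost[i, j] == cost.min()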