def run(is_tile):
    m, xmin, xmax, ymin, ymax = mdp1()
    fig = plt.gcf()
    if is_tile:
        f = lf.TileEncoding(m, 5, 1, 14)
    else:
        f = lf.RadialBasis(m, 200, 2)
    pi, f_exp, theta = lf.qlearn3(m, f, num_episodes=500)
    # pp = pprint.PrettyPrinter()
    # pp.pprint(theta)
    heatmap = gen_heatmap(f_exp, f, xmin, xmax, ymin, ymax, 0.1, 0.1)
    # heatmap = gen_heatmap_2(f, xmin, xmax, ymin, ymax, 0.05, 0.05)
    plt.clf()
    cax = plt.imshow(heatmap, origin="lower")
    hmin, hmax = np.min(heatmap), np.max(heatmap)
    cbar = plt.colorbar(cax, ticks=[hmin, 0, hmax])
    # m.draw(fig)
    x = np.linspace(m.dimensions[0][0], m.dimensions[0][1], 20)
    y = np.linspace(m.dimensions[1][0], m.dimensions[1][1], 20)
    x, y = zip(*itertools.product(list(x), list(y)))
    # Map an action name to a unit displacement vector.
    to_v = lambda a: (-1, 0) if a == "left" else \
                     (1, 0) if a == "right" else \
                     (0, 1) if a == "up" else \
                     (0, -1)
    vx, vy = zip(*[to_v(pi((x1, y1))) for x1, y1 in zip(x, y)])
    # Scale state coordinates to heatmap pixel coordinates (0.1 resolution).
    x = [10 * x_i for x_i in x]
    y = [10 * y_i for y_i in y]
    plt.quiver(x, y, vx, vy)
    plt.show()
def arrowsSequence(X, Y, color=None):
    '''Draws an arrow sequence from 2D coordinates.'''
    if color is None:
        color = ['b', 'g', 'k']
    # (plt.hold is deprecated and removed; repeated plot calls already draw
    # on the same axes, so it is not needed here.)
    if isinstance(X[0], list):
        plt.figure()
        for i in range(len(X)):
            x = np.array(X[i])
            y = np.array(Y[i])
            plt.plot(x[0], y[0], 'ro')
            plt.plot(x, y, color=color[i])
            plt.quiver(x[:-1], y[:-1], x[1:] - x[:-1], y[1:] - y[:-1],
                       scale_units='xy', angles='xy', scale=1)
        plt.axis((0, 1020, 0, 800))
        plt.show()
    else:
        x = np.array(X)
        y = np.array(Y)
        plt.figure()
        plt.plot(x[0], y[0], 'r')
        plt.plot(x, y, 'b')
        plt.quiver(x[:-1], y[:-1], x[1:] - x[:-1], y[1:] - y[:-1],
                   scale_units='xy', angles='xy', scale=1)
        plt.axis((0, 1020, 0, 800))
        plt.show()
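# A minimal usage sketch for arrowsSequence above (assumes numpy as np and
# matplotlib.pyplot as plt are imported as in the other snippets; the
# coordinates are made up for illustration and fit the hard-coded axis range):
X = [[100, 300, 500, 700], [200, 400, 600, 800]]
Y = [[100, 250, 150, 300], [500, 450, 600, 550]]
arrowsSequence(X, Y)  # one 'ro' start marker and one arrow chain per sub-list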
def __call__(self, u, v, w, iteration):
    q = 4
    plt.cool()
    if self.x is None:
        ny = v.shape[1]
        nz = v.shape[0]
        self.x, self.y = np.meshgrid(range(ny), range(nz))
    x, y = self.x, self.y
    if self.iterations is None:
        self.iterations = self.sim.bulk_calc(getIteration())
    all_itr = self.iterations
    if self.xvar is None:
        class temp(sim_operation):
            def get_params(self):
                return ["u"]

            def __call__(self, u):
                return np.max(self.sim.ddx(u))
        self.xvar = self.sim.bulk_calc(temp())
    xvar_series = self.xvar
    # Clamp the y-range of the time-series panel to positive values so the
    # semilogy axis is valid (renamed from min/max to avoid shadowing builtins).
    ymin = np.min(xvar_series)
    ymax = np.max(xvar_series)
    if ymin <= 0:
        ymin = 0.000001
    if ymax <= ymin:
        ymax = 0.00001
    avgu = np.average(u, 2)
    avgv = np.average(v, 2)
    avgw = -np.average(w, 2)
    xd = self.sim.ddx(u)
    xd2d = np.max(xd, 2)
    xd1d = np.max(xd2d, 1)

    plt.subplot(221)
    plt.imshow(avgu)
    plt.quiver(x[::q, ::q], y[::q, ::q], avgv[::q, ::q], avgw[::q, ::q])
    plt.title('Avg u')
    plt.axis("tight")

    plt.subplot(222)
    plt.imshow(xd2d)
    plt.title('Max x Variation (y-z)')
    plt.axis("tight")

    plt.subplot(223)
    plt.plot(xd1d)
    plt.title('Max x Variation (z)')
    plt.axis("tight")

    plt.subplot(224)
    plt.plot(all_itr, xvar_series, '--')
    plt.plot([iteration, iteration], [ymin, ymax])
    plt.semilogy()
    plt.title('Max x Variation (t)')
    plt.axis("tight")
def work_on_area(t, do_plot=True):
    if do_plot:
        plt.figure(figsize=(4, 4))
    lst = []
    for i in range(N):
        for j in range(N):
            ff[i, j] = np.std(ens([s[t], h[t]], [k_arg[i], k_arg[j]]) - m_fc[t])
            lst.append([k_arg[i], k_arg[j], ff[i, j]])
    lst = np.array(lst)
    ff_min = np.min(ff)
    ff_sel = lst[lst[:, 2].flatten() < ff_min * 1.1][:, 0:2].transpose()
    min_x = k_arg[np.argmin(ff) // ff.shape[1]]
    min_y = k_arg[np.argmin(ff) % ff.shape[1]]
    pp.fit(ff_sel.transpose())
    if do_plot:
        plt.contour(k_arg, k_arg, ff.transpose() - np.min(ff))
        plt.plot(ff_sel[0], ff_sel[1], 'o')
        plt.plot(min_x, min_y, 'or')
        plt.quiver([min_x, min_x], [min_y, min_y],
                   pp.components_[:, 0].flatten() * pp.explained_variance_,
                   pp.components_[:, 1].flatten() * pp.explained_variance_,
                   scale=1.0)
        plt.xlim(lims)
        plt.ylim(lims)
    return np.concatenate(([min_x, min_y], pp.components_.flatten(),
                           pp.explained_variance_))
def _decorate_contour_segment(data, stride=1, options={}, tomax=True,
                              labelled=False, outline=None, aspect=1):
    default_options = {'scale': 0.2,
                       'scale_units': 'dots',
                       'headaxislength': 2,
                       'headlength': 2,
                       'headwidth': 2,
                       'minshaft': 1,
                       'units': 'dots',
                       #'angles': 'xy',
                       'edgecolor': outline,
                       'linewidth': 0 if outline is None else 0.2}
    default_options.update(options)

    x = data[::stride, 0]
    y = data[::stride, 1]

    # Rotate the segment tangent by 90 degrees to get the arrow direction;
    # the sign selects whether arrows point toward the maximum.
    sign = 1 if tomax else -1
    dx = -sign * np.diff(y) * aspect
    dy = sign * np.diff(x)
    l = np.sqrt(dx**2 + dy**2)
    dx /= l
    dy /= l
    x = 0.5 * (x + np.roll(x, -1))
    y = 0.5 * (y + np.roll(y, -1))
    if labelled:
        x, y, dx, dy = x[1:-2], y[1:-2], dx[1:-1], dy[1:-1]
    else:
        x, y = x[:-1], y[:-1]
    plt.quiver(x, y, dx, dy, **default_options)
def main():  # e.g. 'fingerprint5_small', 'Example_curve_n0', 'Example_curve_n50'
    # Load the image and get its size (scipy.misc.imread is deprecated;
    # imageio.imread is the usual replacement).
    img = misc.imread('Images/arch.png').astype(float)
    xSize, ySize = np.shape(img)

    tangents = basic.atd(img, window=5)

    ##    plt.figure()
    ##    plt.imshow(img[0:25, 50:100], interpolation='none',
    ##               cmap=plt.get_cmap('gray'))
    ##    plt.quiver(tangents[0:25, 50:100, 0], tangents[0:25, 50:100, 1],
    ##               pivot='mid', color='r', units='inches', scale=5)
    ##    plt.show()

    # tx and ty are the locations where the ridge finding starts
    tx = 60
    ty = 5

    # Show the image with the tangent field overlaid
    plt.imshow(img[0:70, 50:90], interpolation='none',
               cmap=plt.get_cmap('gray'))
    plt.quiver(tangents[0:70, 50:90, 0], tangents[0:70, 50:90, 1],
               pivot='mid', color='r', units='inches', scale=5)

    oC = basic.followRidge(tangents, tx, ty, img, mu=2, rad=2)
    rx = [x[0] - 50 for x in oC]
    ry = [x[1] for x in oC]
    plt.plot(rx, ry, 'b-')
    plt.plot(tx - 50, ty, 'ro')  # plot where the ridge finding starts
    #plt.show()
    plt.savefig('RF_ATD_arch.svg')
def __call__(self, u, w, bx, by, bz, b2, t):
    q = 8
    cmap_rb = cm.red_blue()  # renamed from `map` to avoid shadowing the builtin
    if self.x is None:
        nx = u.shape[2]
        nz = u.shape[0]
        self.x, self.y = np.meshgrid(range(nx), range(nz))
    x, y = self.x, self.y
    avgu = np.average(u, 1)
    avgw = np.average(w, 1)
    avgbx = np.average(bx, 1)
    avgby = np.average(by, 1)
    avgbz = np.average(bz, 1)
    avgb2 = np.average(b2, 1)
    avgt = np.average(t, 1)

    plt.subplot(121)
    plt.imshow(avgt, cmap=cmap_rb, origin='lower')
    plt.colorbar()
    plt.quiver(x[::q, ::q], y[::q, ::q], avgu[::q, ::q], avgw[::q, ::q])
    plt.title('Tracer-Vel')
    plt.axis("tight")

    plt.subplot(122)
    plt.imshow(avgby, cmap=cmap_rb, origin='lower')
    plt.colorbar()
    plt.quiver(x[::q, ::q], y[::q, ::q], avgbx[::q, ::q], avgbz[::q, ::q])
    plt.title('By-Twist')
    plt.axis("tight")
def DisplayPolicy(self, FigHandle=None):
    '''Display the MDP policy.

    Args:
        FigHandle: pyplot.Figure handle if we want to draw the policy on a
            previous figure. If this is not passed, a new figure is created.
    '''
    width, height = self.gm.Width, self.gm.Height
    Xpolicy = np.zeros((width, height))
    Ypolicy = np.zeros((width, height))
    Xloc, Yloc = np.zeros((width, height)), np.zeros((width, height))
    Uprime = self.mdp['U']
    if FigHandle is None:
        plt.figure()
    self.gm.PlotNewRiskMapFig()
    for a in self.g.nodes():
        x, y = self.GetXYfromNodeStr(a)
        Xloc[x][y], Yloc[x][y] = x, y
        UtilVec = np.zeros(10)
        maxUtil = -float('inf')
        k, maxU, maxV = 0, 0, 0
        for u, v, d in self.g.out_edges(a, data=True):
            i, j = self.GetXYfromNodeStr(v)
            UtilVec[k] = Uprime[j][i]
            if maxUtil <= UtilVec[k]:
                maxUtil = UtilVec[k]
                maxU, maxV = i - x, j - y
            k = k + 1
        Xpolicy[x][y], Ypolicy[x][y] = 0.5 * maxU, 0.5 * maxV
    plt.quiver(Xloc, Yloc, Xpolicy, Ypolicy,
               scale=10 * math.sqrt(maxU**2 + maxV**2))
    plt.title('MDP Policy')
    return Xpolicy, Ypolicy
def test_complete():
    fig = plt.figure('Figure with a label?', figsize=(10, 6))

    plt.suptitle('Can you fit any more in a figure?')

    # make some arbitrary data
    x, y = np.arange(8), np.arange(10)
    data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
    v = np.sin(v * -0.6)

    plt.subplot(3, 3, 1)
    plt.plot(list(range(10)))

    plt.subplot(3, 3, 2)
    plt.contourf(data, hatches=['//', 'ooo'])
    plt.colorbar()

    plt.subplot(3, 3, 3)
    plt.pcolormesh(data)

    plt.subplot(3, 3, 4)
    plt.imshow(data)

    plt.subplot(3, 3, 5)
    plt.pcolor(data)

    plt.subplot(3, 3, 6)
    plt.streamplot(x, y, u, v)

    plt.subplot(3, 3, 7)
    plt.quiver(x, y, u, v)

    plt.subplot(3, 3, 8)
    plt.scatter(x, x**2, label='$x^2$')
    plt.legend(loc='upper left')

    plt.subplot(3, 3, 9)
    plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)

    ###### plotting is done, now test its pickle-ability #########

    # Uncomment to debug any unpicklable objects. This is slow (~200 seconds).
    # recursive_pickle(fig)

    result_fh = BytesIO()
    pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)

    plt.close('all')

    # make doubly sure that there are no figures left
    assert_equal(plt._pylab_helpers.Gcf.figs, {})

    # wind back the fh and load in the figure
    result_fh.seek(0)
    fig = pickle.load(result_fh)

    # make sure there is now a figure manager
    assert_not_equal(plt._pylab_helpers.Gcf.figs, {})

    assert_equal(fig.get_label(), 'Figure with a label?')
def plotSimDataSigmaEps(fig, time_snapshots, time_sim, pp_sim,
                        ev_e_sim, ev_p_sim, compression='negative'):
    # Get snapshots from data
    time_snap, pp_snap = getDataTimeSnapshots(time_snapshots, time_sim, pp_sim)

    # Activate the figure
    plt.figure(fig.number)

    # Total strain = elastic + plastic
    ev_sim = list(map(lambda ev_e, ev_p: ev_e + ev_p, ev_e_sim, ev_p_sim))

    # Plot sigma_a vs. time
    if compression == 'positive':
        ev_data = list(map(lambda p: -p, ev_sim))
        pp_data = list(map(lambda p: -p, pp_sim))
        ev_e_data = list(map(lambda p: -p, ev_e_sim))
        ev_p_data = list(map(lambda p: -p, ev_p_sim))
        plt.plot(ev_data, pp_data, '--', color='C0',
                 label='Total strain (Simulation)')
        plt.plot(ev_e_data, pp_data, '--', color='C1',
                 label='Elastic strain (Simulation)')
        plt.plot(ev_p_data, pp_data, '--', color='C2',
                 label='Plastic strain (Simulation)')
        # Draw head-to-tail arrows along the total-strain curve
        x_arrow = np.array(ev_data[:-1])
        y_arrow = np.array(pp_data[:-1])
        u_arrow = np.array(ev_data[1:]) - x_arrow
        v_arrow = np.array(pp_data[1:]) - y_arrow
        plt.quiver(x_arrow, y_arrow, u_arrow, v_arrow,
                   scale_units='xy', angles='xy', scale=1.5, width=0.001,
                   headwidth=7, headlength=10, linewidth=0.5,
                   edgecolor='b', color='k')
    else:
        plt.plot(ev_sim, pp_sim, '--', color='C0',
                 label='(Total strain) Simulation')
        #plt.plot(ev_e_sim, pp_data, '--', color='C1', label='Elastic strain (Simulation)')
        #plt.plot(ev_p_sim, pp_data, '--', color='C2', label='Plastic strain (Simulation)')

    return time_snap, pp_snap
def quiverPlot(psi, orient, saveDest):
    """
    Generates a quiver plot that shows channel orientation
    and singularity index response strength.

    Inputs:
    psi -- singularity index response
    orient -- local orientation at each spatial location (x, y)
    saveDest -- output figure save destination

    Returns:
    None (saves the figure at saveDest)
    """
    # downsample
    psi_s = psi[::4, ::4]
    orient_s = orient[::4, ::4]

    U = -psi_s * np.sin(orient_s)
    V = psi_s * np.cos(orient_s)

    R, C = psi.shape
    aspect_ratio = float(R) / C

    plt.figure(figsize=(10, 10 * aspect_ratio))
    ax = plt.gca()
    ax.invert_yaxis()
    plt.quiver(U, V, scale=4, width=0.001, pivot='mid',
               headwidth=0, minlength=0)
    plt.axis('off')
    plt.savefig(saveDest)
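# A small, self-contained way to exercise quiverPlot with synthetic inputs
# (in real use psi/orient would come from a singularity-index filter; the
# random arrays here only check the plotting path):
psi = np.random.rand(200, 200)
orient = np.random.uniform(-np.pi / 2, np.pi / 2, size=(200, 200))
quiverPlot(psi, orient, 'quiver_demo.png')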
def plot_field(X, Y, U, V, filename):
    '''
    Function to plot the potential field.

    Args:
        X (numpy.ndarray): X component of the sample points.
        Y (numpy.ndarray): Y component of the sample points.
        U (numpy.ndarray): X component of field at sample points.
        V (numpy.ndarray): Y component of field at sample points.
        filename (str): output name; the figure is saved as filename + ".svg".
    '''
    # Generate plot.
    padding = 0.5
    plt.figure()
    plt.quiver(X, Y, U, V, color='#007ce8', units='x', pivot='tail')
    plt.axis('equal')
    plt.axis([np.amin(X) - padding, np.amax(X) + padding,
              np.amin(Y) - padding, np.amax(Y) + padding])
    # plt.savefig("potential_field_back1.svg", format='svg')
    plt.savefig(filename + ".svg", format='svg')
    plt.show()
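# Example call for plot_field using a simple rotational field u = -y, v = x
# sampled on a coarse grid (the values are illustrative only):
X, Y = np.meshgrid(np.linspace(-2, 2, 15), np.linspace(-2, 2, 15))
plot_field(X, Y, -Y, X, 'rotational_field')  # writes rotational_field.svg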
def three_d_contour(file, show):
    # Python 2's raw_input was renamed to input in Python 3
    xloc = input("xlocation(two_d_initial_x_value.txt):") or "two_d_initial_x_value.txt"
    yloc = input("ylocation(two_d_initial_y_value.txt):") or "two_d_initial_y_value.txt"
    uloc = input("ulocation(two_d_cavity_u_results.txt):") or "two_d_cavity_u_results.txt"
    vloc = input("vlocation(two_d_cavity_v_results.txt):") or "two_d_cavity_v_results.txt"
    ploc = input("plocation(two_d_cavity_p_results.txt):") or "two_d_cavity_p_results.txt"
    u = np.loadtxt(uloc)
    v = np.loadtxt(vloc)
    p = np.loadtxt(ploc)
    X = np.loadtxt(xloc)
    Y = np.loadtxt(yloc)
    X, Y = np.meshgrid(X, Y)

    fig = plt.figure()
    plt.contourf(X, Y, p, alpha=0.5)  # plotting the pressure field as a contour
    plt.colorbar()
    plt.contour(X, Y, p)  # plotting the pressure field outlines
    plt.quiver(X[::3, ::3], Y[::3, ::3], u[::3, ::3], v[::3, ::3])  # plotting velocity
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.title(file)

    sep = '.'
    filename = file.split(sep, 1)[0]
    plt.savefig(filename + '.png')  # save figure as .png file
    if show:
        plt.show()
    plt.close(fig)
def test_ligne(self):
    plt.figure('LIGNE ATTRACTIVE')
    X, Y = np.mgrid[-100:100:40j, -100:100:40j]
    U, V = vfl.ligne(X, Y, 0, 0, xb=50, yb=0, K=0.2, R=20, effect_range=40)
    plt.quiver(X, Y, U, V, scale=2)
    plt.show()
def test_dir_segment_complet(self):
    plt.figure('test segment complet')
    X, Y = np.mgrid[-20:20:20j, -20:20:20j]
    U1, V1 = vfl.dir_segment(X, Y, -5, 0, 5, 0)
    U2, V2 = vfl.dir_segment_extremity(X, Y, -5, 0, 5, 0)
    plt.quiver(X, Y, U1 + U2, V1 + V2)
    plt.show()
def plot_cycle(P, **kwargs):
    X = []
    Y = []
    for p in P:
        X.append(p[0])
        Y.append(p[1])
    X = np.array(X)
    Y = np.array(Y)

    lim_offset = 1
    plt.xlim((min(X) - lim_offset, max(X) + lim_offset))
    plt.ylim((min(Y) - lim_offset, max(Y) + lim_offset))

    w = max(X) - min(X)
    h = max(Y) - min(Y)
    plt.quiver(X[:-1], Y[:-1], X[1:] - X[:-1], Y[1:] - Y[:-1],
               scale_units='xy', angles='xy', scale=1,
               width=plt.gcf().get_figwidth() * .0005,
               headwidth=2, headlength=3)

    if 'filename' in kwargs:
        plt.savefig(kwargs['filename'], dpi=1500)
    if kwargs.get('show', True):
        plt.show()
    plt.close()
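# Usage sketch for plot_cycle: a closed square tour given as a point list
# (the 'filename'/'show' kwargs behave as in the function above):
square = [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]
plot_cycle(square, show=True)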
def plot_arrow(vectordata, imname, imdim):
    """Plot an arrow (quiver) plot of the slope direction."""
    rows = imdim[0]
    cols = imdim[1]
    fig = plt.figure(figsize=(rows / 12.0, cols / 12.0))
    fig.patch.set_alpha(0.0)
    fig.subplots_adjust(left=0.0, bottom=0.0, top=1.0, right=1.0)
    [X, Y, U, V] = vectordata
    if rows > 50 or cols > 50:
        # generate a sparse arrow plot for large rasters
        skip_p = int((rows * cols) / 1000.0)
        plt.quiver(X[::skip_p], Y[::skip_p], U[::skip_p], V[::skip_p],
                   color='w')
    else:
        plt.quiver(X, Y, U, V, color='w')
    #plt.plot(X, Y, "bo")
    plt.axis('off')
    xup = np.unique(X)
    xstep = abs(xup[0] - xup[1])
    yup = np.unique(Y)
    ystep = abs(yup[0] - yup[1])
    plot_range = [np.min(X) - xstep, np.max(X) + xstep,
                  np.min(Y) - ystep, np.max(Y) + ystep]
    plt.axis(plot_range)
    # keep the white background (savefig has no 'aspect' keyword, so that
    # argument was dropped)
    plt.savefig(imname + ".png", format="PNG", transparent=False)
    plt.close(fig)
    return plot_range
def plotOrthogonalField(sh, b):
    center = (np.array(sh) - 1) / 2.0
    C, R = sp.meshgrid(np.array(range(sh[1]), dtype=np.float64),
                       np.array(range(sh[0]), dtype=np.float64))
    R = R - center[0] + b[0]
    C = C - center[1] + b[1]
    plt.figure()
    plt.quiver(R, -C)
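# Example invocation of plotOrthogonalField: a 64x64 grid with zero offset b
# (sp is assumed here to be scipy or numpy, matching the meshgrid call above):
plotOrthogonalField((64, 64), np.array([0.0, 0.0]))
plt.show()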
def plot_vector_field(dxdt, dydt, x_range=[-100, 100], y_range=[-100, 100],
                      grid_resolution=10, normalize=True):
    """
    Plots a vector field with vectors generated from dxdt and dydt functions.

    :param dxdt: function of (x, y) returning the x-component of the vector
        with origin (x, y)
    :param dydt: function of (x, y) returning the y-component of the vector
        with origin (x, y)
    :param x_range: limits of the grid on the x-axis
    :param y_range: limits of the grid on the y-axis
    :param grid_resolution: number of points to generate in the grid for each axis
    :param normalize: should the vector field be normalized? In that case it
        becomes a direction field.
    """
    # Generate mesh grid
    x = np.linspace(x_range[0], x_range[1], grid_resolution)
    y = np.linspace(y_range[0], y_range[1], grid_resolution)
    x, y = np.meshgrid(x, y)

    # Generate vector field
    vx = dxdt(x, y)
    vy = dydt(x, y)

    if normalize:
        # note: zero-length vectors (fixed points) would divide by zero here
        norm = 1 / np.sqrt(vx**2 + vy**2)
        vx = vx * norm
        vy = vy * norm

    # Plot vector field
    plt.axhline(0, color='black')
    plt.axvline(0, color='black')
    plt.quiver(x, y, vx, vy, pivot='middle', headwidth=4, headlength=6)
    plt.xlabel('$x$')
    plt.ylabel('$y$')
    plt.axis('image')
    plt.show()
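# A concrete call to plot_vector_field for the linear system
# dx/dt = y, dy/dt = -x (a center); the lambdas are illustrative, and the
# even grid count keeps the origin (a fixed point) off the grid:
plot_vector_field(lambda x, y: y,
                  lambda x, y: -x,
                  x_range=[-5, 5], y_range=[-5, 5],
                  grid_resolution=20, normalize=True)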
def render_still_vector_grid2(filename_x, filename_y, output_filename, **kwargs):
    has_xtick = True
    has_ytick = True
    if 'has_xtick' in kwargs:
        has_xtick = kwargs['has_xtick']
    if 'has_ytick' in kwargs:
        has_ytick = kwargs['has_ytick']

    grid_data_u = np.load(filename_x)
    grid_data_v = np.load(filename_y)
    nx = len(grid_data_u[0])
    ny = len(grid_data_u)
    X, Y = np.meshgrid(np.arange(0, 1, 1.0 / nx),
                       np.arange(0, float(ny) / nx, 1.0 / nx))
    U = grid_data_u
    V = grid_data_v

    fig, ax = plt.subplots()
    if not has_xtick:
        ax.set_xticks(())
        ax.set_xticklabels(())
    if not has_ytick:
        ax.set_yticks(())
        ax.set_yticklabels(())
    ax.set_aspect('equal')
    plt.quiver(X, Y, U, V)
    plt.savefig(output_filename)
    plt.close(fig)
    print('Rendered <%s>' % output_filename)
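# Self-contained smoke test for render_still_vector_grid2: write two synthetic
# .npy component grids and render them (the file names are placeholders):
np.save('u.npy', np.random.randn(64, 64))
np.save('v.npy', np.random.randn(64, 64))
render_still_vector_grid2('u.npy', 'v.npy', 'field.png',
                          has_xtick=False, has_ytick=False)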
def show_normals(ys, num_neighbors=10):
    tree = cKDTree(ys)
    ns = calculate_normals(tree, num_neighbors)
    plt.quiver(ys[:, 0], ys[:, 1], ns[:, 0], ns[:, 1], angles='xy')
    plt.plot(ys[:, 0], ys[:, 1], 'o')
    plt.show()
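# Usage sketch for show_normals: points sampled on a noisy circle, whose
# normals should point radially (calculate_normals is assumed to be the
# helper used above, defined elsewhere in the same module):
theta = np.linspace(0, 2 * np.pi, 100, endpoint=False)
ys = np.c_[np.cos(theta), np.sin(theta)] + 0.01 * np.random.randn(100, 2)
show_normals(ys, num_neighbors=10)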
def main():
    gw = gridworld()
    a = agent(gw)
    for epoch in range(20):
        a.initEpoch()
        while True:
            rwd, stat, act = a.takeAction()
            a.updateQ(rwd, stat, act)
            if gw.status() == 'Goal':
                break
            if mod(a.counter, 10) == 0:
                print(gw.state())
                print(gw.field())
        print('Finished')
        print(a.counter)
        print(gw.state())
        print(gw.field())
    Q = transpose(a.Q(), (2, 0, 1))
    for i in range(4):
        # subplot indices are 1-based (the original used i, which fails for i=0)
        plt.subplot(2, 2, i + 1)
        plt.imshow(Q[i], interpolation='nearest')
        plt.title(a.actions()[i])
        plt.colorbar()
    plt.show()
    plt.quiver(Q[0] - Q[1], Q[3] - Q[2])
    plt.show()
def contour_plot(ax, isub, var, U, V, x_range, y_range,
                 levels_var=None, annot=None):
    X, Y = np.meshgrid(x_range, y_range)
    plt.subplots_adjust(hspace=0.0)
    plt.subplots_adjust(wspace=0)
    temp = ticker.MaxNLocator(3)
    # if isub < 2:
    #     ax.set_xticklabels(())
    if isub % 2 != 0:
        ax.set_yticklabels(())
    # contour plot for v field
    CS = plt.contourf(X, Y, var, cmap=plt.cm.Blues, alpha=0.7,
                      levels=levels_var)
    # plotting vectors from U and V fields
    if (U ** 2 + V ** 2).max() > 0:
        plt.quiver(X[::20, ::20], Y[::20, ::20],
                   U[::20, ::20], V[::20, ::20], scale=15)
    # cbar = plt.colorbar(CS, fraction=0.04, format="%.1e")
    CS_l = plt.contour(CS, levels=CS.levels, colors="k", linewidths=0.2)
    ax.set_ylim(-8.5, 8.5)
    ax.set_xlim(-8.5, 8.5)
    ax.annotate(
        annot,
        xy=(0.01, 0.97),
        xycoords="axes fraction",
        fontsize=12,
        horizontalalignment="left",
        verticalalignment="top",
    )
def plotArrows(  # not yet tested
        prefix, dX, dY, X, Y, slices, BG=None, C=None, extent=None, zs=None,
        by=2, figsize=default_figsize, cmap=default_cmap,
        interpolation=default_interpolation, vmin=None, vmax=None, cbar=False,
        atoms=None, bonds=None, atomSize=default_atom_size):
    for ii, i in enumerate(slices):
        print(" plotting ", i)
        plt.figure(figsize=figsize)
        #plt.quiver(dX, dY, X, Y, C=C, width=width, scale=scale)
        # note: the body originally referenced undefined Xs/Ys; the X/Y
        # parameters are used here instead
        plt.quiver(X[::by, ::by], Y[::by, ::by],
                   dX[::by, ::by], dY[::by, ::by],
                   color='k', headlength=10, headwidth=10, scale=15)
        if BG is not None:
            plt.imshow(BG[i, :, :], origin='image',
                       interpolation=interpolation, cmap=cmap, extent=extent,
                       vmin=vmin, vmax=vmax)
            if cbar:
                plt.colorbar()
        plotGeom(atoms, bonds, atomSize=atomSize)
        plt.xlabel(r' Tip_x $\AA$')
        plt.ylabel(r' Tip_y $\AA$')
        if zs is None:
            plt.title(r"iz = %i" % i)
        else:
            plt.title(r"Tip_z = %2.2f $\AA$" % zs[i])
        plt.savefig(prefix + '_%3.3i.png' % i, bbox_inches='tight')
        plt.close()
def plot_vector_grid(filename, x_grid, y_grid):
    try:
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
    except ImportError:
        print('Plotting disabled')
        return

    fig = plt.figure()
    x_coords = []
    y_coords = []
    z_coords = []
    for itt_x in range(len(x_grid[0])):
        for itt_y in range(len(x_grid)):
            x_coords.append(itt_x)
            y_coords.append(itt_y)
            z_coords.append((x_grid[itt_y][itt_x],
                             y_grid[itt_y][itt_x]))

    plt.quiver(x_coords, y_coords,
               [x[0] for x in z_coords],
               [y[1] for y in z_coords])
    fig.savefig(filename)
def testLinVortList():
    """Test function for a linear vortex sheet list"""
    V = linVortList()
    Npoints = 50
    lowLim = numpy.array([0., 0.])
    upLim = numpy.array([1., 1.])
    for i in range(Npoints):
        p1 = (lowLim + (float(i) / Npoints * (upLim - lowLim)))
        p2 = (lowLim + (float(i + 1) / Npoints * (upLim - lowLim)))
        V.addLinVortex(0.1, 0.1, p1, p2)

    X, Y = numpy.meshgrid(numpy.arange(-2., 2., 0.2),
                          numpy.arange(-2, 2, 0.2))
    u = numpy.copy(X)
    v = numpy.copy(X)
    for i in range(len(X.flatten())):
        vel = V.fieldEffect([X.flatten()[i], Y.flatten()[i]])
        u.ravel()[i] = vel[0]
        v.ravel()[i] = vel[1]

    plt.figure()
    plt.quiver(X, Y, u, v)
    plt.title('Vector field due to multiple linear vortex sheets')
    plt.plot([lowLim[0], upLim[0]], [lowLim[1], upLim[1]])
    plt.show()
    return
def weight_quiver(weights, color='c'):
    plt.quiver(weights[0, :-1], weights[1, :-1],
               weights[0, 1:] - weights[0, :-1],
               weights[1, 1:] - weights[1, :-1],
               scale_units='xy', angles='xy', scale=1, color=color)
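# Illustrative call to weight_quiver: a synthetic 2xN weight trajectory
# (e.g. iterates of gradient descent) drawn as head-to-tail arrows:
t = np.linspace(0, 3, 30)
weights = np.vstack([np.exp(-t) * np.cos(4 * t),
                     np.exp(-t) * np.sin(4 * t)])  # spiral toward the optimum
weight_quiver(weights, color='c')
plt.axis('equal')
plt.show()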
def display_velocity(q, p, mu, mu2=None):
    W = 5 * SIGMA
    res = 30
    N_nodes = res ** 2
    store = np.outer(np.linspace(-W, W, res), np.ones(res))
    nodes = np.zeros([N_nodes, DIM])
    nodes[:, 0] = np.reshape(store, N_nodes)
    nodes[:, 1] = np.reshape(store.T, N_nodes)
    K, DK, D2K, D3K = jpf.derivatives_of_kernel(nodes, q)
    vel_field = np.einsum('ijab,jb->ia', K, p) \
        - np.einsum('ijabc,jbc->ia', DK, mu)
    if mu2 is not None:
        vel_field = vel_field + np.einsum('ijabcd,jbcd->ia', D2K, mu2)
    U = vel_field[:, 0]
    V = vel_field[:, 1]
    plt.figure()
    plt.quiver(nodes[:, 0], nodes[:, 1], U, V, scale=10)
    plt.plot(q[:, 0], q[:, 1], 'ro')
    for i in range(0, N):
        if np.linalg.norm(p[i]) < 1e-4:
            continue
        plt.arrow(q[i, 0], q[i, 1], p[i, 0], p[i, 1],
                  head_width=0.1, head_length=0.2, lw=4.0, fc='b', ec='b')
        plt.arrow(q[i, 0], q[i, 1], p[i, 0], p[i, 1],
                  head_width=0.1, head_length=0.2, lw=2.0, fc='w', ec='w')
    plt.axis('equal')
    plt.axis([-W, W, -W, W])
    return plt.gcf()
def testVortexList(strength=1.0, blobType=0, delta=0.0):
    """Test function for a point vortex list"""
    V = vortexList()
    Npoints = 50
    lowLim = numpy.array([0., 0.])
    upLim = numpy.array([1., 1.])
    for i in range(Npoints):
        [x, y] = (lowLim + (float(i) / Npoints * (upLim - lowLim)))
        V.addVortex([x, y], strength, blobType, delta)

    X, Y = numpy.meshgrid(numpy.arange(-2., 2., 0.2),
                          numpy.arange(-2, 2, 0.2))
    u = numpy.copy(X)
    v = numpy.copy(X)
    for i in range(len(X.flatten())):
        vel = V.fieldEffect([X.flatten()[i], Y.flatten()[i]])
        u.ravel()[i] = vel[0]
        v.ravel()[i] = vel[1]

    plt.figure()
    plt.quiver(X, Y, u, v)
    plt.title('Vector field due to multiple vortices')
    plt.scatter([lowLim[0], upLim[0]], [lowLim[1], upLim[1]])
    plt.show()
    return
def plot_xwt_wavetransf(power, time, wa, T, S, sig95, pangle, time_base,
                        scalemin=0, scalemax=6, ylabel='Pressure (mb)',
                        plot_percentile=False):
    """Plot cross-wavelet transform power with a confidence-interval contour
    and phase vectors."""
    fig = plt.figure(10)
    ax = plt.subplot(1, 1, 1)

    if plot_percentile:
        # contour at "percentile variances" when using non-normalized data,
        # to match the web output
        csf = plt.contourf(T, S, power, levels=[
            0,
            stats.scoreatpercentile(power, 25),
            stats.scoreatpercentile(power, 50),
            stats.scoreatpercentile(power, 75),
            stats.scoreatpercentile(power, 95),
            stats.scoreatpercentile(power, 100)], colors=bmap)
    else:
        # contour at "normalized variances" (BAMS)
        csf = plt.contourf(T, S, power, levels=[0, .2, .4, .6, .8, 1],
                           colors=bmap)
    cbar = plt.colorbar(pad=.1, shrink=.5, format='%.4f', extend='both')

    levels = [-99, 1]  # values greater than 1 are significant
    plt.contour(T, S, sig95, levels, colors='black', linewidths=1)
    ax.set_yscale('log')
    ax.grid(True)

    # plot phase relationship
    arr_dens = [60, 30]
    # slice steps must be integers and at least 1 (the original guard
    # compared the list itself to 0 and left float steps)
    arr_densx = max(1, int(round(len(time) / arr_dens[0])))
    arr_densy = max(1, int(round(len(wa.scales) / arr_dens[1])))

    plt.quiver(T[::arr_densy, ::arr_densx], S[::arr_densy, ::arr_densx],
               np.cos(pangle)[::arr_densy, ::arr_densx],
               np.sin(pangle)[::arr_densy, ::arr_densx],
               width=.00125, headwidth=4, headlength=4, alpha=0.6, color='k')

    # put the ticks at powers of 2 in the scale
    ticks = np.unique(2 ** np.floor(np.log2(wa.scales)))[1:]
    ax.yaxis.set_ticks(ticks)
    ax.yaxis.set_ticklabels(ticks.astype(str))
    ax.set_ylim(scalemax, scalemin)
    ax.set_ylabel('scales')

    # second y scale with equivalent fourier periods to scales,
    # except with the ticks at the powers of 2
    ax_fourier = ax.twinx()
    ax_fourier.set_yscale('log')
    # match the fourier ticks to the scale ticks
    ax_fourier.set_yticks(ticks)
    ax_fourier.set_yticklabels(ticks.astype(str))
    ax_fourier.set_ylabel('fourier period (%s)' % time_base)
    fourier_lim = [wa.fourier_period(i) for i in ax.get_ylim()]
    ax_fourier.set_ylim(fourier_lim)

    ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
    fig.autofmt_xdate()

    # shade the region between the edge and coi
    C, S = wa.coi
    ax.fill_between(x=C, y1=S, y2=wa.scales.max(), color='gray', alpha=0.5)
    ax.set_xlim(time.min(), time.max())
    #plt.show()

    DefaultSize = fig.get_size_inches()
    fig.set_size_inches((DefaultSize[0] * 2, DefaultSize[1]))

    return (plt, fig)
def quiver(self, it=None, u=None, v=None, title=None, scale=0.1,
           color="k", **kwargs):
    x = kwargs.get("x", self._obj.SCHISM_hgrid_node_x[:].values)
    y = kwargs.get("y", self._obj.SCHISM_hgrid_node_y[:].values)
    try:
        t = kwargs.get("t", self._obj.time.values)
    except Exception:
        pass

    fig = plt.figure(figsize=(12, 8))
    title = kwargs.get("title", "vector plot for {}".format(title))
    xy = kwargs.get("xy", (0.05, -0.1))

    ## CHOOSE YOUR PROJECTION
    # ax = plt.axes(projection=ccrs.Orthographic(grid_x.mean(), grid_y.mean()))
    # ax = plt.axes(projection=ccrs.PlateCarree())
    # ax.background_patch.set_facecolor('k')
    ax = plt.gca()

    # optional mask for the data
    mask = kwargs.get("mask", None)
    if "mask" in kwargs:
        u = np.ma.masked_array(u, mask)
        v = np.ma.masked_array(v, mask)
        v = v.filled(fill_value=-99999)
        u = u.filled(fill_value=-99999)

    # strip the keys consumed above before forwarding kwargs to quiver
    for val in ["x", "y", "t", "it", "u", "v", "title", "tes", "xy",
                "scale", "mask", "color", "var"]:
        try:
            del kwargs[val]
        except KeyError:
            pass

    ax.set_aspect("equal")

    p = plt.quiver(x, y, u, v, angles="xy", scale_units="xy",
                   scale=scale, color=color, **kwargs)
    plt.xlabel("Longitude (degrees)")
    plt.ylabel("Latitude (degrees)")
    ax.set_title(title, pad=30)

    if it:
        text = "time={}".format(t[it])
        an = ax.annotate(text, xy=xy, xycoords="axes fraction")

    return p  # , ax
proj.drawmeridians(np.arange(np.floor(domain[1] / 5) * 5,
                             np.ceil(domain[3] / 5) * 5 + 1, 5),
                   labels=[False, False, False, False])
                   #labels=[False,False,False,True])
lonproj, latproj = proj(lon_uv, lat_uv)
H = plt.contourf(lonproj, latproj,
                 np.sqrt(tau_x_mth_abs[:, :, tt]**2 +
                         tau_y_mth_abs[:, :, tt]**2),
                 levels=np.arange(0.2, 1 + 0.1, 0.1) * 0.25,
                 cmap=plt.cm.gnuplot2_r)
plt.clim(0, 0.4)
plt.quiver(lonproj[::d, ::d], latproj[::d, ::d],
           tau_x_mth_abs[::d, ::d, tt], tau_y_mth_abs[::d, ::d, tt],
           scale=2, linewidth=0.4)
plt.title(labels_month[np.mod(tt, 12)])

AXPOS = AX.get_position()
CAX = fig.add_axes([AXPOS.x1 + 0.015, AXPOS.y0, 0.01, AXPOS.y1 - AXPOS.y0])
HB = plt.colorbar(H, CAX, orientation='vertical')
HB.set_label(r'[N m$^{-2}$]')

plt.savefig('mhw_properties/' + mhwname + '_OMAPS_TAU_Monthly2016.png',
            bbox_inches='tight', pad_inches=0.5, dpi=150)

# Wind Stress Anomaly
[pos_arr, vel_arr, theta_arr] = initialize(n, l, v)
# seed the per-step state from the initial arrays (the loop below expects
# thetas/positions/velocities, which were otherwise undefined on entry)
positions, velocities, thetas = pos_arr, vel_arr, theta_arr
for ti in range(t):
    print(ti)
    [thetas, positions, velocities] = run_1step(n, l, thetas, positions,
                                                velocities, radius, dt)
    #print(np.shape(positions))
    #print(np.shape(velocities))
    position_evolution_arr[ti + 1] = positions
    velocity_evolution_arr[ti + 1] = velocities

print(np.shape(velocity_evolution_arr))
print(np.shape(position_evolution_arr))

plt.quiver(position_evolution_arr[0, :, 0], position_evolution_arr[0, :, 1],
           velocity_evolution_arr[0, :, 0], velocity_evolution_arr[0, :, 1])
plt.show()
plt.quiver(position_evolution_arr[t, :, 0], position_evolution_arr[t, :, 1],
           velocity_evolution_arr[t, :, 0], velocity_evolution_arr[t, :, 1])
plt.show()

fig, ax = plt.subplots()
Writer = matplotlib.animation.writers['ffmpeg']
writer = Writer(fps=5, metadata=dict(artist='Anuran'), bitrate=1800)
ani = matplotlib.animation.FuncAnimation(
    fig,
def pyramid(im, max_scale):
    images = [im]
    for k in range(1, max_scale):
        images.append(cv2.resize(images[k - 1], (0, 0), fx=0.5, fy=0.5))
    return images


I = cv2.imread("I.jpg")
J = cv2.imread("J.jpg")
IG = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
JG = cv2.cvtColor(J, cv2.COLOR_BGR2GRAY)

max_scale = 3
IP = pyramid(IG, max_scale)
JP = pyramid(JG, max_scale)

u0 = np.zeros(IP[-1].shape, np.uint8)
v0 = np.zeros(JP[-1].shape, np.uint8)
# coarse-to-fine: start at the smallest pyramid level and upsample the flow
# estimate before refining it at the next finer level
for k in range(1, max_scale + 1):
    if k != 1:
        u0 = cv2.resize(u, (0, 0), fx=2, fy=2,
                        interpolation=cv2.INTER_NEAREST)
        v0 = cv2.resize(v, (0, 0), fx=2, fy=2,
                        interpolation=cv2.INTER_NEAREST)
    u, v = of(IP[-k], JP[-k], u0, v0)

plt.figure(1)
plt.gca().invert_yaxis()
# plt.imshow(IG)
plt.quiver(u, v)
plt.show()
nx = 51
ny = 51
nt = 5000
Uwall = 1.
dx = 2.0 / (nx - 1)
dy = 2.0 / (ny - 1)
x = np.linspace(0, 2, nx)
y = np.linspace(0, 2, ny)
X, Y = np.meshgrid(x, y)

rho = 1
nu = 0.02
dt = .002

u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))

u, v, p = cavityFlow(nt, u, v, dt, dx, dy, p, rho, nu, Uwall)

fig = plt.figure(figsize=(11, 7), dpi=100)
plt.contourf(X, Y, p, alpha=0.5)  # plotting the pressure field as a contour
plt.colorbar()
plt.contour(X, Y, p)  # plotting the pressure field outlines
plt.quiver(X, Y, u, v)  # plotting velocity
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
plt.streamplot(x, y, u, v)
plt.show()
def _sample_antipodal_grasps(self, rgbd_im, camera_intr, num_samples,
                             segmask=None, visualize=False):
    """Sample a set of 2D grasp candidates from a depth image by finding
    depth edges, then uniformly sampling point pairs and keeping only
    antipodal grasps with width less than the maximum allowable.

    Parameters
    ----------
    rgbd_im : :obj:`perception.RgbdImage`
        RGB-D image to sample from
    camera_intr : :obj:`perception.CameraIntrinsics`
        intrinsics of the camera that captured the images
    num_samples : int
        number of grasps to sample
    segmask : :obj:`perception.BinaryImage`
        binary image segmenting out the object of interest
    visualize : bool
        whether or not to show intermediate samples (for debugging)

    Returns
    -------
    :obj:`list` of :obj:`Grasp2D`
        list of 2D grasp candidates
    """
    # compute edge pixels
    edge_start = time()
    depth_im = rgbd_im.depth
    depth_im = depth_im.apply(snf.gaussian_filter,
                              sigma=self._depth_grad_gaussian_sigma)
    depth_im_downsampled = depth_im.resize(self._rescale_factor)
    depth_im_threshed = depth_im_downsampled.threshold_gradients(
        self._depth_grad_thresh)
    edge_pixels = self._downsample_rate * depth_im_threshed.zero_pixels()
    if segmask is not None:
        edge_pixels = np.array(
            [p for p in edge_pixels if np.any(segmask[p[0], p[1]] > 0)])
    num_pixels = edge_pixels.shape[0]
    logging.debug('Depth edge detection took %.3f sec' %
                  (time() - edge_start))
    logging.debug('Found %d edge pixels' % (num_pixels))

    # exit if no edge pixels
    if num_pixels == 0:
        return []

    # compute max depth
    min_depth = np.min(depth_im.data) + self._min_depth_offset
    max_depth = np.max(depth_im.data) + self._max_depth_offset

    # compute surface normals
    normal_start = time()
    edge_normals = self._surface_normals(depth_im, edge_pixels)
    logging.debug('Normal computation took %.3f sec' %
                  (time() - normal_start))

    if visualize:
        vis.figure()
        vis.subplot(1, 2, 1)
        vis.imshow(depth_im)
        if num_pixels > 0:
            vis.scatter(edge_pixels[:, 1], edge_pixels[:, 0], s=10, c='b')
            X = [pix[1] for pix in edge_pixels]
            Y = [pix[0] for pix in edge_pixels]
            U = [10 * pix[1] for pix in edge_normals]
            V = [-10 * pix[0] for pix in edge_normals]
            plt.quiver(X, Y, U, V, units='x', scale=1, zorder=2, color='g')
        vis.title('Edge pixels and normals')

        vis.subplot(1, 2, 2)
        vis.imshow(depth_im_threshed)
        vis.title('Edge map')
        vis.show()

    # form set of valid candidate point pairs
    sample_start = time()
    max_grasp_width_px = Grasp2D(Point(np.zeros(2)), 0.0, min_depth,
                                 width=self._gripper_width,
                                 camera_intr=camera_intr).width_px
    normal_ip = edge_normals.dot(edge_normals.T)
    dists = ssd.squareform(ssd.pdist(edge_pixels))
    valid_indices = np.where(
        (normal_ip < -np.cos(np.arctan(self._friction_coef)))
        & (dists < max_grasp_width_px) & (dists > 0.0))
    valid_indices = np.c_[valid_indices[0], valid_indices[1]]
    num_pairs = valid_indices.shape[0]
    logging.debug('Normal pruning %.3f sec' % (time() - sample_start))

    # raise exception if no antipodal pairs
    if num_pairs == 0:
        return []

    # iteratively sample grasps
    k = 0
    grasps = []
    sample_size = min(self._max_rejection_samples, num_pairs)
    candidate_pair_indices = np.random.choice(num_pairs, size=sample_size,
                                              replace=False)
    while k < sample_size and len(grasps) < num_samples:
        # sample a random pair without replacement
        j = candidate_pair_indices[k]
        pair_ind = valid_indices[j, :]
        p1 = edge_pixels[pair_ind[0], :]
        p2 = edge_pixels[pair_ind[1], :]
        n1 = edge_normals[pair_ind[0], :]
        n2 = edge_normals[pair_ind[1], :]
        width = np.linalg.norm(p1 - p2)
        k += 1

        # check force closure
        if force_closure(p1, p2, n1, n2, self._friction_coef):
            # compute grasp parameters
            grasp_center = (p1 + p2) / 2
            grasp_axis = p2 - p1
            grasp_axis = grasp_axis / np.linalg.norm(grasp_axis)
            grasp_theta = 0
            if grasp_axis[1] != 0:
                grasp_theta = np.arctan(grasp_axis[0] / grasp_axis[1])

            # compute distance from image center
            dist_from_center = np.linalg.norm(grasp_center - depth_im.center)
            dist_from_boundary = min(
                np.abs(depth_im.height - grasp_center[0]),
                np.abs(depth_im.width - grasp_center[1]),
                grasp_center[0], grasp_center[1])
            if dist_from_center < self._max_dist_from_center and \
               dist_from_boundary > self._min_dist_from_boundary:
                # form grasp object
                grasp_center_pt = Point(
                    np.array([grasp_center[1], grasp_center[0]]))
                grasp = Grasp2D(grasp_center_pt, grasp_theta, 0.0)

                # check grasp dists
                grasp_dists = [
                    Grasp2D.image_dist(grasp, candidate,
                                       alpha=self._angle_dist_weight)
                    for candidate in grasps
                ]
                if len(grasps) == 0 or \
                   np.min(grasp_dists) > self._min_grasp_dist:
                    if visualize:
                        vis.figure()
                        vis.imshow(depth_im)
                        vis.scatter(p1[1], p1[0])
                        vis.scatter(p2[1], p2[0])
                        vis.title('Grasp candidate %d' % (len(grasps)))
                        vis.show()

                    # sample depths
                    for i in range(self._depth_samples_per_grasp):
                        # get depth in the neighborhood of the center pixel
                        depth_win = depth_im.data[
                            grasp_center[0] - self._h:grasp_center[0] + self._h,
                            grasp_center[1] - self._w:grasp_center[1] + self._w]
                        center_depth = np.min(depth_win)
                        if center_depth == 0 or np.isnan(center_depth):
                            continue

                        # sample depth between the min and max
                        min_depth = np.min(center_depth) + \
                            self._min_depth_offset
                        max_depth = np.max(center_depth) + \
                            self._max_depth_offset
                        sample_depth = min_depth + \
                            (max_depth - min_depth) * np.random.rand()
                        candidate_grasp = Grasp2D(grasp_center_pt,
                                                  grasp_theta,
                                                  sample_depth,
                                                  width=self._gripper_width,
                                                  camera_intr=camera_intr)
                        grasps.append(candidate_grasp)

    # return sampled grasps
    return grasps
                cstride=3, linewidth=1, antialiased=True, cmap=cm.viridis)
#cset = ax.contourf(X, Y, Z, zdir='z', offset=-0.15, cmap=cm.viridis)

# Adjust the limits, ticks and view angle
ax.set_zlim(0, 0.1)
ax.set_zticks(np.linspace(0, 0.1, 10))
ax.view_init(27, -21)
plt.show()

###################################################################
# contour
fig = plt.figure()
plt.contour(X, Y, Z)
plt.contour(X, Y, (Z1 - Z2).reshape(X.shape), levels=[0])

##############################################################
w, v = np.linalg.eig(c1TrainCov)
x1, y1 = v
w, v = np.linalg.eig(c2TrainCov)
x2, y2 = v

# eigenvectors
plt.quiver(c1TrainMean[0], c1TrainMean[1], x1, y1, color='r')
plt.quiver(c2TrainMean[0], c2TrainMean[1], x2, y2)

###############################################################
plt.scatter(class1Test[0], class1Test[1], color='r')
plt.scatter(class2Test[0], class2Test[1], color='b')
###############################################################
plt.show()
# the show command is used to visualize the data plot
plt.show()

gradUrep[:, :, 0], gradUrep[:, :, 1] = np.gradient(Urep)
dx, dy = np.gradient(Urep)

img = mpimg.imread('cs.png')
gray = rgb2gray(img)
plt.imshow(gray, cmap=plt.get_cmap('gray'), vmin=0, vmax=1)

# for coloring the quiver plot
# n = -2
# color = np.sqrt(((dx-n)/2)*2 + ((dy-n)/2)*2)

plt.quiver(X, Y, gradUrep[:, :, 1], -gradUrep[:, :, 0], 0, alpha=0.5)
plt.show()

########################################################################################
# quadratic cum conic potential

# Attractive Potential
zeta = 0.1
dstar = 6
q_goal = [20, 60]
X, Y = np.meshgrid(np.linspace(0, 99, 100), np.linspace(0, 99, 100))
Uatt = np.zeros((100, 100))  # Uatt is an array of size 100 x 100
gradUatt = np.zeros((100, 100, 2))
kw = dict(levels=np.linspace(24, 30, 16))
plt.contourf(x, y, temp_POM, cmap=cmocean.cm.thermal, **kw)
xq, yq = m(np.tile(-90, len(lat_POM[:, 0])), lat_POM[:, 0])
plt.plot(xq, yq, '-', color='k')
cbar = plt.colorbar()
cbar.ax.set_ylabel('($^\circ$C)', fontsize=14, labelpad=15)
cbar.ax.tick_params(labelsize=14)
#plt.title('HWRF-POM SST and Surface Velocity on '+str(time_POM[0])[0:13],fontsize=14)
plt.title('HWRF-POM SST on ' + str(time_POM[0])[0:13], fontsize=16)
#c.set_label('($^oC$)',rotation=90, labelpad=15, fontsize=16)

yticks, xticks = m(np.arange(-97.5, -79, 2.5), np.arange(15, 33, 2.5))
plt.yticks(yticks, labels=np.arange(15, 33, 2.5), fontsize=12)
plt.xticks(xticks, np.arange(-97.5, -79, 2.5), fontsize=12)

q = plt.quiver(x[::7, ::7], y[::7, ::7], u_POM[::7, ::7], v_POM[::7, ::7])
xq, yq = m(-74, 33.5)
plt.quiverkey(q, xq, yq, 1, "1 m/s", coordinates='data', color='k',
              fontproperties={'size': 14})

file_name = folder_fig + 'Cristobal_SST_' + str(time_POM[0])[0:10]
plt.savefig(file_name, bbox_inches='tight', pad_inches=0.1)

#%%
POM = xr.open_dataset(ncfiles[0])
x = linspace(x_n1[0] - 5, x_n1[0] + 5, nb_points)
y = linspace(x_n1[1] - 5, x_n1[1] + 5, nb_points)

# Create meshgrid
X1, Y1 = meshgrid(x, y)

# Calculate growth rate at each grid point
DX1, DY1 = f([X1, Y1], 0)

# Direction at each grid point is the hypotenuse of the prey direction and
# the predator direction.
M = (hypot(DX1, DY1))
# This is to avoid any divisions by zero when normalizing
M[M == 0] = 1.
# Normalize the length of each arrow (optional)
DX1 /= M
DY1 /= M

Q = plt.quiver(X1, Y1, DX1, DY1, M, pivot='mid', cmap=plt.cm.plasma)
plt.plot([x_n1[0]], [x_n1[1]], marker='o', markersize=3, color="blue")
plt.plot([0], [0], marker='o', markersize=3, color="black")
plt.plot([eps[0] / gamma[0][1]], [0], marker='o', markersize=3, color="black")
plt.xlim([x_n1[0] - 5, x_n1[0] + 5])
plt.ylim([x_n1[1] - 5, x_n1[1] + 5])

print('Equilibrium points (for real, non-negative x and y)')
print(array([0, 0]))
print(array([eps[0] / gamma[0][1], 0]))
print(x_n1)
plt.show()
    Q = ax1.quiver(z['west_east'].values, z['south_north'].values,
                   ua_interp.isel(Time=tindex).values,
                   va_interp.isel(Time=tindex).values,
                   pivot='middle', color='black', units='width',
                   width=0.0007, headwidth=10)
    qk = ax1.quiverkey(Q, 0.92, .95, 5, r'$5 \frac{m}{s}$',
                       labelpos='E', coordinates='figure')
    # colorbar() has no 'title' keyword; label the colorbar instead
    cb = plt.colorbar(cb, shrink=0.5)
    cb.set_label('Vertical wind (m/s)')
    ax1.set_title('Vertical motion (m/s) and winds (m/s) at time=' +
                  str(tindex) + ' and level=' + str(level))
    plt.tight_layout()
    plt.show()


interactive_plot = interactive(plot_interact, tindex=(0, 13),
                               level=(0, 20000, 500))
output = interactive_plot.children[-1]
output.layout.height = '500px'
interactive_plot

plt.quiver(x, ua_interp.values, va_interp.values)
x, y = np.meshgrid(z.west_east.values, z.south_north.values)
W[0] -= lrate * dW0
W[1] -= lrate * dW1

# dW0res = np.clip(dW0res, -0.5, 0.5)
# dW1res = np.clip(dW1res, -0.5, 0.5)


def descr():
    s = ""
    if classic_neuron:
        s += "_class"
    return s


for i in range(num_iters):
    plt.quiver(W0res[i], W1res[i], dW0res[i], dW1res[i], error_res[i],
               cmap=cm.seismic, headlength=7, headwidth=5.0)
    plt.colorbar()
    plt.savefig("{}/tmp/pics/grad_flow{}_{}.png".format(
        os.environ["HOME"], descr(), i))
    plt.clf()
# So apply the first condition for iLCS to the field,
# i.e. s1 < 0 and s2 > 0
s1_directional_derivative = np.ma.masked_where(s1 > 0,
                                               s1_directional_derivative)
s2_directional_derivative = np.ma.masked_where(s2 < 0,
                                               s2_directional_derivative)

# You may find additional thresholding useful if there are a lot of
# features in the field
#s1_directional_derivative = np.ma.masked_where(-s1<=np.percentile(-s1,85),s1_directional_derivative)
#s2_directional_derivative = np.ma.masked_where(s2<=np.percentile(s2,85),s2_directional_derivative)

# Now we apply the third condition, i.e. concavity
s1_directional_derivative = np.ma.masked_where(s1_concavity < 0,
                                               s1_directional_derivative)
s2_directional_derivative = np.ma.masked_where(s2_concavity > 0,
                                               s2_directional_derivative)

# Now we can extract the iLCS
attracting_ilcs = plt.contour(x, y, s1_directional_derivative,
                              levels=[0], colors='blue')
repelling_ilcs = plt.contour(x, y, s2_directional_derivative,
                             levels=[0], colors='red')
i = 5
plt.quiver(x[::i, ::i], y[::i, ::i], u[::i, ::i], v[::i, ::i])
s1_bar = -0.5 * np.sqrt(0.25 * alpha**2 + beta**2)
N = np.sqrt(0.25 * alpha**2 + (s1_bar + beta)**2)
xi_x = 1 / N * 0.5 * alpha
# parenthesized so the whole (s1_bar + beta) component is normalized by N,
# matching the definition of N above (the original read 1/N*s1_bar + beta)
xi_y = 1 / N * (s1_bar + beta)

s1 = -0.5 * pi**2 * A * np.sqrt(
    cos(pi * f)**2 * cos(pi * y)**2 * dfdx**2
    + 0.25 * sin(pi * f)**2 * sin(pi * y)**2 * (1 - dfdx)**2)

correction = s1**2 - 0.5 * (B_xx * xi_x**2 + 2 * B_xy * xi_x * xi_y
                            + B_yy * xi_y**2)

import matplotlib.pyplot as plt
plt.close('all')
plt.figure(figsize=(8, 4))
plt.subplot(121)
plt.pcolormesh(-s1)
plt.colorbar()
plt.subplot(122)
plt.pcolormesh(correction)
plt.colorbar()
plt.title('Analytic')

plt.figure(figsize=(8, 4))
plt.quiver(x[::10, ::10], y[::10, ::10], xi_x[::10, ::10], xi_y[::10, ::10])
#sigma = s1 + correction*T
def plot_arrows(vlm, colors_dict=None, fontsize=10, quiver_scale=10,
                axis_on='off', filename=None, xlabel='', ylabel='',
                dpi=100, figsize=(5, 5), markerscale=5):
    """
    Plot the velocity arrows onto the embedding saved in vlm.ts

    Parameters
    --------
    vlm: Velocyto Loom object
    colors_dict: dict
        Contains colors for the cluster names. When present, a legend is added.
    fontsize: int
        legend fontsize
    """
    plt.figure(None, figsize=figsize, dpi=dpi)

    plt.scatter(vlm.embedding[:, 0], vlm.embedding[:, 1],
                c="0.8", alpha=0.2, s=10, edgecolor="")

    ix_choice = np.random.choice(vlm.embedding.shape[0],
                                 size=int(vlm.embedding.shape[0] / 1.),
                                 replace=False)
    plt.scatter(vlm.embedding[ix_choice, 0], vlm.embedding[ix_choice, 1],
                color=vlm.colorandum[ix_choice], alpha=0.2, s=20,
                edgecolor=(0, 0, 0, 1), lw=0.3, rasterized=True)

    quiver_kwargs = dict(headaxislength=10, headlength=11, headwidth=12,
                         linewidths=0.5, width=0.00045, edgecolors="k",
                         color=vlm.colorandum[ix_choice], alpha=1)
    plt.quiver(vlm.embedding[ix_choice, 0], vlm.embedding[ix_choice, 1],
               vlm.delta_embedding[ix_choice, 0],
               vlm.delta_embedding[ix_choice, 1],
               scale=quiver_scale, **quiver_kwargs)

    # add a legend
    if colors_dict is not None:
        plot_legend(colors_dict, fontsize=fontsize, markerscale=markerscale)

    plt.axis(axis_on)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.plot()
    if filename is not None:
        plt.savefig(filename, dpi=dpi, format='svg',
                    bbox_inches='tight', pad_inches=0)
def main():
    # get symbol
    pprint.pprint(cfg)
    cfg.symbol = 'resnet_v1_101_flownet_rfcn'
    model = '/../model/fgfa_rfcn_vid_s'
    # The whole window spans key-frame interval * 2 + 1 frames;
    # the paper sets KEY_FRAME_INTERVAL to 10.
    all_frame_interval = cfg.TEST.KEY_FRAME_INTERVAL + 1
    # all_frame_interval = 7
    max_per_image = cfg.TEST.max_per_image
    feat_sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
    aggr_sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()

    feat_sym = feat_sym_instance.get_feat_symbol(cfg)
    aggr_sym = aggr_sym_instance.get_plot_symbol(cfg)

    # set up class names
    num_classes = 2
    classes = ['__background__', 'smoke']

    # load demo data
    image_names = sorted(
        glob.glob(cur_path + '/../data/IR_smoke/Data/VID/val/8/*.png'))
    output_dir = cur_path + '/../demo/rfcn_fgfa_8_agg_1/'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    data = []
    for im_name in image_names:
        assert os.path.exists(im_name), '{} does not exist'.format(im_name)
        im = cv2.imread(im_name,
                        cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        target_size = cfg.SCALES[0][0]
        max_size = cfg.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size,
                              stride=cfg.network.IMAGE_STRIDE)
        im_tensor = transform(im, cfg.network.PIXEL_MEANS)
        im_info = np.array(
            [[im_tensor.shape[2], im_tensor.shape[3], im_scale]],
            dtype=np.float32)

        feat_stride = float(cfg.network.RCNN_FEAT_STRIDE)
        data.append({'data': im_tensor, 'im_info': im_info,
                     'data_cache': im_tensor, 'feat_cache': im_tensor})

    # get predictor
    print('get-predictor')
    data_names = ['data', 'im_info', 'data_cache', 'feat_cache']
    label_names = []

    t1 = time.time()
    data = [[mx.nd.array(data[i][name]) for name in data_names]
            for i in range(len(data))]
    max_data_shape = [[
        ('data', (1, 3, max([v[0] for v in cfg.SCALES]),
                  max([v[1] for v in cfg.SCALES]))),
        ('data_cache', (6, 3, max([v[0] for v in cfg.SCALES]),
                        max([v[1] for v in cfg.SCALES]))),
        ('feat_cache', (6, cfg.network.FGFA_FEAT_DIM,
                        np.ceil(max([v[0] for v in cfg.SCALES]) /
                                feat_stride).astype(np.int),
                        np.ceil(max([v[1] for v in cfg.SCALES]) /
                                feat_stride).astype(np.int)))
    ]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])]
                    for i in range(len(data))]
    provide_label = [None for _ in range(len(data))]

    arg_params, aux_params = load_param(cur_path + model, 1, process=True)

    feat_predictors = Predictor(feat_sym, data_names, label_names,
                                context=[mx.gpu(0)],
                                max_data_shapes=max_data_shape,
                                provide_data=provide_data,
                                provide_label=provide_label,
                                arg_params=arg_params,
                                aux_params=aux_params)
    aggr_predictors = Predictor(aggr_sym, data_names, label_names,
                                context=[mx.gpu(0)],
                                max_data_shapes=max_data_shape,
                                provide_data=provide_data,
                                provide_label=provide_label,
                                arg_params=arg_params,
                                aux_params=aux_params)
    nms = py_nms_wrapper(cfg.TEST.NMS)

    # First frame of the video
    idx = 0
    data_batch = mx.io.DataBatch(
        data=[data[idx]], label=[], pad=0, index=idx,
        provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
        provide_label=[None])
    scales = [data_batch.data[i][1].asnumpy()[0, 2]
              for i in range(len(data_batch.data))]
    all_boxes = [[[] for _ in range(len(data))]
                 for _ in range(num_classes)]
    data_list = deque(maxlen=all_frame_interval)
    feat_list = deque(maxlen=all_frame_interval)
    image, feat = get_resnet_output(feat_predictors, data_batch, data_names)
    # append cfg.TEST.KEY_FRAME_INTERVAL padding images in the front
    # (first frame)
    while len(data_list) < cfg.TEST.KEY_FRAME_INTERVAL:
        data_list.append(image)
        feat_list.append(feat)

    vis = False
    file_idx = 0
    thresh = 1e-3
    for idx, element in enumerate(data):
        data_batch = mx.io.DataBatch(data=[element], label=[],
                                     pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in
                                                    zip(data_names, element)]],
                                     provide_label=[None])
        scales = [data_batch.data[i][1].asnumpy()[0, 2]
                  for i in range(len(data_batch.data))]

        if idx != len(data) - 1:
            if len(data_list) < all_frame_interval - 1:
                image, feat = get_resnet_output(feat_predictors, data_batch,
                                                data_names)
                data_list.append(image)
                feat_list.append(feat)
            else:
                #################################################
                # main part of the loop
                #################################################
                image, feat = get_resnet_output(feat_predictors, data_batch,
                                                data_names)
                data_list.append(image)
                feat_list.append(feat)

                prepare_data(data_list, feat_list, data_batch)
                flow = plot_feature(aggr_predictors, data_batch)

                if file_idx == 20:
                    for i in range(len(flow)):
                        print(flow[i].shape)
                        flow[i] = flow[i].reshape(19, 24, -1)
                        print(flow[i].shape)
                        step = 2
                        plt.quiver(
                            np.arange(0, flow[i].shape[1], step),
                            np.arange(flow[i].shape[0], -1, -step),
                            flow[i][::step, ::step, 0],
                            flow[i][::step, ::step, 1])
                        plt.savefig(output_dir + '/' + str(i) + '.png')
                        plt.cla()
                        # alternative: save the flow as an RGB image
                        # rgb_flow = flow2rgb(20 * flow[i], max_value=None)
                        # to_save = (rgb_flow * 255).astype(np.uint8).transpose(1, 2, 0)
                        # imwrite(output_dir + '/' + str(i) + '.png', to_save)
                    break

                print('testing {} '.format(str(file_idx) + '.png'))
                file_idx += 1
        else:
            #################################################
            # end part of a video
            #################################################
            end_counter = 0
            image, feat = get_resnet_output(feat_predictors, data_batch,
                                            data_names)
            while end_counter < cfg.TEST.KEY_FRAME_INTERVAL + 1:
                data_list.append(image)
                feat_list.append(feat)
                prepare_data(data_list, feat_list, data_batch)
                flow = plot_feature(aggr_predictors, data_batch)
                file_idx += 1
                end_counter += 1

    print('done')
# Norm of a vector: sqrt(x² + y²)
def vector_norm(v):
    x = v[0]
    y = v[1]
    r = math.sqrt(pow(x, 2) + pow(y, 2))
    print(r)
    return r

#vector_norm(v1)


def cosinus_similarity(a, b):
    r = (scalar_product(a, b)) / (vector_norm(a) * vector_norm(b))
    print(r)
    return r


# To reduce overfitting: reduce model complexity, apply regularization,
# get more data.

cosinus_similarity(v1, v2)

# plt.quiver expects vector components, not point tuples, so v1 is drawn
# anchored at the origin with explicit components:
plt.quiver(origin[0], origin[1], v1[0], v1[1],
           angles='xy', scale_units='xy', scale=1)
plt.show()
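# Worked check of cosinus_similarity against numpy (assumes the
# scalar_product helper defined earlier in this file; vectors are examples):
a, b = [1, 0], [1, 1]
# scalar_product(a, b) = 1, the norms are 1 and sqrt(2), so the similarity
# is 1 / sqrt(2) ~ 0.7071, i.e. cos(45 degrees)
assert abs(cosinus_similarity(a, b) - np.dot(a, b) /
           (np.linalg.norm(a) * np.linalg.norm(b))) < 1e-12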
def plot_arrows_zoom(vlm, axis_ranges=None, colors_dict=None,
                     plot_title='', fontsize=15, figsize=(6, 6)):
    """
    Plots a region of the embedding with a selected number of cells
    and their velocities.

    Parameters
    --------
    vlm: VelocytoLoom object
    axis_ranges: list
        should be [xmin, xmax, ymin, ymax]
    colors_dict: dict
        used for the legend, should contain colors for the cells displayed
    """
    plt.figure(None, figsize)

    # this is not really a gaussian kernel but more a gaussian distribution;
    # we use it for local density estimation
    def gaussian_kernel(X, mu=0, sigma=1):
        return np.exp(-(X - mu)**2 / (2 * sigma**2)) / \
            np.sqrt(2 * np.pi * sigma**2)

    steps = 45, 45
    grs = []
    for dim_i in range(vlm.embedding.shape[1]):
        # get the range of the embedding distribution in every dim (2)
        m, M = np.min(vlm.embedding[:, dim_i]), \
            np.max(vlm.embedding[:, dim_i])

        # widen the range slightly
        m = m - 0.025 * np.abs(M - m)
        M = M + 0.025 * np.abs(M - m)

        # create a grid for each dimension, spanning the range
        gr = np.linspace(m, M, steps[dim_i])
        grs.append(gr)

    # build a mesh grid from these lists
    # (the * operator unpacks the lists)
    meshes_tuple = np.meshgrid(*grs)

    # put the gridpoint coordinates in vectors and stack them together
    gridpoint_coordinates = np.vstack([i.flat for i in meshes_tuple]).T

    # add some normal noise to the gridpoint coordinates
    gridpoint_coordinates = gridpoint_coordinates + \
        norm.rvs(loc=0, scale=0, size=gridpoint_coordinates.shape)

    nn = NearestNeighbors()
    # train the nearest-neighbors classifier
    nn.fit(vlm.embedding)
    # for each grid point, find the 20 closest cells around it
    dist, ixs = nn.kneighbors(gridpoint_coordinates, 20)

    ix_choice = ixs[:, 0].flat[:]
    ix_choice = np.unique(ix_choice)

    # refine the choice of cells based on the local density
    nn = NearestNeighbors()
    nn.fit(vlm.embedding)
    # for our previously chosen cells, find nearest neighbors in the full
    # data set
    dist, ixs = nn.kneighbors(vlm.embedding[ix_choice], 20)

    # estimate the density around each of our chosen cells
    density_estimate = gaussian_kernel(dist, mu=0, sigma=0.5).sum(1)
    # find dense regions
    bool_density = density_estimate > np.percentile(density_estimate, 25)
    ix_choice = ix_choice[bool_density]

    # plot all points blurry
    plt.scatter(vlm.embedding[:, 0], vlm.embedding[:, 1],
                c=vlm.colorandum, alpha=0.2, s=120, edgecolor="")

    # plot the selected points
    plt.scatter(vlm.embedding[ix_choice, 0], vlm.embedding[ix_choice, 1],
                c=vlm.colorandum[ix_choice], alpha=1, s=120, edgecolor="k")

    # plot arrows for these cells
    quiver_kwargs = dict(scale=6.8, headaxislength=9, headlength=15,
                         headwidth=14, linewidths=0.4, edgecolors="k",
                         color="k", alpha=1)
    plt.quiver(vlm.embedding[ix_choice, 0], vlm.embedding[ix_choice, 1],
               vlm.delta_embedding[ix_choice, 0],
               vlm.delta_embedding[ix_choice, 1],
               **quiver_kwargs)

    # focus on one region
    if axis_ranges is not None:
        plt.xlim(axis_ranges[:2])
        plt.ylim(axis_ranges[2:])
    despline()
    plt.title(plot_title)
    if colors_dict is not None:
        plot_legend(colors_dict, fontsize=fontsize)
    plt.plot()
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

plt.style.use('seaborn-whitegrid')

data = pd.read_csv("perturbation.csv", sep=';')
df = pd.DataFrame(data=data)
df['err_line'] = df['err_line'] / 1250

# Full view
new_df = df.loc[df['test_nb'] == 4]

# head-to-tail arrow components: differences between consecutive samples
u = list(new_df['t'].values)
u = [y - x for x, y in zip(u, u[1:])]
u.append(0)
v = list(new_df['err_line'].values)
v = [y - x for x, y in zip(v, v[1:])]
v.append(0)

plt.quiver(new_df['t'], new_df['err_line'], u, v, color='blue')
plt.ylim([-3, 3])
plt.xlabel('Time [seconds]', fontsize=13)
plt.ylabel('Error line [cm]', fontsize=13)
plt.legend(fontsize=12)
plt.tick_params(axis='both', which='major', labelsize=11)
plt.savefig('big-perturbation-right.png')
plt.show()
def main():
    # Dataset.
    ts_, ts_ext_, ts_vis_, ts, ts_ext, ts_vis, ys, ys_ = make_data()
    summary = SummaryWriter(os.path.join(args.train_dir, 'tb'))

    # Plotting parameters.
    vis_batch_size = 1024
    ylims = (-1.75, 1.75)
    alphas = [0.05, 0.10, 0.15, 0.20, 0.25, 0.30,
              0.35, 0.40, 0.45, 0.50, 0.55]
    percentiles = [0.999, 0.99, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
    vis_idx = np.random.permutation(vis_batch_size)
    if args.color == "blue":
        fill_color = '#9ebcda'
        mean_color = '#4d004b'
        num_samples = 60
    else:
        sample_colors = ('#fc4e2a', '#e31a1c', '#bd0026')
        fill_color = '#fd8d3c'
        mean_color = '#800026'
        num_samples = len(sample_colors)

    # Fix seed for the random draws used in the plots.
    eps = torch.randn(vis_batch_size, 1).to(device)
    # We need space-time Levy area to use the SRK solver.
    bm = torchsde.BrownianInterval(
        t0=ts_vis[0],
        t1=ts_vis[-1],
        size=(vis_batch_size, 1),
        device=device,
        levy_area_approximation='space-time',
        pool_size=POOL_SIZE,
        cache_size=CACHE_SIZE,
    )

    # Model.
    # Note: this `mu` is selected based on the y-value of the two endpoints
    # of the left and right segments.
    model = LatentSDE(mu=-0.80901699, sigma=args.sigma).to(device)
    optimizer = make_optimizer(optimizer=args.optimizer,
                               params=model.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=.99997)
    kl_scheduler = LinearScheduler(iters=args.kl_anneal_iters,
                                   maxval=args.kl_coeff)
    nll_scheduler = ConstantScheduler(constant=args.nll_coef)

    logpy_metric = EMAMetric()
    kl_metric = EMAMetric()
    loss_metric = EMAMetric()

    if os.path.exists(os.path.join(args.train_dir, 'ckpts', f'state.ckpt')):
        logging.info("Loading checkpoints...")
        checkpoint = torch.load(
            os.path.join(args.train_dir, 'ckpts', f'state.ckpt'))
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        try:
            logpy_metric.set(checkpoint['logpy_metric'])
            kl_metric.set(checkpoint['kl_metric'])
            loss_metric.set(checkpoint['loss_metric'])
        except Exception:
            logging.warning(
                "Could not successfully load logpy, kl, and loss metrics "
                "from checkpoint")
        logging.info(
            f"Successfully loaded checkpoints at global_step "
            f"{checkpoint['global_step']}")

    if args.show_prior:
        with torch.no_grad():
            zs = model.sample_p(ts=ts_vis, batch_size=vis_batch_size,
                                eps=eps, bm=bm).squeeze()
            ts_vis_, zs_ = ts_vis.cpu().numpy(), zs.cpu().numpy()
            zs_ = np.sort(zs_, axis=1)

            img_dir = os.path.join(args.train_dir, 'prior.png')

            plt.subplot(frameon=False)
            for alpha, percentile in zip(alphas, percentiles):
                idx = int((1 - percentile) / 2. * vis_batch_size)
                zs_bot_ = zs_[:, idx]
                zs_top_ = zs_[:, -idx]
                plt.fill_between(ts_vis_, zs_bot_, zs_top_,
                                 alpha=alpha, color=fill_color)

            # `zorder` determines who's on top; the larger the more at the top.
            plt.scatter(ts_, ys_[:, 0], marker='x', zorder=3,
                        color='k', s=35)  # Data.
            if args.data != "irregular_sine":
                plt.scatter(ts_, ys_[:, 1], marker='x', zorder=3,
                            color='k', s=35)  # Data.
            plt.ylim(ylims)
            plt.xlabel('$t$')
            plt.ylabel('$Y_t$')
            plt.tight_layout()
            plt.savefig(img_dir, dpi=args.dpi)
            summary.add_figure('Prior', plt.gcf(), 0)
            logging.info(f'Prior saved to tensorboard')
            plt.close()
            logging.info(f'Saved prior figure at: {img_dir}')

    for global_step in tqdm.tqdm(range(args.train_iters)):
        # Plot and save.
        if global_step % args.pause_iters == 0 or \
                global_step == (args.train_iters - 1):
            img_path = os.path.join(args.train_dir, "plots",
                                    f'global_step_{global_step}.png')

            with torch.no_grad():
                # TODO:
                zs = model.sample_q(ts=ts_vis, batch_size=vis_batch_size,
                                    eps=None, bm=bm).squeeze()
                samples = zs[:, vis_idx]
                ts_vis_, zs_, samples_ = (ts_vis.cpu().numpy(),
                                          zs.cpu().numpy(),
                                          samples.cpu().numpy())
                zs_ = np.sort(zs_, axis=1)
                plt.subplot(frameon=False)

                if args.show_percentiles:
                    for alpha, percentile in zip(alphas, percentiles):
                        idx = int((1 - percentile) / 2. * vis_batch_size)
                        zs_bot_, zs_top_ = zs_[:, idx], zs_[:, -idx]
                        plt.fill_between(ts_vis_, zs_bot_, zs_top_,
                                         alpha=alpha, color=fill_color)

                if args.show_mean:
                    plt.plot(ts_vis_, zs_.mean(axis=1), color=mean_color)

                if args.show_samples:
                    for j in range(num_samples):
                        plt.plot(ts_vis_, samples_[:, j], linewidth=1.0)

                if args.show_arrows:
                    t_start, t_end = ts_vis_[0], ts_vis_[-1]
                    num, dt = 12, 0.12
                    t, y = torch.meshgrid([
                        torch.linspace(t_start, t_end, num).to(device),
                        torch.linspace(*ylims, num).to(device)
                    ])
                    t, y = t.reshape(-1, 1), y.reshape(-1, 1)
                    fty = model.f(t=t, y=y).reshape(num, num)
                    dt = torch.zeros(num, num).fill_(dt).to(device)
                    dy = fty * dt
                    dt_, dy_, t_, y_ = (dt.cpu().numpy(), dy.cpu().numpy(),
                                        t.cpu().numpy(), y.cpu().numpy())
                    plt.quiver(t_, y_, dt_, dy_, alpha=0.3, edgecolors='k',
                               width=0.0035, scale=50)

                if args.hide_ticks:
                    plt.xticks([], [])
                    plt.yticks([], [])

                plt.scatter(ts_, ys_[:, 0], marker='x', zorder=3,
                            color='k', s=35)  # Data.
                if args.data != "irregular_sine":
                    plt.scatter(ts_, ys_[:, 1], marker='x', zorder=3,
                                color='k', s=35)  # Data.
                plt.ylim(ylims)
                plt.xlabel('$t$')
                plt.ylabel('$Y_t$')
                plt.tight_layout()
                if global_step % args.save_fig == 0:
                    plt.savefig(img_path, dpi=args.dpi)
                current_fig = plt.gcf()
                summary.add_figure('Predictions plot', current_fig,
                                   global_step)
                logging.info(f'Predictions plot saved to tensorboard')
                plt.close()
                logging.info(f'Saved figure at: {img_path}')

            if args.save_ckpt:
                torch.save(
                    {'model': model.state_dict(),
                     'optimizer': optimizer.state_dict(),
                     'scheduler': scheduler.state_dict()},
                    os.path.join(args.train_dir, 'ckpts',
                                 f'global_step_{global_step}.ckpt'))
                # for preemption
                torch.save(
                    {'model': model.state_dict(),
                     'optimizer': optimizer.state_dict(),
                     'scheduler': scheduler.state_dict(),
                     'global_step': global_step,
                     'logpy_metric': logpy_metric.val,
                     'kl_metric': kl_metric.val,
                     'loss_metric': loss_metric.val},
                    os.path.join(args.train_dir, 'ckpts', f'state.ckpt'))

        # Train.
        optimizer.zero_grad()
        zs, kl = model(ts=ts_ext, batch_size=args.batch_size)
        zs = zs.squeeze()
        # Drop first and last, which are only used to penalize the
        # out-of-data region and spread uncertainty.
        zs = zs[1:-1]

        likelihood_constructor = {
            "laplace": distributions.Laplace,
            "normal": distributions.Normal,
            "cauchy": distributions.Cauchy
        }[args.likelihood]
        likelihood = likelihood_constructor(loc=zs, scale=args.scale)

        # Proper summation of log-likelihoods.
        logpy = 0.
ys_split = ys.split(split_size=1, dim=-1) for _ys in ys_split: logpy = logpy + likelihood.log_prob(_ys).sum(dim=0).mean(dim=0) logpy = logpy / len(ys_split) loss = -logpy * nll_scheduler.val + kl * kl_scheduler.val loss.backward() optimizer.step() scheduler.step() kl_scheduler.step() nll_scheduler.step(global_step) logpy_metric.step(logpy) kl_metric.step(kl) loss_metric.step(loss) logging.info(f'global_step: {global_step}, ' f'logpy: {logpy_metric.val:.3f}, ' f'kl: {kl_metric.val:.3f}, ' f'loss: {loss_metric.val:.3f}') summary.add_scalar('KL Schedler', kl_scheduler.val, global_step) summary.add_scalar('NLL Schedler', nll_scheduler.val, global_step) summary.add_scalar('Loss', loss_metric.val, global_step) summary.add_scalar('KL', kl_metric.val, global_step) summary.add_scalar('Log(py) Likelihood', logpy_metric.val, global_step) logging.info(f'Logged loss, kl, logpy to tensorboard') summary.close()
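# --- Sketch: the scheduler helpers used in main() above are not defined in
# this file. The following is a minimal, assumption-based sketch consistent
# with how they are called (`.val`, `kl_scheduler.step()`,
# `nll_scheduler.step(global_step)`); the real implementations may differ.
class LinearScheduler:
    """Anneals .val linearly from 0 up to `maxval` over `iters` calls to step()."""

    def __init__(self, iters, maxval=1.0):
        self._iters = max(1, iters)
        self._maxval = maxval
        self._val = 0.0

    def step(self):
        self._val = min(self._maxval, self._val + self._maxval / self._iters)

    @property
    def val(self):
        return self._val


class ConstantScheduler:
    """Keeps .val fixed; step() accepts (and ignores) the global step."""

    def __init__(self, constant):
        self._val = constant

    def step(self, global_step=None):
        pass

    @property
    def val(self):
        return self._val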
def plot_arrows_embedding_smg(self, choice="auto", quiver_scale="auto", color_arrow='cluster',
                              scale_type="relative", new_fig=False, plot_random=True,
                              scatter_kwargs={}, plot_scatter=False, **quiver_kwargs):
    """Plots velocity on the embedding cell-wise

    Arguments
    ---------
    choice: int, default="auto"
        The number of cells to randomly pick for plotting arrows (to avoid overcrowding).
    quiver_scale: float, default="auto"
        Rescaling factor applied to the arrow field to enhance visibility.
        If "auto" the scale is selected using the randomized (negative) control
        (even if `plot_random=False`).
        If a float is provided, its interpretation depends on the `scale_type`
        parameter, see below.
        NOTE: Unlike the similar option in plot_grid_arrows, here there is no
        strong motivation to calculate the scale relative to the randomized
        control. The randomized control does not necessarily have smaller
        cell-wise velocity: for example, scattered cells may have strong
        velocities that, correctly, just average out when the average velocity
        field is calculated.
    scale_type: str, default="relative"
        How to interpret `quiver_scale`:
        If "relative" (default) the value is used as a scaling factor and
        multiplied by the value from "auto" (it follows that
        quiver_scale="auto" is equivalent to quiver_scale=1).
        If "absolute" the value is passed directly to the matplotlib quiver function.
    plot_scatter: bool, default=False
        Whether to plot the points.
    scatter_kwargs: dict
        A dictionary of keyword arguments to pass to matplotlib scatter.
        By default the following are passed: c="0.8", alpha=0.4, s=10,
        edgecolor=(0, 0, 0, 1), lw=0.3. They can be overridden.
    color_arrow: str, default="cluster"
        The color of the arrows; if "cluster" the arrows are colored the same
        as the cluster.
    new_fig: bool, default=False
        Whether to create a new figure.
    plot_random: bool, default=True
        Whether to plot the randomized control next to the plot.
    **quiver_kwargs: dict
        Keyword arguments to pass to quiver.
        By default the following are passed: angles='xy', scale_units='xy',
        minlength=1.5. They can be overridden.

    Returns
    -------
    Nothing, just plots the embedding with arrows.
    """
    if choice == "auto":
        choice = int(self.S.shape[1] / 3)
        logging.warning(f"Only {choice} arrows will be shown to avoid overcrowding; "
                        "you can choose the exact number with the `choice` argument")
    _quiver_kwargs = {"angles": 'xy', "scale_units": 'xy', "minlength": 1.5}
    _scatter_kwargs = dict(c="0.8", alpha=0.4, s=10, edgecolor=(0, 0, 0, 1), lw=0.3)
    _scatter_kwargs.update(scatter_kwargs)
    if new_fig:
        if plot_random and hasattr(self, "delta_embedding_random"):
            plt.figure(figsize=(22, 12))
        else:
            plt.figure(figsize=(14, 14))

    ix_choice = np.random.choice(self.embedding.shape[0], size=choice, replace=False)

    # Determine quiver scale
    if scale_type == "relative":
        if hasattr(self, "delta_embedding_random"):
            # Diagonal of the plot
            plot_scale = np.linalg.norm(np.max(self.flow_grid, 0) - np.min(self.flow_grid, 0), 2)
            # Typical length of an arrow
            arrows_scale = np.percentile(np.linalg.norm(self.delta_embedding_random, 2, 1), 80)
            if quiver_scale == "auto":
                quiver_scale = arrows_scale / (plot_scale * 0.005)
            else:
                quiver_scale = quiver_scale * arrows_scale / (plot_scale * 0.005)
        else:
            raise ValueError("`scale_type` was set to 'relative' but the randomized control "
                             "was not computed when running estimate_transition_prob. "
                             "Please run estimate_transition_prob or set `scale_type` to 'absolute'")
    else:
        logging.warning("The arrow scale was set to 'absolute'; make sure you know "
                        "how to properly interpret the plots")

    if color_arrow == "cluster":
        colorandum = self.colorandum[ix_choice, :]
    else:
        colorandum = color_arrow
    _quiver_kwargs.update({"color": colorandum})
    _quiver_kwargs.update(quiver_kwargs)

    if plot_random and hasattr(self, "delta_embedding_random"):
        plt.subplot(122)
        plt.title("Randomized")
        if plot_scatter:
            plt.scatter(self.embedding[:, 0], self.embedding[:, 1], **_scatter_kwargs)
        plt.quiver(self.embedding[ix_choice, 0], self.embedding[ix_choice, 1],
                   self.delta_embedding_random[ix_choice, 0],
                   self.delta_embedding_random[ix_choice, 1],
                   scale=quiver_scale, **_quiver_kwargs)
        plt.axis("off")
        plt.subplot(121)
        plt.title("Data")

    if plot_scatter:
        plt.scatter(self.embedding[:, 0], self.embedding[:, 1], **_scatter_kwargs)
    plt.quiver(self.embedding[ix_choice, 0], self.embedding[ix_choice, 1],
               self.delta_embedding[ix_choice, 0], self.delta_embedding[ix_choice, 1],
               scale=quiver_scale, **_quiver_kwargs)

    patches = []
    for i in range(len(np.unique(self.ca['SampleID']))):
        patches.append(mpatches.Patch(color=cm.get_cmap('Set2')(i), label=f'Time {i + 1}'))
    plt.legend(handles=patches)
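# --- Hypothetical usage of plot_arrows_embedding_smg; `vlm` stands in for an
# object exposing the attributes the method reads (embedding, delta_embedding,
# delta_embedding_random, colorandum, ca). Names are illustrative only.
vlm.plot_arrows_embedding_smg(choice=1200,
                              quiver_scale="auto",
                              new_fig=True,
                              plot_scatter=True,
                              scatter_kwargs={"alpha": 0.2},
                              plot_random=True)
plt.savefig("arrows_embedding.png", dpi=150)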
plt.xlabel("x1") plt.ylabel("x2") plt.title("Plot of 2D synthetic data") #scatter plot of data samples plt.show() #(b) print("\n(b)") eigval, eigvec = np.linalg.eig(np.cov(D.T)) print("Eigenvalues: ",eigval) print("Eigenvectors: \n",eigvec) origin=[0,0] plt.scatter(x,y,marker='x',c='blue') plt.xlabel("x1") plt.ylabel("x2") plt.title("Plot of 2D synthetic data and eigen directions") plt.quiver(*origin,*eigvec[:,0],color="red",scale=7) #Eigen direction1 plt.quiver(*origin,*eigvec[:,1],color="red",scale=3) #Eigen direction2 plt.show() #(c) print("\n(c)") def projection(d,v): a=[] for i in range(len(d)): proj=(np.dot(d[i],v)/(np.dot(v,v)))*v a.append(proj.tolist()) return(np.array(a)) a1=projection(D,eigvec[:,0]) #projecting data on to first eigen direction a2=projection(D,eigvec[:,1]) #projecting data on to second eigen direction projc=np.dot(D,eigvec) #projection of whole data
data2 = loadmat('../data/ex7data1.mat')
X2 = data2['X']

scaler = StandardScaler()
scaler.fit(X2)
U, S, V = linalg.svd(scaler.transform(X2).T)
print(U)
print(S)

plt.scatter(X2[:, 0], X2[:, 1], s=30, edgecolors='b', facecolors='None', linewidth=1)
# Set the aspect ratio to 'equal' to show the orthogonality of the principal
# components in the plot.
plt.gca().set_aspect('equal')
plt.quiver(scaler.mean_[0], scaler.mean_[1], U[0, 0], U[0, 1], scale=S[0], color='r')
plt.quiver(scaler.mean_[0], scaler.mean_[1], U[1, 0], U[1, 1], scale=S[1], color='r')
plt.show()

print(S[1])
def plot_grid_arrows_smg(self, quiver_scale="auto", scale_type="relative", min_mass=1,
                         min_magnitude=None, scatter_kwargs_dict=None, plot_dots=False,
                         plot_random=False, **quiver_kwargs):
    """Plots the vector field, averaging velocity vectors on a grid

    Arguments
    ---------
    quiver_scale: float, default="auto"
        Rescaling factor applied to the arrow field to enhance visibility.
        If "auto" the scale is selected using the randomized (negative) control
        (even if `plot_random=False`).
        If a float is provided, its interpretation depends on the `scale_type`
        parameter, see below.
        NOTE: If "auto" results in very small or very large velocities, pass a
        float to this parameter. The float is interpreted as a scaling factor;
        importantly, both the data and the control are scaled the same way, so
        you can rescale the velocity arbitrarily without the risk of merely
        observing an overfit of the noise.
    scale_type: str, default="relative"
        How to interpret `quiver_scale`:
        If "relative" (default) the value is used as a scaling factor and
        multiplied by the value from "auto" (it follows that
        quiver_scale="auto" is equivalent to quiver_scale=1).
        If "absolute" the value is passed directly to the matplotlib quiver
        function (not recommended unless you are sure what this implies).
    min_mass: float, default=1
        The minimum density around a grid point for it to be considered and plotted.
    min_magnitude: float, default=None
        The minimum velocity magnitude for a grid point to be considered and plotted.
    scatter_kwargs_dict: dict, default=None
        A dictionary of keyword arguments to pass to scatter.
        By default the following are passed: s=20, zorder=-1, alpha=0.2, lw=0,
        c=self.colorandum. They can be overridden.
    plot_dots: bool, default=False
        Whether to plot dots at all low-velocity grid points.
    plot_random: bool, default=False
        Whether to plot the randomized control next to the plot.
    **quiver_kwargs: dict
        Keyword arguments to pass to quiver.
        By default the following are passed: angles='xy', scale_units='xy',
        minlength=1.5. They can be overridden.
    """
    # plt.figure(figsize=(10, 10))
    _quiver_kwargs = {"angles": 'xy', "scale_units": 'xy', "minlength": 1.5}
    _quiver_kwargs.update(quiver_kwargs)

    scatter_dict = {"s": 20, "zorder": -1, "alpha": 0.2, "lw": 0, "c": self.colorandum}
    if scatter_kwargs_dict is not None:
        scatter_dict.update(scatter_kwargs_dict)

    # Determine quiver scale
    if scale_type == "relative":
        if hasattr(self, "flow_rndm"):
            # Diagonal of the plot
            plot_scale = np.linalg.norm(np.max(self.flow_grid, 0) - np.min(self.flow_grid, 0), 2)
            # Typical length of an arrow
            arrows_scale = np.percentile(
                np.linalg.norm(self.flow_rndm[self.total_p_mass >= min_mass, :], 2, 1), 90)
            if quiver_scale == "auto":
                quiver_scale = arrows_scale / (plot_scale * 0.0025)
            else:
                quiver_scale = quiver_scale * arrows_scale / (plot_scale * 0.0025)
        else:
            raise ValueError("`scale_type` was set to 'relative' but the randomized control "
                             "was not computed when running estimate_transition_prob. "
                             "Please run estimate_transition_prob or set `scale_type` to 'absolute'")
    else:
        logging.warning("The arrow scale was set to 'absolute'; make sure you know "
                        "how to properly interpret the plots")

    mass_filter = self.total_p_mass < min_mass
    if min_magnitude is None:
        XY, UV = np.copy(self.flow_grid), np.copy(self.flow)
        if not plot_dots:
            UV = UV[~mass_filter, :]
            XY = XY[~mass_filter, :]
        else:
            UV[mass_filter, :] = 0
    else:
        XY, UV = np.copy(self.flow_grid), np.copy(self.flow_norm)
        if not plot_dots:
            UV = UV[~(mass_filter | (self.flow_norm_magnitude < min_magnitude)), :]
            XY = XY[~(mass_filter | (self.flow_norm_magnitude < min_magnitude)), :]
        else:
            UV[mass_filter | (self.flow_norm_magnitude < min_magnitude), :] = 0

    if plot_random:
        if min_magnitude is None:
            XY, UV_rndm = np.copy(self.flow_grid), np.copy(self.flow_rndm)
            if not plot_dots:
                UV_rndm = UV_rndm[~mass_filter, :]
                XY = XY[~mass_filter, :]
            else:
                UV_rndm[mass_filter, :] = 0
        else:
            XY, UV_rndm = np.copy(self.flow_grid), np.copy(self.flow_norm_rndm)
            if not plot_dots:
                UV_rndm = UV_rndm[~(mass_filter | (self.flow_norm_magnitude_rndm < min_magnitude)), :]
                XY = XY[~(mass_filter | (self.flow_norm_magnitude_rndm < min_magnitude)), :]
            else:
                UV_rndm[mass_filter | (self.flow_norm_magnitude_rndm < min_magnitude), :] = 0

        plt.subplot(122)
        plt.title("Randomized")
        plt.scatter(self.flow_embedding[:, 0], self.flow_embedding[:, 1], **scatter_dict)
        plt.quiver(XY[:, 0], XY[:, 1], UV_rndm[:, 0], UV_rndm[:, 1],
                   scale=quiver_scale, zorder=20000, **_quiver_kwargs)
        plt.axis("off")
        plt.subplot(121)
        plt.title("Data")

    plt.scatter(self.flow_embedding[:, 0], self.flow_embedding[:, 1], **scatter_dict)
    plt.quiver(XY[:, 0], XY[:, 1], UV[:, 0], UV[:, 1],
               scale=quiver_scale, zorder=20000, **_quiver_kwargs)
import pickle as pkl
import matplotlib.pyplot as plt
import numpy as np

infile = './Policy Hard Iter 22 Policy Map.pkl'
with open(infile, 'rb') as f:
    arr = pkl.load(f, encoding='latin1')

# Map policy symbols to arrow direction vectors.
lookup = {'None': (0, 0), '>': (1, 0), 'v': (0, -1), '^': (0, 1), '<': (-1, 0)}

n = len(arr)
arr = np.array(arr)
X, Y = np.meshgrid(range(1, n + 1), range(1, n + 1))
U = X.copy()
V = Y.copy()
for i in range(n):
    for j in range(n):
        # Flip the rows so that row 0 of the array is drawn at the top of the plot.
        U[i, j] = lookup[arr[n - i - 1, j]][0]
        V[i, j] = lookup[arr[n - i - 1, j]][1]

plt.figure()
# plt.title('Arrows scale with plot width, not view')
Q = plt.quiver(X, Y, U, V, headaxislength=5, pivot='mid',
               angles='xy', scale_units='xy', scale=1)
plt.xlim((0, n + 1))
plt.ylim((0, n + 1))
plt.tight_layout()
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint


def plot_direction_field(f, x_range, y_range, ode_desc=None, xaxis="x", yaxis="y",
                         res=20, x_res=None, y_res=None, arrow_len=1.0,
                         integral_curves_at=[], curves_res=1000):
    """
    Plot a direction field of a first-order ODE y' = f(x, y) and some integral
    curves in that direction field.

    Arguments:
        f: A function which computes the right-hand side of the ODE.
        x_range: The range of x values to plot (x_min, x_max).
        y_range: The range of y values to plot (y_min, y_max).

    Options:
        ode_desc=None: A textual description of the ODE (e.g. a formula or a name).
            If ode_desc is None, no title is set.
        xaxis="x": Name of the x axis.
        yaxis="y": Name of the y axis.
        res=20: Number of arrows to draw per axis. res is ignored for an axis
            if a resolution for that specific axis is set.
        arrow_len=1.0: Length of the arrows to draw.
        integral_curves_at=[]: A list of (x_0, y_0) tuples used as starting points
            for the integral curves. The integral curves are drawn both forward
            and backward in x.
        curves_res=1000: Number of points to use for the computation of the
            integral curves (for forward and backward each).
    """
    # Set up all the labels.
    if ode_desc is not None:
        plt.title("Direction field of {}".format(ode_desc))
    plt.xlabel(xaxis)
    plt.ylabel(yaxis)

    x_res = res if x_res is None else x_res
    y_res = res if y_res is None else y_res

    # Plot the direction field: fixed-length arrows with slope f(x, y).
    x = np.linspace(x_range[0], x_range[1], x_res)
    y = np.linspace(y_range[0], y_range[1], y_res)
    X, Y = np.meshgrid(x, y)
    F = f(X, Y)
    arrow_angle = np.arctan(F)
    arrow_x = arrow_len * np.cos(arrow_angle)
    arrow_y = arrow_len * np.sin(arrow_angle)
    plt.quiver(X, Y, arrow_x, arrow_y)

    # Plot all integral curves (odeint expects the signature g(y, x)).
    g = lambda y, x: f(x, y)
    plt.ylim(y_range)
    for (x0, y0) in integral_curves_at:
        # Backward in x.
        xs_backward = np.linspace(x0, x_range[0], curves_res)
        ys_backward = odeint(g, y0, xs_backward)[:, 0]
        plt.plot(xs_backward, ys_backward, 'r')
        # Forward in x.
        xs_forward = np.linspace(x0, x_range[1], curves_res)
        ys_forward = odeint(g, y0, xs_forward)[:, 0]
        plt.plot(xs_forward, ys_forward, 'r')
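# --- Illustrative call (the ODE y' = x - y is an arbitrary example, not from
# the source): direction field with a few integral curves.
plot_direction_field(lambda x, y: x - y,
                     x_range=(-3, 3), y_range=(-3, 3),
                     ode_desc="y' = x - y",
                     integral_curves_at=[(0, -1), (0, 0), (0, 1)])
plt.show()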
from models import gaussian as model
import numpy as np
import matplotlib.pyplot as plt
from palettable import colorbrewer

# plt.rcParams['image.cmap'] = colorbrewer.sequential.Blues_9.mpl_colormap
cmap = colorbrewer.sequential.Blues_9.mpl_colormap

if __name__ == "__main__":
    nx, dx = 32, 2.
    flux = 3.
    fwhm = 37.
    p = 0.052
    m = model(flux, fwhm, p, nx, dx)

    # Plot the model: polarization angle from the Stokes Q and U maps.
    psi = np.arctan(m.U / m.Q) / 2.
    plt.imshow(m.I, cmap=cmap)
    plt.quiver(np.cos(psi) * m.I, np.sin(psi) * m.I, headwidth=0)
    # plt.ion()
    plt.show()
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint


def f(Y, t):
    # Right-hand side reconstructed from its call sites below; the sums sx2,
    # sx, sxy, sy and the data x are assumed to be defined from the dataset
    # earlier in the script.
    a1, a2 = Y
    return [-sx2 * a1 - sx * a2 + sxy, -sx * a1 - len(x) * a2 + sy]
    # return [-16*a1 - 8*a2 + 3, -8*a1 - 4*a2 + 1]


y1 = np.linspace(-15, 15, 100)
y2 = np.linspace(-15, 15, 100)
t = 0

Y1, Y2 = np.meshgrid(y1, y2)
u, v = np.zeros(Y1.shape), np.zeros(Y2.shape)
NI, NJ = Y1.shape
for i in range(NI):
    for j in range(NJ):
        xx = Y1[i, j]
        yy = Y2[i, j]
        yprime = f([xx, yy], t)
        # Normalize each arrow to unit length so only the direction is shown.
        norm = np.sqrt(np.square(yprime[0]) + np.square(yprime[1]))
        u[i, j] = yprime[0] / norm
        v[i, j] = yprime[1] / norm

Q = plt.quiver(Y1, Y2, u, v, color='b', headwidth=1, headlength=3)
plt.savefig('plot1.png', dpi=1000)

# Integrate trajectories from a set of starting points on the boundary.
y0 = [[-10, -10], [-5, -10], [0, -10], [5, -10], [10, -10],
      [-10, 10], [-5, 10], [0, 10], [5, 10], [10, 10],
      [-10, -5], [-10, 0], [-10, 5], [-10, 10],
      [10, -5], [10, 0], [10, 5], [10, 10]]
tspan = np.linspace(0, 100, 100)
for ya in y0:
    ys = odeint(f, ya, tspan)
    plt.plot(ys[:, 0], ys[:, 1], 'r-')   # trajectory
    plt.plot(ys[-1, 0], ys[-1, 1], 'o')  # end point
plt.savefig('plot2.png', dpi=1000)
plt.show()
plt.figure(figsize=(10, 8))
plt.contourf(lon31g, lat31g, ssh_GOFS[:, :], cmap=cmocean.cm.curl, **kw)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=16)
plt.axis('equal')
plt.xlim(-98, -79.5)
plt.ylim(15, 32.5)
plt.title('GOFS 3.1 SSH and surface velocity on ' + str(time_GOFS)[0:13], size=22, y=1.03)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
cbar.ax.set_ylabel('meters', fontsize=16)
plt.contour(bath_lon, bath_lat, bath_elev, [0], colors='k')
plt.contourf(bath_lon, bath_lat, bath_elev, [0, 10000], colors='seashell')
# Subsample every 5th grid point to keep the quiver readable.
plt.quiver(lon31g[::5], lat31g[::5], su_GOFS[::5, ::5], sv_GOFS[::5, ::5],
           scale=3, scale_units='inches', alpha=0.7)

file = folder_fig + 'GOFS_SSH_GoMex_' + str(time_GOFS)[0:13]
plt.savefig(file, bbox_inches='tight', pad_inches=0.1)

#%% Figure: temperature at 200 meters
kw = dict(levels=np.linspace(10, 25, 31))
okdepth = np.where(depth_GOFS >= 200)[0][0]
temp_200_GOFS = np.asarray(GOFS_ts['water_temp'][oktime_GOFS, okdepth, botm:top, left:right])
su_GOFS = GOFS_uv['water_u'][oktime_GOFS, okdepth, botm:top, left:right]
sv_GOFS = GOFS_uv['water_v'][oktime_GOFS, okdepth, botm:top, left:right]

plt.figure(figsize=(10, 8))
plt.contourf(lon31g, lat31g, temp_200_GOFS, cmap=cmocean.cm.thermal, **kw)
cbar = plt.colorbar()
def create_plot(snapshot, folder, output_folder='.', output_plot=True, show_plot=False):
    """
    Creates a plot as per instructions inside the function. As of 10.13.20 this was a plot of
    ECM-organoid simulations: a base layer of a contour plot of either the anisotropy or the
    oxygen, the cells in the simulation as a scatter plot, and finally the ECM orientation
    overlaid with a quiver plot.

    Parameters
    ----------
    snapshot :
        Base name of PhysiCell output files - e.g. 'output00000275' --> 'output' + '%08d'
    folder : str
        Path to input data
    output_folder : str
        Path for image output
    output_plot : bool
        True = image file will be made. Image output is required for movie production.
    show_plot : bool
        True = plot is displayed. Expected to be False for large batches.

    Returns
    -------
    Nothing : Produces a png image from the input PhysiCell data.
    """
    ############################################ Load data ############################################

    # Load cell and microenvironment data
    mcds = pyMCDS(snapshot + '.xml', folder)

    # Load and read ECM data
    mcds.load_ecm(snapshot + '_ECM.mat', folder)

    # Get cell positions and attributes, microenvironment, and ECM data for plotting.

    # Cells
    cell_df = mcds.get_cell_df()

    #### Diffusion microenvironment
    xx, yy = mcds.get_2D_mesh()  # Mesh
    plane_oxy = mcds.get_concentrations('oxygen', 0.0)  # Oxygen (used for contour plot)

    #### ECM microenvironment
    xx_ecm, yy_ecm = mcds.get_2D_ECM_mesh()  # Mesh
    plane_anisotropy = mcds.get_ECM_field('anisotropy', 0.0)  # Anisotropy (used for scaling and contour plot)
    # plane_anisotropy = micro  # Used for contour plot

    ########################################## Preprocessing ##########################################

    #### Helper variables and functions ######

    # Number of contours (could include as a parameter)
    num_levels = 10  # 25 works well for ECM, 38 works well for oxygen

    # Make levels for contours
    levels_o2 = np.linspace(1e-14, 38, num_levels)
    # levels_ecm = np.linspace(1e-14, 1.0, num_levels)
    # For the march environment we especially need to highlight small changes in anisotropy.
    levels_ecm = np.linspace(0.90, 0.93, num_levels)

    # Old function and scripting to scale and threshold anisotropy values, for later use in
    # scaling the length of ECM fibers for visualization purposes.
    # micro = plane_anisotropy
    # micro_scaled = micro
    #
    # def curve(x):
    #     # return (V_max * x) / (K_M + x)
    #     return 0.5 if x > 0.5 else x
    #
    # for i in range(len(micro)):
    #     for j in range(len(micro[i])):
    #         # micro_scaled[i][j] = 10 * math.log10(micro[i][j] + 1) / math.log10(2)
    #         micro_scaled[i][j] = curve(micro[i][j])

    ##### Process data for plotting - weight fibers by anisotropy, mask out zero-anisotropy
    ##### ECM units, get cell radii and types

    # Anisotropy strictly runs between 0 and 1. Element-by-element multiplication produces
    # weighted lengths between 0 and 1 for visualization.
    scaled_ECM_x = np.multiply(mcds.data['ecm']['ECM_fields']['x_fiber_orientation'][:, :, 0],
                               plane_anisotropy)
    scaled_ECM_y = np.multiply(mcds.data['ecm']['ECM_fields']['y_fiber_orientation'][:, :, 0],
                               plane_anisotropy)

    # If we want the arrows the same length instead
    ECM_x = mcds.data['ecm']['ECM_fields']['x_fiber_orientation'][:, :, 0]
    ECM_y = mcds.data['ecm']['ECM_fields']['y_fiber_orientation'][:, :, 0]

    # Mask out zero vectors
    mask = plane_anisotropy > 0.0001

    # Get unique cell types and radii
    cell_df['radius'] = (cell_df['total_volume'].values * 3 / (4 * np.pi)) ** (1 / 3)
    types = cell_df['cell_type'].unique()
    colors = ['yellow', 'blue']

    ############################################ Plotting ############################################

    # Start the plot and make it the correct size
    fig, ax = plt.subplots(figsize=(12, 12))
    plt.ylim(-500, 500)
    plt.xlim(-500, 500)

    # Add contour layer
    # cs = plt.contourf(xx, yy, plane_oxy, cmap="Greens_r", levels=levels_o2)
    cs = plt.contourf(xx_ecm, yy_ecm, plane_anisotropy, cmap="YlGnBu", levels=levels_ecm)

    # Add cells layer
    for i, ct in enumerate(types):
        plot_df = cell_df[cell_df['cell_type'] == ct]
        for j in plot_df.index:
            circ = Circle((plot_df.loc[j, 'position_x'], plot_df.loc[j, 'position_y']),
                          radius=plot_df.loc[j, 'radius'], alpha=0.7, edgecolor='black')
            ax.add_artist(circ)

    # Add quiver layer with arrows scaled by anisotropy ###
    # q = ax.quiver(xx_ecm[mask], yy_ecm[mask], scaled_ECM_x[mask], scaled_ECM_y[mask],
    #               pivot='middle', angles='xy', scale_units='inches', scale=2.0,
    #               headwidth=0, width=0.0015)
    # What is the deal with the line segment lengths shifting as the plots progress
    # when the scaling isn't used?

    # Add ECM orientation vectors unscaled by anisotropy ###
    plt.quiver(xx, yy, ECM_x, ECM_y, pivot='middle', angles='xy',
               scale_units='inches', scale=3.0, headwidth=0)

    # ax.axis('scaled')  # used to be 'equal'
    # https://stackoverflow.com/questions/45057647/difference-between-axisequal-and-axisscaled-in-matplotlib
    # This changes the axis from -750,750 to roughly -710,730. It looks better with
    # 'scaled' than 'equal', but either way it changes the plot limits.

    # Labels and title (will need to be removed for the journal - they will be added manually)
    ax.set_xlabel('x [micron]')
    ax.set_ylabel('y [micron]')
    fig.colorbar(cs, ax=ax)
    plt.title(snapshot)

    # Carefully place the command to make the plot square AFTER the color bar has been added.
    ax.axis('scaled')
    fig.tight_layout()
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.ylim(-500, 500)
    plt.xlim(-500, 500)

    # Plot output
    if output_plot is True:
        plt.savefig(output_folder + snapshot + '.png')
    if show_plot is True:
        plt.show()
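# --- Hypothetical batch usage of create_plot; the paths are illustrative and
# the snapshot names follow the 'output%08d' convention from the docstring.
for i in range(0, 276, 25):
    create_plot('output%08d' % i, folder='output/',
                output_folder='plots/', output_plot=True, show_plot=False)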