def demo():
    from pylab import hold, linspace, plot, show
    hold(True)
    #y = [9,6,1,3,8,4,2]
    #y = [9,11,13,3,-2,0,2]
    y = [9,11,2,3,8,0,2]
    #y = [9,9,1,3,8,2,2]
    xeq = linspace(0,1,len(y))
    x = xeq+0
    #x[1],x[-2] = x[0],x[-1]
    #x[1],x[-2] = x[2],x[-3]
    #x[1],x[2] = x[2],x[1]
    #x[1],x[-2] = x[2]-0.001,x[-2]+0.001
    #x[1],x[-2] = x[1]-x[1]/2,x[-1]-x[1]/2
    t = linspace(x[0],x[-1],400)
    plot(xeq,y,':oy')
    plot(t,bspline(y,t,clamp=False),'-.y')  # bspline
    plot(t,bspline(y,t,clamp=True),'-y')    # bspline
    xt,yt = pbs(x,y,t,clamp=False)
    plot(xt,yt,'-.b')  # pbs
    xt,yt = pbs(x,y,t,clamp=True)
    plot(xt,yt,'-b')   # pbs
    #xt,yt = pbs(x,y,t,clamp=True, parametric=True)
    #plot(xt,yt,'-g') # pbs
    plot(sorted(x),y,':ob')
    show()
def zeroPaddData(self, desiredLength, paddmode='zero', where='end'):
    # zero-pads the time domain data; padding is possible at the beginning
    # or at the end, with either gaussian noise or plain zero padding
    # might not work for gaussian mode!
    desiredLength = int(desiredLength)
    # escape the function
    if desiredLength < 0:
        return 0
    # calculate the padding vectors
    if paddmode == 'gaussian':
        paddvec = py.normal(0, py.std(self.getPreceedingNoise()) * 0.05, desiredLength)
    else:
        paddvec = py.ones((desiredLength, self.tdData.shape[1] - 1))
        paddvec *= py.mean(self.tdData[-20:, 1:])
    timevec = self.getTimes()
    if where == 'end':
        # time axis:
        newtimes = py.linspace(timevec[-1], timevec[-1] + desiredLength * self.dt, desiredLength)
        paddvec = py.column_stack((newtimes, paddvec))
        longvec = py.row_stack((self.tdData, paddvec))
    else:
        newtimes = py.linspace(timevec[0] - (desiredLength + 1) * self.dt, timevec[0], desiredLength)
        paddvec = py.column_stack((newtimes, paddvec))
        longvec = py.row_stack((paddvec, self.tdData))
    self.setTDData(longvec)
def contourFromFunction(XYfunction, plotPoints=100,
                        xrange=None, yrange=None, numContours=20, alpha=1.0,
                        contourLines=None):
    """
    Given a 2D function, plots constant contours over the given range.
    If the range is not given, the current plotting window range is used.
    """
    # set up x and y ranges
    currentAxis = pylab.axis()
    if xrange is not None:
        xvalues = pylab.linspace(xrange[0], xrange[1], plotPoints)
    else:
        xvalues = pylab.linspace(currentAxis[0], currentAxis[1], plotPoints)
    if yrange is not None:
        yvalues = pylab.linspace(yrange[0], yrange[1], plotPoints)
    else:
        yvalues = pylab.linspace(currentAxis[2], currentAxis[3], plotPoints)

    #coordArray = _coordinateArray2D(xvalues,yvalues)
    # add extra dimension to this to make iterable?
    # bug here!  need to fix for contour plots
    z = map(lambda y: map(lambda x: XYfunction(x, y), xvalues), yvalues)
    if contourLines:
        pylab.contour(xvalues, yvalues, z, contourLines, alpha=alpha)
    else:
        pylab.contour(xvalues, yvalues, z, numContours, alpha=alpha)
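# Minimal usage sketch (not part of the original module): assumes pylab is
# imported at module level as above and contourFromFunction is in scope; the
# Gaussian test function and the ranges are arbitrary illustration. Note the
# map-of-map construction above targets Python 2.
import pylab

contourFromFunction(lambda x, y: pylab.exp(-(x**2 + y**2)),
                    plotPoints=50,
                    xrange=(-2.0, 2.0), yrange=(-2.0, 2.0),
                    numContours=10)
pylab.show()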
def griddata(X, Y, Z, xl, yl, xr, yr, dx):
    # define grid.
    xi, yi = p.meshgrid(p.linspace(xl, xr, int((xr - xl) / dx) + 1),
                        p.linspace(yl, yr, int((yr - yl) / dx) + 1))
    # grid the data.
    zi = mgriddata(X, Y, Z, xi, yi)
    New = grid(zi, xl, yl, dx)
    return New
def data2fig(data, X, options, legend_title, xlabel, ylabel=r'Reachability~$\reachability$'):
    if options['grayscale']:
        colors = options['graycm'](pylab.linspace(0, 1, len(data.keys())))
    else:
        colors = options['color'](pylab.linspace(0, 1, len(data.keys())))
    fig = MyFig(options, figsize=(10, 8), xlabel=r'Sources~$\sources$', ylabel=ylabel,
                grid=False, aspect='auto', legend=True)
    for j, nhdp_ht in enumerate(sorted(data.keys())):
        d = data[nhdp_ht]
        try:
            mean_y = [scipy.mean(d[n]) for n in X]
        except KeyError:
            logging.warning('key \"%s\" not found, continuing...', nhdp_ht)
            continue
        confs_y = [confidence(d[n])[2] for n in X]
        poly = [conf2poly(X,
                          list(numpy.array(mean_y) + numpy.array(confs_y)),
                          list(numpy.array(mean_y) - numpy.array(confs_y)),
                          color=colors[j])]
        patch_collection = PatchCollection(poly, match_original=True)
        patch_collection.set_alpha(0.3)
        patch_collection.set_linestyle('dashed')
        fig.ax.add_collection(patch_collection)
        fig.ax.plot(X, mean_y, label='$%d$' % nhdp_ht, color=colors[j])
    fig.ax.set_xticks(X)
    fig.ax.set_xticklabels(['$%s$' % i for i in X])
    fig.ax.set_ylim(0, 1)
    fig.legend_title = legend_title
    return fig
def bistability_analysis():
    f2_range = linspace(0, 0.4, 41)
    t = linspace(0, 50000, 1000)
    ion()
    ss_aBax_vals_up = []
    ss_aBax_vals_down = []

    for f2 in f2_range:
        model.parameters['Bid_0'].value = f2 * 1e-1
        bax_total = 2e-1

        # Do "up" portion of hysteresis plot
        model.parameters['aBax_0'].value = 0
        model.parameters['cBax_0'].value = bax_total
        x = odesolve(model, t)
        figure('up')
        plot(t, x['aBax_'] / bax_total)
        ss_aBax_vals_up.append(x['aBax_'][-1] / bax_total)

        # Do "down" portion of hysteresis plot
        model.parameters['aBax_0'].value = bax_total
        model.parameters['cBax_0'].value = 0
        x = odesolve(model, t)
        figure('down')
        plot(t, x['aBax_'] / bax_total)
        ss_aBax_vals_down.append(x['aBax_'][-1] / bax_total)

    figure()
    plot(f2_range, ss_aBax_vals_up, 'r')
    plot(f2_range, ss_aBax_vals_down, 'g')
def test_operation_approx():
    def flux_qubit_potential(phi_m, phi_p):
        return 2 + alpha - 2 * pl.cos(phi_p) * pl.cos(phi_m) - alpha * pl.cos(phi_ext - 2 * phi_p)

    alpha = 0.7
    phi_ext = 2 * np.pi * 0.5
    phi_m = pl.linspace(0, 2 * np.pi, 100)
    phi_p = pl.linspace(0, 2 * np.pi, 100)
    X, Y = pl.meshgrid(phi_p, phi_m)
    Z = flux_qubit_potential(X, Y).T

    # the diagram creations
    from diagram.operations.computations import multiply
    from diagram.ternary import AEV3DD
    aevdd = AEV3DD()
    diagram3 = aevdd.create(Z, 0, True)
    diagram4 = aevdd.create(Z, 0, True)
    aevdd_mat = multiply(diagram3, diagram4, 9).to_matrix(77, True)
    aevdd_mat_approx = multiply(diagram3, diagram4, 9,
                                approximation_precision=1, in_place='1').to_matrix(27, True)

    pl.plt.figure()
    fig, ax = pl.plt.subplots()
    p = ax.pcolor(X / (2 * pl.pi), Y / (2 * pl.pi), Z, cmap=pl.cm.RdBu,
                  vmin=abs(Z).min(), vmax=abs(Z).max())
    cb = fig.colorbar(p, ax=ax)
    p = ax.pcolor(X / (2 * pl.pi), Y / (2 * pl.pi), Z, cmap=pl.cm.RdBu,
                  vmin=abs(Z).min(), vmax=abs(Z).max())
    cb = fig.colorbar(p, ax=ax)
    # cnt = ax.contour(Z, cmap=pl.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
    pl.show()
def plotDistribution(self):
    # plot frequency count for the entire vocabulary
    threshold = 1000
    size = len(self.listOfDict[0])
    x = linspace(1, size, size)
    y = sorted(self.listOfDict[0].values(), reverse=True)
    fig = plt.figure()
    axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    axes.plot(x, y, 'r')
    axes.set_xlabel('x')
    axes.set_ylabel('y')
    axes.set_title('title')
    plt.show()

    # plot frequency count for all words with count over a given threshold
    # (e.g. number of files read)
    threshold = self.numFilesRead
    size = len(self.listOfDict[0])
    size_relevant = sum(1 for i in self.listOfDict[0].values() if i > threshold)
    x = linspace(1, size_relevant, size_relevant)
    y = sorted([i for i in self.listOfDict[0].values() if i > threshold], reverse=True)
    fig = plt.figure()
    axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    axes.plot(x, y, 'r')
    axes.set_xlabel('x')
    axes.set_ylabel('y')
    axes.set_title('title')
    plt.show()
def Plot_field_gp():
    r = pl.linspace(-1. * mm, 1. * mm, 50)
    z = pl.linspace(0, g, 50)
    X, Y = np.meshgrid(z, r)
    for z, r in zip(np.ravel(X), np.ravel(Y)):
        print z * 1000, r * 1000, SpaceChargeField(r, 0, z, 0, 0, 0.5 * mm) * 0.001
def plot_interfaces(current_data):
    from pylab import linspace, plot
    xl = linspace(xp1, xp2, 100)
    yl = linspace(yp1, yp2, 100)
    plot(xl, yl, 'g')
    xl = linspace(xlimits[0], xlimits[1], 100)
    plot(xl, 0.0 * xl, 'b')
def mk_grid(llx, ulx, nx, lly, uly, ny):
    # Get the Galaxy info
    #galaxies = mk_galaxy_struc()
    galaxies = pickle.load(open('galaxies.pickle', 'rb'))
    galaxies = filter(lambda galaxy: galaxy.ston_I > 30., galaxies)
    galaxies = pyl.asarray(filter(lambda galaxy: galaxy.ICD_IH < 0.5, galaxies))

    # Make the low mass grid first
    x = [galaxy.Mass for galaxy in galaxies]
    y = [galaxy.ICD_IH * 100 for galaxy in galaxies]

    bins_x = pyl.linspace(llx, ulx, nx)
    bins_y = pyl.linspace(uly, lly, ny)

    grid = []
    for i in range(bins_x.size - 1):
        xmin = bins_x[i]
        xmax = bins_x[i + 1]
        for j in range(bins_y.size - 1):
            ymax = bins_y[j]
            ymin = bins_y[j + 1]
            cond = [cond1 and cond2 and cond3 and cond4
                    for cond1, cond2, cond3, cond4 in
                    zip(x >= xmin, x < xmax, y >= ymin, y < ymax)]
            grid.append(galaxies.compress(cond))
    return grid
def demo():
    from pylab import hold, linspace, subplot, plot, legend, show
    hold(True)
    #y = [9,6,1,3,8,4,2]
    #y = [9,11,13,3,-2,0,2]
    y = [9, 11, 2, 3, 8, 0]
    #y = [9,9,1,3,8,2,2]
    x = linspace(0, 1, len(y))
    t = linspace(x[0], x[-1], 400)

    subplot(211)
    plot(t, bspline(y, t, clamp=False), '-.y', label="unclamped bspline")  # bspline
    plot(t, bspline(y, t, clamp=True), '-y', label="clamped bspline")      # bspline
    plot(sorted(x), y, ':oy', label="control points")
    legend()
    #left, right = _derivs(t, bspline(y, t, clamp=False))
    #print(left, (y[1] - y[0]) / (x[1] - x[0]))

    subplot(212)
    xt, yt = pbs(x, y, t, clamp=False)
    plot(xt, yt, '-.b', label="unclamped pbs")  # pbs
    xt, yt = pbs(x, y, t, clamp=True)
    plot(xt, yt, '-b', label="clamped pbs")     # pbs
    #xt,yt = pbs(x,y,t,clamp=True, parametric=True)
    #plot(xt,yt,'-g') # pbs
    plot(sorted(x), y, ':ob', label="control points")
    legend()
    show()
def plot_elecs_and_neurons(neuron_dict, ext_sim_dict, neural_sim_dict): pl.close('all') fig_all = pl.figure(figsize=[15,15]) ax_all = fig_all.add_axes([0.1, 0.1, 0.8, 0.8], frameon=False) for elec in xrange(len(ext_sim_dict['elec_z'])): ax_all.plot(ext_sim_dict['elec_z'][elec], ext_sim_dict['elec_y'][elec], color='b',\ marker='$E%i$'%elec, markersize=20 ) legends = [] for i, neur in enumerate(neuron_dict): folder = os.path.join(neural_sim_dict['output_folder'], neuron_dict[neur]['name']) coor = np.load(os.path.join(folder,'coor.npy')) x,y,z = coor n_compartments = len(x) fig = pl.figure(figsize=[10, 10]) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], frameon=False) # Plot the electrodes for elec in xrange(len(ext_sim_dict['elec_z'])): ax.plot(ext_sim_dict['elec_z'][elec], ext_sim_dict['elec_y'][elec], color='b',\ marker='$%i$'%elec, markersize=20 ) # Plot the neuron xmid, ymid, zmid = np.load(folder + '/coor.npy') xstart, ystart,zstart = np.load(folder + '/coor_start.npy') xend, yend, zend = np.load(folder + '/coor_end.npy') diam = np.load(folder + '/diam.npy') length = np.load(folder + '/length.npy') n_compartments = len(diam) for comp in xrange(n_compartments): if comp == 0: xcoords = pl.array([xmid[comp]]) ycoords = pl.array([ymid[comp]]) zcoords = pl.array([zmid[comp]]) diams = pl.array([diam[comp]]) else: if zmid[comp] < 0.400 and zmid[comp] > -.400: xcoords = pl.r_[xcoords, pl.linspace(xstart[comp], xend[comp], length[comp]*3*1000)] ycoords = pl.r_[ycoords, pl.linspace(ystart[comp], yend[comp], length[comp]*3*1000)] zcoords = pl.r_[zcoords, pl.linspace(zstart[comp], zend[comp], length[comp]*3*1000)] diams = pl.r_[diams, pl.linspace(diam[comp], diam[comp], length[comp]*3*1000)] argsort = pl.argsort(-xcoords) ax.scatter(zcoords[argsort], ycoords[argsort], s=20*(diams[argsort]*1000)**2, c=xcoords[argsort], edgecolors='none', cmap='gray') ax_all.plot(zmid[0], ymid[0], marker='$%i$'%i, markersize=20, label='%i: %s' %(i, neur)) #legends.append('%i: %s' %(i, neur)) ax.axis(ext_sim_dict['plot_range']) ax.axis('equal') ax.axis(ext_sim_dict['plot_range']) ax.set_xlabel('z [mm]') ax.set_ylabel('y [mm]') fig.savefig(os.path.join(neural_sim_dict['output_folder'],\ 'neuron_figs', '%s.png' % neur)) ax_all.axis('equal') ax.axis(ext_sim_dict['plot_range']) ax_all.set_xlabel('z [mm]') ax_all.set_ylabel('y [mm]') ax_all.legend() fig_all.savefig(os.path.join(neural_sim_dict['output_folder'], 'fig.png'))
def main():
    """
    This shows the use of SynChan with Izhikevich neuron. This can be
    used for creating a network of Izhikevich neurons.
    """
    simtime = 200.0
    stepsize = 10.0
    model_dict = make_model()
    vm, inject, gk, spike = setup_data_recording(model_dict['neuron'],
                                                 model_dict['pulse'],
                                                 model_dict['synapse'],
                                                 model_dict['spike_in'])
    mutils.setDefaultDt(elecdt=0.01, plotdt2=0.25)
    mutils.assignDefaultTicks(solver='ee')
    moose.reinit()
    mutils.stepRun(simtime, stepsize)
    pylab.subplot(411)
    pylab.plot(pylab.linspace(0, simtime, len(vm.vector)), vm.vector, label='Vm (mV)')
    pylab.legend()
    pylab.subplot(412)
    pylab.plot(pylab.linspace(0, simtime, len(inject.vector)), inject.vector, label='Inject (uA)')
    pylab.legend()
    pylab.subplot(413)
    pylab.plot(spike.vector, pylab.ones(len(spike.vector)), '|', label='input spike times')
    pylab.legend()
    pylab.subplot(414)
    pylab.plot(pylab.linspace(0, simtime, len(gk.vector)), gk.vector, label='Gk (mS)')
    pylab.legend()
    pylab.show()
def gfe4():
    x2 = plt.linspace(1e-20, .13, 90000)
    xmin2 = ((4 * np.pi * (SW.R)**3) / 3) * 1e-20
    xmax2 = ((4 * np.pi * (SW.R)**3) / 3) * .13
    xff2 = plt.linspace(xmin2, xmax2, 90000)
    thigh = 100

    plt.figure()
    plt.title('Grand free energy per volume vs ff @ T=%0.4f' % Tlist[thigh])
    plt.ylabel('Grand free energy per volume')
    plt.xlabel('filling fraction')
    plt.plot(xff2, SW.phi(Tlist[thigh], x2, nR[thigh]), color='#f36118', linewidth=3)
    #plt.axvline(nL[thigh])
    #plt.axvline(nR[thigh])
    #plt.axhline(SW.phi(Tlist[thigh],nR[thigh]))
    #plt.plot(x2,x2-x2,'c')
    plt.plot(nL[thigh] * ((4 * np.pi * (SW.R)**3) / 3), SW.phi(Tlist[thigh], nL[thigh], nR[thigh]), 'ko')
    plt.plot(nR[thigh] * ((4 * np.pi * (SW.R)**3) / 3), SW.phi(Tlist[thigh], nR[thigh], nR[thigh]), 'ko')
    plt.axhline(SW.phi(Tlist[thigh], nR[thigh], nR[thigh]), color='c', linewidth=2)
    print(Tlist[100])
    print(nL[100], nR[100])
    plt.savefig('figs/gfe_cotangent.pdf')

    plt.figure()
    plt.plot(xff2, SW.phi(Tlist[thigh], x2, nR[thigh]), color='#f36118', linewidth=3)
    plt.plot(nL[thigh] * ((4 * np.pi * (SW.R)**3) / 3), SW.phi(Tlist[thigh], nL[thigh], nR[thigh]), 'ko')
    plt.plot(nR[thigh] * ((4 * np.pi * (SW.R)**3) / 3), SW.phi(Tlist[thigh], nR[thigh], nR[thigh]), 'ko')
    plt.axhline(SW.phi(Tlist[thigh], nR[thigh], nR[thigh]), color='c', linewidth=2)
    plt.xlim(0, 0.0003)
    plt.ylim(-.000014, 0.000006)
    print(Tlist[100])
    print(nL[100], nR[100])
    plt.savefig('figs/gfe_insert_cotangent.pdf')
def draw_bandstructure(jobname, kspace, band, ext=".csv", format="pdf",
                       filled=True, levels=15, lines=False, labeled=False, legend=False):
    # clf()
    fig = figure(figsize=fig_size)
    ax = fig.add_subplot(111, aspect="equal")
    x, y, z = loadtxt(jobname + ext, delimiter=", ", skiprows=1,
                      usecols=(1, 2, 4 + band), unpack=True)
    if kspace.dimensions == 1:
        pylab.plot(x, y, z)
    elif kspace.dimensions == 2:
        xi = linspace(-0.5, 0.5, kspace.x_res)
        yi = linspace(-0.5, 0.5, kspace.y_res)
        zi = griddata(x, y, z, xi, yi)
        if filled:
            cs = ax.contourf(xi, yi, zi, levels, **contour_filled)
            legend and colorbar(cs, **colorbar_style)
            cs = lines and ax.contour(xi, yi, zi, levels, **contour_lines)
            labeled and lines and clabel(cs, fontsize=8, inline=1)
        else:
            cs = ax.contour(xi, yi, zi, levels, **contour_plain)
            legend and colorbar(cs, **colorbar_style)
            labeled and clabel(cs, fontsize=8, inline=1)
        ax.set_xlim(-0.5, 0.5)
        ax.set_ylim(-0.5, 0.5)
    savefig(jobname + format, format=format, transparent=True)
def grid(x, y, z, resX=90, resY=90):
    "Convert 3 column data to matplotlib grid"
    xi = pl.linspace(min(x), max(x), resX)
    yi = pl.linspace(min(y), max(y), resY)
    Z = pl.griddata(x, y, z, xi, yi, interp='linear')
    X, Y = pl.meshgrid(xi, yi)
    return X, Y, Z
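# Hypothetical usage sketch (not from the original source): assumes an older
# matplotlib where pl.griddata (matplotlib.mlab.griddata) still exists and
# that `grid` above is in scope; the scattered test data is arbitrary.
import numpy as np
import pylab as pl

x = np.random.uniform(-2, 2, 200)
y = np.random.uniform(-2, 2, 200)
z = np.exp(-(x**2 + y**2))          # scattered samples of a smooth surface

X, Y, Z = grid(x, y, z, resX=60, resY=60)
pl.contourf(X, Y, Z, 20)
pl.colorbar()
pl.show()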
def _plot_bar3d(self):
    logging.debug('')
    if self.dimension > 0:
        return
    array = self._data_to_array()

    # width, depth, and height of the bars (array == height values == dz)
    dx = list(numpy.array([1.0 / len(array[0]) for x in range(0, array.shape[1])]))
    dy = list(numpy.array([1.0 / len(array) for x in range(0, array.shape[0])]))
    dx *= len(array)
    dy *= len(array[0])
    dz = array.flatten() + 0.00000001  # dirty hack to circumvent ValueError

    # x,y,z position of each bar
    x = pylab.linspace(0.0, 1.0, len(array[0]), endpoint=False)
    y = pylab.linspace(0.0, 1.0, len(array), endpoint=False)
    xpos, ypos = pylab.meshgrid(x, y)
    xpos = xpos.flatten()
    ypos = ypos.flatten()
    zpos = numpy.zeros(array.shape).flatten()

    fig = MyFig(self.options, xlabel='Probability p', ylabel='Fraction of Nodes',
                zlabel='Fraction of Executions', ThreeD=True)
    fig.ax.set_zlim3d(0.0, 1.01)
    fig.ax.set_xlim3d(0.0, 1.01)
    fig.ax.set_ylim3d(0.0, 1.01)
    fig.ax.set_autoscale_on(False)
    assert(len(dx) == len(dy) == len(array.flatten()) == len(xpos) == len(ypos) == len(zpos))
    fig.ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color=['#CCBBDD'])
    try:
        self.figures['bar3d'] = fig.save('bar3d' + str(self.data_filter))
    except ValueError, err:
        logging.warning('%s', err)
def plot_wire_surface_pcolor(self):
    """
    Plot the fraction of executions as a function of the fraction of nodes
    for each source. Only the pseudocolor plot is currently generated; the
    wireframe and surface variants are commented out below.
    """
    logging.debug('')
    if self.dimension > 0:
        return
    array = self._data_to_array()
    x = pylab.linspace(0.0, 1.0, len(array[0]) + 1)
    y = pylab.linspace(0.0, 1.0, len(array) + 1)
    X, Y = pylab.meshgrid(x, y)
    #fig_wire = MyFig(self.options, xlabel='Probability p', ylabel='Fraction of Nodes', ThreeD=True)
    #fig_surf = MyFig(self.options, xlabel='Probability p', ylabel='Fraction of Nodes', ThreeD=True)
    fig_pcol = MyFig(self.options, xlabel='Probability p', ylabel='Fraction of Nodes')

    #fig_wire.ax.plot_wireframe(X, Y, array)
    #fig_surf.ax.plot_surface(X, Y, array, rstride=1, cstride=1, linewidth=1, antialiased=True)
    pcolor = fig_pcol.ax.pcolor(X, Y, array, cmap=cm.jet, vmin=0.0, vmax=1.0)
    cbar = fig_pcol.fig.colorbar(pcolor, shrink=0.8, aspect=10)
    cbar.ax.set_yticklabels(pylab.linspace(0.0, 1.0, 11), fontsize=0.8 * self.options['fontsize'])

    #for ax in [fig_wire.ax, fig_surf.ax]:
        #ax.set_zlim3d(0.0, 1.01)
        #ax.set_xlim3d(0.0, 1.01)
        #ax.set_ylim3d(0.0, 1.01)
    #self.figures['wireframe'] = fig_wire.save('wireframe_' + str(self.data_filter))
    #self.figures['surface'] = fig_surf.save('surface_' + str(self.data_filter))
    self.figures['pcolor'] = fig_pcol.save('pcolor_' + str(self.data_filter))
def showSF(N, label=''):
    fig = P.figure()
    ax1 = fig.add_subplot(1, 2, 1, projection='3d')
    ax2 = fig.add_subplot(1, 2, 2)

    Nx = 21
    Ny = 21
    nLevels = 12

    tix = P.linspace(0.0, 1.0, Nx)
    tiy = P.linspace(0.0, 1.0, Ny)
    (X, Y) = P.meshgrid(tix, tiy)

    z = g.RVector(len(X.flat))
    for i, x in enumerate(X.flat):
        p = g.RVector3(X.flat[i], Y.flat[i])
        z[i] = N(p)

    Z = P.ma.masked_where(z == -99., z)
    Z = Z.reshape(Ny, Nx)

    ax2.contourf(X, Y, Z)
    ax2.set_aspect('equal')
    surf = ax1.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=P.cm.jet, linewidth=0)
    ax2.set_title(label + N.__str__())
    fig.colorbar(surf)
def int_f(a, fs=1.):
    """
    A Fourier-based integrator.

    ===========
    Parameters:
    ===========
    a : *array* (1D)
        The array which should be integrated
    fs : *float*
        sampling frequency of the data

    ========
    Returns:
    ========
    y : *array* (1D)
        The integrated array
    """

    if False:
        # version with "mirrored" code
        xp = hstack([a, a[::-1]])
        int_fluc = int_f0(xp, float(fs))[:len(a)]
        baseline = mean(a) * arange(len(a)) / float(fs)
        return int_fluc + baseline - int_fluc[0]

    # old version
    baseline = mean(a) * arange(len(a)) / float(fs)
    int_fluc = int_f0(a, float(fs))
    return int_fluc + baseline - int_fluc[0]

    # old code - remove eventually (comment on 02/2014)
    # periodify
    if False:
        baseline = linspace(a[0], a[-1], len(a))
        a0 = a - baseline
        m = a0[-1] - a0[-2]
        b2 = linspace(0, -.5 * m, len(a))
        baseline -= b2
        a0 += b2
        a2 = hstack([a0, -1. * a0[1:][::-1]])  # "smooth" periodic signal

        dbase = baseline[1] - baseline[0]
        t_vec = arange(len(a)) / float(fs)
        baseint = baseline[0] * t_vec + .5 * dbase * t_vec ** 2

        # define frequencies
        T = len(a2) / float(fs)
        freqs = 1. / T * arange(len(a2))
        freqs[len(freqs) // 2 + 1:] -= float(fs)

        spec = fft.fft(a2)
        spec_i = zeros_like(spec, dtype=complex)
        spec_i[1:] = spec[1:] / (2j * pi * freqs[1:])
        res_int = fft.ifft(spec_i).real[:len(a0)] + baseint
        return res_int - res_int[0]
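# Rough usage sketch (illustration only): assumes int_f and its helper
# int_f0 are importable from this module; the test signal is arbitrary.
from pylab import linspace, cos, pi

fs = 100.0                          # sampling frequency (so dt = 0.01)
t = linspace(0.0, 10.0, 1001)
signal = cos(pi * t)                # derivative of sin(pi*t) / pi

integrated = int_f(signal, fs=fs)
# `integrated` should approximate sin(pi*t) / pi up to the offset that
# int_f removes by subtracting its first sample.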
def cplot(f, re=[-5,5], im=[-5,5], points=2000, color=default_color_function,
          verbose=False, file=None, dpi=None):
    """
    Plots the given complex-valued function *f* over a rectangular part
    of the complex plane specified by the pairs of intervals *re* and *im*.
    For example::

        cplot(lambda z: z, [-2, 2], [-10, 10])
        cplot(exp)
        cplot(zeta, [0, 1], [0, 50])

    By default, the complex argument (phase) is shown as color (hue) and
    the magnitude is shown as brightness. You can also supply a
    custom color function (*color*). This function should take a
    complex number as input and return an RGB 3-tuple containing
    floats in the range 0.0-1.0.

    To obtain a sharp image, the number of points may need to be
    increased to 100,000 or thereabout. Since evaluating the
    function that many times is likely to be slow, the 'verbose' option
    is useful to display progress.

    NOTE: This function requires matplotlib (pylab).
    """
    import pylab
    pylab.clf()
    rea, reb = re
    ima, imb = im
    dre = reb - rea
    dim = imb - ima
    M = int(sqrt(points*dre/dim)+1)
    N = int(sqrt(points*dim/dre)+1)
    x = pylab.linspace(rea, reb, M)
    y = pylab.linspace(ima, imb, N)
    # Note: we have to be careful to get the right rotation.
    # Test with these plots:
    #   cplot(lambda z: z if z.real < 0 else 0)
    #   cplot(lambda z: z if z.imag < 0 else 0)
    w = pylab.zeros((N, M, 3))
    for n in xrange(N):
        for m in xrange(M):
            z = mpc(x[m], y[n])
            try:
                v = color(f(z))
            except plot_ignore:
                v = (0.5, 0.5, 0.5)
            w[n,m] = v
        if verbose:
            print n, "of", N
    pylab.imshow(w, extent=(rea, reb, ima, imb), origin='lower')
    pylab.xlabel('Re(z)')
    pylab.ylabel('Im(z)')
    if file:
        pylab.savefig(file, dpi=dpi)
    else:
        pylab.show()
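# Illustration of the custom *color* hook described in the docstring above
# (this example is not part of the original module): a color function that
# maps |f(z)| to a gray level clipped into the required 0.0-1.0 range, used
# with the cplot defined above and mpmath's sin.
from mpmath import sin

def gray_by_magnitude(w):
    level = min(1.0, float(abs(w)) / 5.0)
    return (level, level, level)

cplot(sin, [-5, 5], [-5, 5], points=5000, color=gray_by_magnitude)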
def create_figure():
    psd = test_eigenfre_music()
    f = linspace(-0.5, 0.5, len(psd))
    plot(f, 10 * log10(psd / max(psd)), '--', label='MUSIC 15')
    savefig('psd_eigenfre_music.png')

    psd = test_eigenfre_ev()
    f = linspace(-0.5, 0.5, len(psd))
    plot(f, 10 * log10(psd / max(psd)), '--', label='EV 15')
    savefig('psd_eigenfre_ev.png')
def potAddStreamParticles(self):
    """
    Takes the dimensions of the plot widget and plots stream lines by
    advecting the particles for a small time step dt
    """
    noOfParticlesAtX = int(self.axisRange[3] - self.axisRange[2]) * 3
    noOfParticlesAtY = int(self.axisRange[1] - self.axisRange[0]) * 2
    self.potStreakParticles = [particle(x, y)
                               for x in linspace(self.axisRange[1], self.axisRange[0], noOfParticlesAtY)
                               for y in linspace(self.axisRange[2] + 0.1, self.axisRange[3] - 0.1, noOfParticlesAtX)]
    self.graphicWidget.item.grid(False)
    self.graphicWidget.plotStreakParticles(self.potStreakParticles, tag="velMagnitude")
    self.graphicWidget.fig.canvas.draw()
def mandel_serial():
    m = zeros((N, N))
    i = -1
    for x in linspace(-2, 1, num=N):
        i += 1
        j = -1
        for y in linspace(-2, 1, num=N):
            j += 1
            m[j, i] = mandel_pixel(x, y)
    return m
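# Hypothetical companion code for illustration only: the original script
# defines N and mandel_pixel elsewhere (possibly differently); this sketch
# just shows how mandel_serial could be driven and displayed.
from pylab import zeros, linspace, imshow, show

N = 400

def mandel_pixel(x, y, max_iter=80):
    # escape-time iteration count for c = x + iy
    c = complex(x, y)
    z = 0j
    for k in range(max_iter):
        z = z * z + c
        if abs(z) > 2.0:
            return k
    return max_iter

m = mandel_serial()
imshow(m, extent=(-2, 1, -2, 1), origin='lower')
show()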
def displayPlots():
    clock = moose.Clock('/clock')  # look up global clock
    totR = moose.element('/model/graphs/conc1/tot_PSD_R.Co')
    PP1 = moose.element('/model/moregraphs/conc4/PP1_dash_active.Co')
    Ca = moose.element('/model/graphs/conc1/Ca.Co')
    pylab.plot(pylab.linspace(0, clock.currentTime, len(totR.vector)), totR.vector,
               label='membrane Receptor')
    pylab.plot(pylab.linspace(0, clock.currentTime, len(PP1.vector)), PP1.vector,
               label='active PP1')
    pylab.plot(pylab.linspace(0, clock.currentTime, len(Ca.vector)), Ca.vector,
               label='Ca')
    pylab.legend()
    pylab.show()
def plot_fig7(vm):
    ax_7 = pylab.subplot(111)
    ax_7.plot(pylab.linspace(0, simtime / tau, len(vm[0].vector)),
              (vm[0].vector - Em) / (Ek - Em),
              label='(1,2)->(3,4)->(5,6)->(7,8)')
    ax_7.plot(pylab.linspace(0, simtime / tau, len(vm[1].vector)),
              (vm[1].vector - Em) / (Ek - Em),
              label='(7,8)->(5,6)->(3,4)->(1,2)')
    ax_7.plot(pylab.linspace(0, simtime / tau, len(vm[2].vector)),
              (vm[2].vector - Em) / (Ek - Em),
              label='control')
    pylab.legend()
    pylab.show()
def plot_risetimes(a, b, **kwargs): # plt.ion() # if kwargs is not None: # for key, value in kwargs.iteritems(): # if key == 'file_list': # file_list = value # if key == 'scan_line': # scan_line = value # varray = plt.array(get_value_from_cfg(file_list, scan_line)) n_files = a.shape[-1] cmap = plt.get_cmap('jet') c = [cmap(i) for i in plt.linspace(0, 1, n_files)] fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10)) [ax.set_color_cycle(c) for ax in (ax1, ax2)] r = [] for i in xrange(n_files): x, y = a[:,i], b[:,i] # xo, yo = x, y #, get_envelope(x, y) xo, yo = get_envelope(x, y) p = plt.polyfit(xo, np.log(yo), 1) # Right way to fit... a la Nicolas - the fit expert! l = ax1.plot(x, plt.log(plt.absolute(y))) lcolor = l[-1].get_color() ax1.plot(xo, plt.log(yo), color=lcolor, marker='o', mec=None) ax1.plot(x, p[1] + x * p[0], color=lcolor, ls='--', lw=3) l = ax2.plot(x, y) lcolor = l[-1].get_color() ax2.plot(xo, yo, 'o', color=lcolor) xi = plt.linspace(plt.amin(x), plt.amax(x)) yi = plt.exp(p[1] + p[0] * xi) ax2.plot(xi, yi, color=lcolor, ls='--', lw=3) print p[1], p[0], 1 / p[0] # plt.draw() # ax1.cla() # ax2.cla() r.append(1/p[0]) ax2.set_ylim(0, 1000) plt.figure(2) plt.plot(r, lw=3, c='purple') # plt.gca().set_ylim(0, 10000) # ax3 = plt.subplot(111) # ax3.semilogy(x, y) # ax3.semilogy(xo, yo) return r
def ideal():
    x2 = plt.linspace(1e-20, .13, 40000)
    tt = plt.linspace(1e-20, 1.2, 4000)
    plt.figure()
    plt.title('ideal gas')
    plt.ylabel('f')
    plt.xlabel('n')
    #plt.plot(x2,SW.fid(Tlist[100],x2),color='#f36118')
    #plt.plot(tt,SW.fid(tt,nR[100]))
    plt.show()
def eval_critical(options, tuples_for_pb, ps, cepsilon=0.01): fig_abs = MyFig(options, figsize=(10, 8), xlabel=r'Probability $p_s$', ylabel=r'abs', aspect='auto', legend=True, grid=False) fig_mean = MyFig(options, figsize=(10, 8), xlabel=r'Probability $p_s$', ylabel=r'mean', aspect='auto', legend=True, grid=False) fig_majority = MyFig(options, figsize=(10, 8), xlabel=r'Probability $p_s$', ylabel=r'Majority', aspect='auto', legend=True, grid=False) if options['grayscale']: colors = options['graycm'](pylab.linspace(0, 1.0, len(tuples_for_pb))) else: colors = options['color'](pylab.linspace(0, 1.0, len(tuples_for_pb))) crit_ranges = options['crit_range'] p_c = list() for i, pb in enumerate(sorted(tuples_for_pb.keys())): if '%.2f' % pb not in crit_ranges: continue minps = crit_ranges['%.2f' % pb][0] maxps = crit_ranges['%.2f' % pb][1] rs = tuples_for_pb[pb] tuples = zip(*rs) if len(tuples) == 0: continue crit_abs = [max(t)-min(t) for t in tuples] crit_abs_min = min(crit_abs) crit_abs_ps = [pos for pos, c in enumerate(crit_abs) if abs(c - crit_abs_min) < cepsilon] fig_abs.ax.plot(ps, crit_abs, label='%.2f' % pb, color=colors[i]) crit_mean = [scipy.mean(t) for t in tuples] crit_mean_min = min(crit_mean) crit_mean_ps = [pos for pos, c in enumerate(crit_mean) if abs(c - crit_mean_min) < cepsilon] fig_mean.ax.plot(ps, crit_mean, label='%.2f' % pb, color=colors[i]) majority = list() for t in tuples: all_pairs = list(k_subsets(t, 2)) c = [abs(a-b) for a,b in all_pairs] counter = Counter(c) times = set(counter.values()) y = scipy.mean([val for (val, occ) in counter.iteritems() if occ == max(times)]) #y = scipy.mean(c) majority.append(y) fig_majority.ax.plot(ps, majority, label='%.2f' % pb, color=colors[i]) start = list(ps).index(minps) stop = min(list(ps).index(maxps)+1, len(ps)) metric = majority pc = ps[list(metric).index(min(metric[start:stop]))] p_c.append((pb, pc)) fig_abs.legend_title = '$p_b$' fig_abs.save('value-eval-abs') fig_mean.legend_title = '$p_b$' fig_mean.save('value-eval-pairs') fig_majority.legend_title = '$p_b$' fig_majority.save('value-eval-pairs') return p_c
def cplot(ctx, f, re=[-5, 5], im=[-5, 5], points=2000, color=None, verbose=False, file=None, dpi=None, axes=None): """ Plots the given complex-valued function *f* over a rectangular part of the complex plane specified by the pairs of intervals *re* and *im*. For example:: cplot(lambda z: z, [-2, 2], [-10, 10]) cplot(exp) cplot(zeta, [0, 1], [0, 50]) By default, the complex argument (phase) is shown as color (hue) and the magnitude is show as brightness. You can also supply a custom color function (*color*). This function should take a complex number as input and return an RGB 3-tuple containing floats in the range 0.0-1.0. To obtain a sharp image, the number of points may need to be increased to 100,000 or thereabout. Since evaluating the function that many times is likely to be slow, the 'verbose' option is useful to display progress. .. note :: This function requires matplotlib (pylab). """ if color is None: color = ctx.default_color_function import pylab if file: axes = None fig = None if not axes: fig = pylab.figure() axes = fig.add_subplot(111) rea, reb = re ima, imb = im dre = reb - rea dim = imb - ima M = int(ctx.sqrt(points * dre / dim) + 1) N = int(ctx.sqrt(points * dim / dre) + 1) x = pylab.linspace(rea, reb, M) y = pylab.linspace(ima, imb, N) # Note: we have to be careful to get the right rotation. # Test with these plots: # cplot(lambda z: z if z.real < 0 else 0) # cplot(lambda z: z if z.imag < 0 else 0) w = pylab.zeros((N, M, 3)) for n in xrange(N): for m in xrange(M): z = ctx.mpc(x[m], y[n]) try: v = color(f(z)) except ctx.plot_ignore: v = (0.5, 0.5, 0.5) w[n, m] = v if verbose: print(str(n) + ' of ' + str(N)) rea, reb, ima, imb = [float(_) for _ in [rea, reb, ima, imb]] axes.imshow(w, extent=(rea, reb, ima, imb), origin='lower') axes.set_xlabel('Re(z)') axes.set_ylabel('Im(z)') if fig: if file: pylab.savefig(file, dpi=dpi) else: pylab.show()
def foo(t):
    if t < 10.0:
        return 0.0
    elif 10.0 <= t < 10.0 * np.pi:
        return 1.0
    else:
        return 10.0 * sin(t * 0.5)


def values_before_zero(t):
    return array([0.0, 0.0])


def model(Y, t):
    x, y = Y(t)
    x_tau, y_tau = Y(t - tau)
    return array([foo(t), x_tau])


tt = linspace(0, 100, 1000)
yy = ddeint(model, values_before_zero, tt)

fig, ax = subplots(1, figsize=(8, 4))
ax.plot(tt, [x[0] for x in yy], label="trace1")
ax.plot(tt, [x[1] for x in yy], label="trace2")
fig.legend(loc='upper center', borderaxespad=2.0)
fig.show()
def plot_histogram(self): configurations_per_variant = self.configurations_per_variant colors = self.options['color'](pylab.linspace( 0, 0.8, configurations_per_variant)) labels = [] for li_of_frac in self.label_info: s = str() for i, (param, value) in enumerate(li_of_frac): if i > 0: s += '\n' s += '%s=%s' % (_cname_to_latex(param), value) labels.append(s) labels *= len(self.configurations['p']) ps = list( pylab.flatten(self.configurations_per_p * [p] for p in self.configurations['p'])) ################################################################# # histogram plot ################################################################# fig2 = MyFig(self.options, rect=[0.1, 0.2, 0.8, 0.75], figsize=(max(self.length(), 10), 10), xlabel='Probability p', ylabel='Fraction of Nodes', aspect='auto') patches = [] for i, fracs in enumerate(self.fraction_of_nodes): hist, bin_edges = numpy.histogram(fracs, bins=pylab.linspace( 0, 1, self.options['bins'])) hist_norm = hist / float(len(fracs)) * 0.4 yvals = list(bin_edges.repeat(2))[1:-1] yvals.extend(list(bin_edges.repeat(2))[1:-1][::-1]) xvals = list((i + 1 + hist_norm).repeat(2)) xvals.extend(list((i + 1 - hist_norm).repeat(2))[::-1]) i = (i % self.configurations_per_p) poly = Polygon(zip(xvals, yvals), edgecolor='black', facecolor=colors[i], closed=True) patches.append(poly) patch_collection = PatchCollection(patches, match_original=True) fig2.ax.add_collection(patch_collection) fig2.ax.set_xticks(range(1, self.length() + 1)) fig2.ax.set_xticklabels(ps, fontsize=self.options['fontsize'] * 0.6) for x in range(0, self.length(), self.configurations_per_p): fig2.ax.plot([x + 0.5, x + 0.5], [0.0, 1.0], linestyle='dotted', color='red', alpha=0.8) fig2.ax.set_ylim(0, 1) ################################################################# # create some dummy elements for the legend ################################################################# if configurations_per_variant > 1: proxies = [] #for i in range(0, len(self.configurations['gossip'])): for i in range(0, configurations_per_variant): r = Rectangle((0, 0), 1, 1, facecolor=colors[i % configurations_per_variant], edgecolor='black') proxies.append((r, labels[i])) fig2.ax.legend([proxy for proxy, label in proxies], [label for proxy, label in proxies], loc='lower right') self.figures['histogram'] = fig2.save('histogram_' + str(self.data_filter))
def plot_distribution(self): logging.debug('') if self.dimension > 0: return probabilities = pylab.linspace(0, 1, 20) distributions = {} for name, func, extra_args in [('data', None, []), ('normal', stats.norm, [])]: #('chi2', stats.chi2), ('gamma', stats.gamma)]: fig_cdf = MyFig(self.options, xlabel='Fraction of Nodes', ylabel='CDF', grid=True, legend=True) fig_cdf.ax.plot([0.0, 1.0], [0.0, 1.0], color='black', linestyle='solid') fig_qq = MyFig(self.options, xlabel='Fraction of Nodes', ylabel='%s Distribution' % name, grid=True, legend=True) fig_qq.ax.plot([0.0, 1.0], [0.0, 1.0], color='black', linestyle='solid') fig_qq.ax.set_xlim(0.0, 1.0) fig_qq.ax.set_ylim(0.0, 1.0) distributions[name] = (func, fig_cdf, fig_qq, extra_args) #fig_cdf_data = MyFig(self.options, xlabel='Fraction of Nodes', ylabel='CDF', grid=True, legend=True) #fig_cdf_data.ax.plot([0.0, 1.2], [0.0, 1.2], color='black', linestyle='solid') ######################################### #fig_qq_normal = MyFig(self.options, xlabel='Fraction of Nodes', ylabel='Normal Distribution', grid=True, legend=True) #fig_qq_normal.ax.plot([0.0, 1.0], [0.0, 1.0], color='black', linestyle='solid') #fig_qq_normal.ax.set_xlim(0.0, 1.0) #fig_qq_normal.ax.set_ylim(0.0, 1.0) #fig_cdf_normal = MyFig(self.options, xlabel='x', ylabel='CDF', grid=True, legend=True) #fig_cdf_normal.ax.plot([0.0, 1.2], [0.0, 1.2], color='black', linestyle='solid') ######################################### #fig_qq_gamma = MyFig(self.options, xlabel='Fraction of Nodes', ylabel='Gamma Distribution', grid=True, legend=True) #fig_qq_gamma.ax.plot([0.0, 1.0], [0.0, 1.0], color='black', linestyle='solid') #fig_qq_gamma.ax.set_xlim(0.0, 1.0) #fig_qq_gamma.ax.set_ylim(0.0, 1.0) #fig_cdf_gamma = MyFig(self.options, xlabel='x', ylabel='CDF', grid=True, legend=True, aspect='auto') #fig_cdf_gamma.ax.set_xlim(0.0, 1.0) ######################################### #fig_qq_2normal = MyFig(self.options, xlabel='Fraction of Nodes', ylabel='Normal Distribution', grid=True, legend=True) #fig_qq_2normal.ax.plot([0.0, 1.0], [0.0, 1.0], color='black', linestyle='solid') #fig_qq_2normal.ax.set_xlim(0.0, 1.0) #fig_qq_2normal.ax.set_ylim(0.0, 1.0) #fig_cdf_2normal = MyFig(self.options, xlabel='x', ylabel='CDF', grid=True, legend=True) #fig_cdf_2normal.ax.plot([0.0, 1.2], [0.0, 1.2], color='black', linestyle='solid') ######################################### #fig_qq_2chi2 = MyFig(self.options, xlabel='Fraction of Nodes', ylabel='2x Chi Square Distribution', grid=True, legend=True) #fig_qq_2chi2.ax.plot([0.0, 1.0], [0.0, 1.0], color='black', linestyle='solid') #fig_qq_2chi2.ax.set_xlim(0.0, 1.0) #fig_qq_2chi2.ax.set_ylim(0.0, 1.0) #fig_cdf_2chi2 = MyFig(self.options, xlabel='x', ylabel='CDF', grid=True, legend=True) #fig_cdf_2chi2.ax.plot([0.0, 1.2], [0.0, 1.2], color='black', linestyle='solid') colors = self.options['color'](pylab.linspace(0, 0.8, self.length())) markers = self.options['markers'] for j, data in enumerate(self.fraction_of_nodes): label = 'p=%.2f' % self.configurations['p'][j] avr = scipy.average(data) sigma = scipy.std(data) quantiles_data = stats.mstats.mquantiles(data, prob=probabilities) for name in distributions: func, fig_cdf, fig_qq, extra_args = distributions[name] if func: quantiles_stat = func.ppf(probabilities, *extra_args, loc=avr, scale=sigma) fig_qq.ax.plot(quantiles_data, quantiles_stat, 'o', color=colors[j], linestyle='-', label=label, marker=markers[j]) else: quantiles_stat = quantiles_data fig_cdf.ax.plot(quantiles_stat, probabilities, 'o', color=colors[j], 
linestyle='-', label=label, marker=markers[j]) #fig_cdf_data.ax.plot(quantiles_data, probabilities, 'o', color=colors[j], linestyle='-', label=label) ########################################################################### # Normal Distribution ########################################################################### #quantiles_normal = stats.norm.ppf(probabilities, loc=avr, scale=sigma) #fig_cdf_normal.ax.plot(quantiles_normal, probabilities, 'o', color=colors[j], linestyle='-', label=label) #fig_qq_normal.ax.plot(quantiles_data, quantiles_normal, 'o', color=colors[j], linestyle='-', label=label) ########################################################################### # Gamma Distribution ########################################################################### #quantiles_gamma = stats.gamma.ppf(probabilities, 0.4, loc=avr, scale=sigma) #_quantiles_gamma = [] #for x in quantiles_gamma: #if x != numpy.infty: #_quantiles_gamma.append(min(1.0,x)) #else: #_quantiles_gamma.append(x) #quantiles_gamma = numpy.array(_quantiles_gamma) #fig_cdf_gamma.ax.plot(quantiles_gamma, probabilities, 'o', color=colors[j], linestyle='-', label=label) #fig_qq_gamma.ax.plot(quantiles_data, quantiles_gamma, 'o', color=colors[j], linestyle='-', label=label) ########################################################################### # 2x Chi Square Distribution ########################################################################### #data_2chi2 = [] #for i in range(0, 5000): #sigma_2chi2 = p*0.5 #dv = 0.5 #if random.normalvariate(0.6, 0.1) < p: #exp_2chi2 = 1.0 #d = stats.chi2.rvs(dv, loc=exp_2chi2, scale=sigma_2chi2, size=1) #d = exp_2chi2-abs(exp_2chi2-d) #else: #exp_2chi2 = 0.05 #d = stats.chi2.rvs(dv, loc=exp_2chi2, scale=sigma_2chi2, size=1) #data_2chi2.append(max(min(d[0], 1.0), 0.0)) #quantiles_data_2chi2 = stats.mstats.mquantiles(data_2chi2, prob=probabilities) #fig_cdf_2chi2.ax.plot(quantiles_data_2chi2, probabilities, 'o', linestyle='-', label='p=%.2f' % p, color=colors[j]) #fig_qq_2chi2.ax.plot(quantiles_data, quantiles_data_2chi2, 'o', color=colors[j], linestyle='-', label=label) ########################################################################### # 2x Normal Distribution ########################################################################### #data_2normal = [] #for i in range(0, 2000): #if random.uniform(0.0, 1.0) < p: #exp_2normal = 1.0 #sigma_2normal = 0.05 #d = stats.norm.rvs(loc=exp_2normal, scale=sigma_2normal, size=1) #d = exp_2normal-abs(exp_2normal-d) #else: #exp_2normal = 0.05 #sigma_2normal = 0.05 #d = stats.norm.rvs(loc=exp_2normal, scale=sigma_2normal, size=1) #data_2normal.append(max(min(d[0], 1.0), 0.0)) #quantiles_data_2normal = stats.mstats.mquantiles(data_2normal, prob=probabilities) #fig_cdf_2normal.ax.plot(quantiles_data_2normal, probabilities, 'o', linestyle='-', label='p=%.2f' % p, color=colors[j]) #fig_qq_2normal.ax.plot(quantiles_data, quantiles_data_2normal, 'o', color=colors[j], linestyle='-', label=label) for name in distributions: func, fig_cdf, fig_qq, extra_args = distributions[name] if func: fig_qq.save('distribution-qq_%s_%s' % (name, str(self.data_filter))) fig_cdf.save('distribution-cdf_%s_%s' % (name, str(self.data_filter)))
def _main(): parser = argparse.ArgumentParser() parser.add_argument("-m", "--model", default=[], nargs='*', type=str, help="Frozen model file to test") parser.add_argument("-fd_dih", "--full_dimension_dih", default=9, type=int, help="The dimensionality of FES") parser.add_argument("-fd_dist", "--full_dimension_dist", default=3, type=int, help="The dimensionality of FES") parser.add_argument("-ns", "--num_step", default=1000000, type=int, help="number of mc step") parser.add_argument("-nw", "--num_walker", default=3000, type=int, help="number of walker") args = parser.parse_args() model = args.model fd_dih = args.full_dimension_dih fd_dist = args.full_dimension_dist ns = args.num_step nw = args.num_walker positons = [] energies = [] forces = [] graph = load_graph(model[0]) bins = 30 xx = pylab.linspace(0, 200, bins) yy = pylab.linspace(0.2, 4.1, bins) pp_hist1cv1 = np.zeros((1, len(xx))) pp_hist1cv2 = np.zeros((1, len(yy))) pp_hist2d = np.zeros((1, len(xx), len(yy))) delta1 = 200.0 / bins delta2 = (4.1 - 0.2) / bins with tf.Session(graph=graph) as sess: walker = Walker(fd_dih, fd_dist, nw, sess) for ii in range(100): pp, ee, ff = walker.sample(compute_ef) for ii in range(ns + 1): pp, ee, ff = walker.sample(compute_ef) ##all 1d ##certain 2d pp_hist_new2d, pp_hist_new1cv1, pp_hist_new1cv2 = my_hist2d( pp, xx, yy, delta1, delta2, fd_dih, fd_dist) pp_hist2d = (pp_hist2d * ii + pp_hist_new2d) / (ii + 1) pp_hist1cv1 = (pp_hist1cv1 * ii + pp_hist_new1cv1) / (ii + 1) pp_hist1cv2 = (pp_hist1cv2 * ii + pp_hist_new1cv2) / (ii + 1) if np.mod(ii, 50000) == 0: zz1 = -np.log(pp_hist1cv1 + 1e-7) / beta zz1 *= f_cvt / 4.184 ##kcal zz1 = zz1 - np.min(zz1) fp = open("1CV1_index0.dat", "a") for temp in zz1[0]: fp.write(str(temp) + ' ') fp.write('\n') fp.close() zz2 = -np.log(pp_hist1cv2 + 1e-7) / beta zz2 *= f_cvt / 4.184 ##kcal zz2 = zz2 - np.min(zz2) fp = open("1CV2_index1.dat", "a") for temp in zz2[0]: fp.write(str(temp) + ' ') fp.write('\n') fp.close() zz2d = np.transpose(-np.log(pp_hist2d + 1e-10), (0, 2, 1)) / beta zz2d *= f_cvt / 4.184 zz2d = zz2d - np.min(zz2d) np.savetxt("2d_step%d.dat" % ii, zz2d[0]) np.savetxt("position%d.dat" % ii, pp)
        1:2 * ny:2] = (9 * c7) / 16.0 + (9 * c6) / 16.0 + (3 * c5) / 16.0 + \
        (3 * c4) / 16.0 - (3 * c2) / 16.0 - c1 / 8.0 - (3 * c0) / 16.0
    return Xn, Yn, qn


fh = tables.openFile("s120-pb-advection-rb_chi_1.h5")
grid = fh.root.StructGrid
lower = grid._v_attrs.vsLowerBounds
upper = grid._v_attrs.vsUpperBounds
cells = grid._v_attrs.vsNumCells

dx = (upper[0] - lower[0]) / cells[0]
dy = (upper[1] - lower[1]) / cells[1]

Xc = pylab.linspace(lower[0] + 0.5 * dx, upper[0] - 0.5 * dx, cells[0])
Yc = pylab.linspace(lower[1] + 0.5 * dy, upper[1] - 0.5 * dy, cells[1])

T = pylab.linspace(0, 4 * math.pi, 41)

for i in range(41):
    print "Working on %d" % i
    fh = tables.openFile("s120-pb-advection-rb_chi_%d.h5" % i)
    # get solution
    q = fh.root.StructGridField
    Xn, Yn, qn_1 = projectOnFinerGrid_f3(Xc, Yc, q)
    pylab.pcolormesh(Xn, Yn, pylab.transpose(qn_1), vmin=0.0, vmax=0.5)
    pylab.title('T=%f' % T[i])
    pylab.axis('image')
# Andreas Müller, 2008
# [email protected]
#
# this code may be freely used under GNU GPL conditions

"""
Simulate channel with sampling frequency offset
"""

import math, random, pylab
import dab_tb, ber

NUM_BYTES = 1000000

MODES = [1, 2, 3, 4]
MODES = [1]

SAMPLE_RATE_ERROR = pylab.linspace(0.99, 1.01, 50)

PLOT_FORMAT = ['-', '-x', '--x', '-.x', ':x']

# initialise test flowgraph
tb = dab_tb.dab_ofdm_testbench(autocorrect_sample_rate=True, ber_sink=True)
tb.gen_random_bytes(NUM_BYTES)

# prepare plot
pylab.xlabel("Sampling frequency offset (ratio)")
pylab.ylabel("BER")

# open logfile
logfile = open("sampling_frequency_offset_ber_log.txt", 'w')
logfile.write("number of bytes: " + str(NUM_BYTES) +
              "\nRange of sampling rate ratios: " + str(SAMPLE_RATE_ERROR) +
A, b, V = lowpass(10000.0, 10000.0, 1e-9, 1e-9, 1.586, 1.0)
Vo = V[3]
print simplify(Vo)
w = p.logspace(0, 8, 801)
ss = 1j * w
hf = lambdify(s, Vo, 'numpy')
v = hf(ss)
p.loglog(w, abs(v), lw=2)
p.title('Low pass filter')
p.xlabel('$\omega(rad/s)$')
p.ylabel('$Magnitude$')
p.grid(True)
p.show()

# unit step response
H = sp.lti([0.1], [6.31517e-12, 8.9455e-7, 0.0631517, 0])
t, x = sp.impulse(H, None, p.linspace(0, 0.0001, 1001))
p.title('unit step response of low pass filter')
p.xlabel('t')
p.ylabel('$V_o(t)$')
p.plot(t, x)
p.grid(True)
p.show()

# for sinusoidal input
H = sp.lti([0.1], [6.31517e-12, 8.9455e-7, 0.0631517])
t = p.linspace(0.0, 1e-2, 10001)
u = p.sin(2e3 * p.pi * t) + p.cos(2e6 * p.pi * t)
t, y, svec = sp.lsim(H, u, t)
p.title('response of low pass filter for sinusoidal input')
p.xlabel('t')
p.ylabel('$V_o(t)$')
                   non_linear=0.5)
    return img


# Get the Galaxy info
galaxies = pickle.load(open('galaxies.pickle', 'rb'))
galaxies = filter(lambda galaxy: galaxy.ston_I > 30., galaxies)
galaxies = pyl.asarray(filter(lambda galaxy: galaxy.ICD_IH < 0.5, galaxies))

# Make the low mass grid first
x = [galaxy.Mass for galaxy in galaxies]
y = [galaxy.ICD_IH * 100 for galaxy in galaxies]
ll = 8.5
ul = 12

bins_x = pyl.linspace(ll, ul, 8)
bins_y = pyl.linspace(50, 0, 6)

grid = []
for i in range(bins_x.size - 1):
    xmin = bins_x[i]
    xmax = bins_x[i + 1]
    for j in range(bins_y.size - 1):
        ymax = bins_y[j]
        ymin = bins_y[j + 1]
        cond = [
            cond1 and cond2 and cond3 and cond4
            for cond1, cond2, cond3, cond4 in zip(
                x >= xmin, x < xmax, y >= ymin, y < ymax)
from math import asin, sin
from matplotlib import pyplot as plt
from pylab import linspace
from qc import Calc

# build the set of sample points
h = linspace(0.01, 17, 10000)

# initialize the calculator
a = Calc()
a.set_raw_values(u1=0, hm=17)

# compute h/hm
y = []
for i in linspace(0.01, 17, 10000):
    y.append(i / a.get_raw_values('hm'))

# compute the D line
LD = []
DetalLD = []
a.set_raw_values(r1=105.1175, r2=-74.7353, r3=-215.38763374564763)
a.set_raw_values(d1=5.32, d2=2.5)
a.set_raw_values(n1=1, n1s=1.51633, n2=1.51633, n2s=1.6727, n3=1.6727, n3s=1)
a.do_update()
ld = a.get_raw_values('l')
for i in h:
    L3s = a.get_L3s(i)
    DetalLD.append(L3s - ld)
    LD.append(L3s)
print("Spherical aberration at h/hm=0.707:", a.get_L3s(0.707 * a.get_raw_values('hm')) - ld)
print("Spherical aberration at h/hm=1:", a.get_L3s(a.get_raw_values('hm')) - ld)
def plot_color_vs_mass_hist(): galaxies = mk_galaxy_struc() # Definitions for the axes left, width = 0.1, 0.65 bottom, height = 0.1, 0.65 bottom_h = left_h = left + width + 0.02 rect_scatter = [left, bottom, width, height] rect_histx = [left, bottom_h, width, 0.2] rect_histy = [left_h, bottom, 0.2, height] # Add the figures # Mass vs color plot I-H f1 = pyl.figure(1, figsize=(8, 8)) f1s1 = f1.add_axes(rect_scatter) f1s2 = f1.add_axes(rect_histx) f1s3 = f1.add_axes(rect_histy) # Mass vs color plot J-H f2 = pyl.figure(2, figsize=(8, 8)) f2s1 = f2.add_axes(rect_scatter) f2s2 = f2.add_axes(rect_histx) f2s3 = f2.add_axes(rect_histy) #f2s1 = f2.add_subplot(111) # Mass vs color plot Z-H f3 = pyl.figure(3, figsize=(8, 8)) f3s1 = f3.add_axes(rect_scatter) f3s2 = f3.add_axes(rect_histx) f3s3 = f3.add_axes(rect_histy) #f3s1 = f3.add_subplot(111) mass1 = [] color1 = [] mass2 = [] color2 = [] mass3 = [] color3 = [] for i in range(len(galaxies)): # Color vs Mass Plots if galaxies[i].ston_I > 30.0: if galaxies[i].Mips >= 10.0: f1s1.plot(galaxies[i].Mass, galaxies[i].Imag - galaxies[i].Hmag, c='#FFAB19', marker='o', markersize=9) mass1.append(galaxies[i].Mass) # Get the right mass color1.append(galaxies[i].Imag - galaxies[i].Hmag) # Get the color if galaxies[i].ICD_IH > 0.1: f1s1.plot(galaxies[i].Mass, galaxies[i].Imag - galaxies[i].Hmag, c='None', marker='s', markersize=10) else: f1s1.plot(galaxies[i].Mass, galaxies[i].Imag - galaxies[i].Hmag, c='#196DFF', marker='*') elif 20.0 < galaxies[i].ston_I and galaxies[i].ston_I < 30.0: f1s1.plot(galaxies[i].Mass, galaxies[i].Imag - galaxies[i].Hmag, c='0.8', marker='s', alpha=0.4) else: f1s1.plot(galaxies[i].Mass, galaxies[i].Imag - galaxies[i].Hmag, c='0.8', marker='.', alpha=0.4) if galaxies[i].ston_Z > 30.0: if galaxies[i].Mips >= 10.0: f2s1.plot(galaxies[i].Mass, galaxies[i].Zmag - galaxies[i].Hmag, c='#FFAB19', marker='o', markersize=9) mass2.append(galaxies[i].Mass) # Get the right mass color2.append(galaxies[i].Zmag - galaxies[i].Hmag) # Get the color if galaxies[i].ICD_ZH > 0.05: f2s1.plot(galaxies[i].Mass, galaxies[i].Zmag - galaxies[i].Hmag, c='None', marker='s', markersize=10) else: f2s1.plot(galaxies[i].Mass, galaxies[i].Zmag - galaxies[i].Hmag, c='#196DFF', marker='*') elif 20.0 < galaxies[i].ston_Z and galaxies[i].ston_Z < 30.0: f2s1.plot(galaxies[i].Mass, galaxies[i].Zmag - galaxies[i].Hmag, c='0.8', marker='s', alpha=0.4) else: f2s1.plot(galaxies[i].Mass, galaxies[i].Zmag - galaxies[i].Hmag, c='0.8', marker='.', alpha=0.4) if galaxies[i].ston_J > 30.0: if galaxies[i].Mips >= 10.0: f3s1.plot(galaxies[i].Mass, galaxies[i].Jmag - galaxies[i].Hmag, c='#FFAB19', marker='o', markersize=9) mass3.append(galaxies[i].Mass) # Get the right mass color3.append(galaxies[i].Jmag - galaxies[i].Hmag) # Get the color if galaxies[i].ICD_JH > 0.03: f3s1.plot(galaxies[i].Mass, galaxies[i].Jmag - galaxies[i].Hmag, c='None', marker='s', markersize=10) else: f3s1.plot(galaxies[i].Mass, galaxies[i].Jmag - galaxies[i].Hmag, c='#196DFF', marker='*') elif 20.0 < galaxies[i].ston_J and galaxies[i].ston_J < 30.0: f3s1.plot(galaxies[i].Mass, galaxies[i].Jmag - galaxies[i].Hmag, c='0.8', marker='s', alpha=0.4) else: f3s1.plot(galaxies[i].Mass, galaxies[i].Jmag - galaxies[i].Hmag, c='0.8', marker='.', alpha=0.4) ############ # FIGURE 1 # ############ pyl.figure(1) f1s1.set_xscale('log') f1s1.set_xlim(3e7, 1e12) f1s1.set_ylim(0, 4.5) f1s1.set_xlabel(r"$Log_{10}(M_{\odot})$", fontsize=20) f1s1.set_ylabel("$(I-H)_{Observed}$", fontsize=20) f1s1.tick_params(axis='both', pad=7) 
binsx = pyl.logspace(7, 12) binsy = pyl.linspace(f1s1.get_ylim()[0], f1s1.get_ylim()[1] + 0.25) f1s2.hist(mass1, bins=binsx) f1s2.set_xlim(f1s1.get_xlim()) f1s2.tick_params(labelbottom='off') f1s2.set_xscale('log') f1s3.hist(color1, bins=binsy, orientation='horizontal') f1s3.set_ylim(f1s1.get_ylim()) f1s3.tick_params(labelleft='off') pyl.savefig('color_vs_mass_hist_IH.eps') ############ # FIGURE 2 # ############ pyl.figure(2) f2s1.set_xscale('log') f2s1.set_xlim(3e7, 1e12) f2s1.set_xlabel(r"$Log_{10}(M_{\odot})$", fontsize=20) f2s1.set_ylabel("$(Z-H)_{Observed}$", fontsize=20) f2s1.tick_params(axis='both', pad=7) binsx = pyl.logspace(7, 12) binsy = pyl.linspace(f2s1.get_ylim()[0], f2s1.get_ylim()[1] + 0.25) f2s2.hist(mass2, bins=binsx) f2s2.set_xlim(f2s1.get_xlim()) f2s2.tick_params(labelbottom='off') f2s2.set_xscale('log') f2s3.hist(color2, bins=binsy, orientation='horizontal') f2s3.set_ylim(f2s1.get_ylim()) f2s3.tick_params(labelleft='off') pyl.savefig('color_vs_mass_hist_ZH.eps') ############ # FIGURE 3 # ############ pyl.figure(3) f3s1.set_xscale('log') f3s1.set_xlim(3e7, 1e12) f3s1.set_ylim(-0.5, 2) f3s1.set_xlabel(r"$Log_{10}(M_{\odot})$", fontsize=20) f3s1.set_ylabel("$(J-H)_{Observed}$", fontsize=20) f3s1.tick_params(axis='both', pad=7) binsx = pyl.logspace(7, 12) binsy = pyl.linspace(f3s1.get_ylim()[0], f3s1.get_ylim()[1] + 0.25) f3s2.hist(mass3, bins=binsx) f3s2.set_xlim(f3s1.get_xlim()) f3s2.tick_params(labelbottom='off') f3s2.set_xscale('log') f3s3.hist(color3, bins=binsy, orientation='horizontal') f3s3.set_ylim(f3s1.get_ylim()) f3s3.tick_params(labelleft='off') pyl.savefig('color_vs_mass_hist_JH.eps') pyl.show()
from pylab import subplot, plot, linspace, savefig
from numpy import sin, cos, sinh, cosh, pi

x = linspace(-pi, pi, 100)

subplot(221)
plot(x, sin(x))
subplot(222)
plot(x, cos(x))
subplot(223)
plot(x, sinh(x))
subplot(224)
plot(x, cosh(x))

savefig('figuraejemplo3.pdf', format='pdf')
def splot(ctx, f, u=[-5,5], v=[-5,5], points=100, keep_aspect=True, \ wireframe=False, file=None, dpi=None, axes=None): """ Plots the surface defined by `f`. If `f` returns a single component, then this plots the surface defined by `z = f(x,y)` over the rectangular domain with `x = u` and `y = v`. If `f` returns three components, then this plots the parametric surface `x, y, z = f(u,v)` over the pairs of intervals `u` and `v`. For example, to plot a simple function:: >>> from sympy.mpmath import * >>> f = lambda x, y: sin(x+y)*cos(y) >>> splot(f, [-pi,pi], [-pi,pi]) # doctest: +SKIP Plotting a donut:: >>> r, R = 1, 2.5 >>> f = lambda u, v: [r*cos(u), (R+r*sin(u))*cos(v), (R+r*sin(u))*sin(v)] >>> splot(f, [0, 2*pi], [0, 2*pi]) # doctest: +SKIP .. note :: This function requires matplotlib (pylab) 0.98.5.3 or higher. """ import pylab import mpl_toolkits.mplot3d as mplot3d if file: axes = None fig = None if not axes: fig = pylab.figure() axes = mplot3d.axes3d.Axes3D(fig) ua, ub = u va, vb = v du = ub - ua dv = vb - va if not isinstance(points, (list, tuple)): points = [points, points] M, N = points u = pylab.linspace(ua, ub, M) v = pylab.linspace(va, vb, N) x, y, z = [pylab.zeros((M, N)) for i in xrange(3)] xab, yab, zab = [[0, 0] for i in xrange(3)] for n in xrange(N): for m in xrange(M): fdata = f(ctx.convert(u[m]), ctx.convert(v[n])) try: x[m, n], y[m, n], z[m, n] = fdata except TypeError: x[m, n], y[m, n], z[m, n] = u[m], v[n], fdata for c, cab in [(x[m, n], xab), (y[m, n], yab), (z[m, n], zab)]: if c < cab[0]: cab[0] = c if c > cab[1]: cab[1] = c if wireframe: axes.plot_wireframe(x, y, z, rstride=4, cstride=4) else: axes.plot_surface(x, y, z, rstride=4, cstride=4) axes.set_xlabel('x') axes.set_ylabel('y') axes.set_zlabel('z') if keep_aspect: dx, dy, dz = [cab[1] - cab[0] for cab in [xab, yab, zab]] maxd = max(dx, dy, dz) if dx < maxd: delta = maxd - dx axes.set_xlim3d(xab[0] - delta / 2.0, xab[1] + delta / 2.0) if dy < maxd: delta = maxd - dy axes.set_ylim3d(yab[0] - delta / 2.0, yab[1] + delta / 2.0) if dz < maxd: delta = maxd - dz axes.set_zlim3d(zab[0] - delta / 2.0, zab[1] + delta / 2.0) if fig: if file: pylab.savefig(file, dpi=dpi) else: pylab.show()
""" DDE where the delay depends on Y(t). """ from pylab import cos, linspace, subplots from ddeint import ddeint def model(Y, t): return -Y(t - 3 * cos(Y(t))**2) def values_before_zero(t): return 1 tt = linspace(0, 30, 2000) yy = ddeint(model, values_before_zero, tt) fig, ax = subplots(1, figsize=(4, 4)) ax.plot(tt, yy) ax.figure.savefig("variable_delay.jpeg")
def solve(self): """ """ s = '::: solving TransientSolver :::' text = colored(s, 'blue') print text firn = self.firn config = self.config fe = self.fe fv = self.fv fd = self.fd if config['age']['on']: fa = self.fa t0 = config['t_start'] tm = config['t_mid'] tf = config['t_end'] dt = config['time_step'] dt_list = config['dt_list'] if dt_list != None: numt1 = (tm-t0)/dt_list[0] + 1 # number of time steps numt2 = (tf-tm)/dt_list[1] + 1 # number of time steps times1 = linspace(t0,tm,numt1) # array of times to evaluate in seconds times2 = linspace(tm,tf,numt2) # array of times to evaluate in seconds dt1 = dt_list[0] * ones(len(times1)) dt2 = dt_list[1] * ones(len(times2)) times = hstack((times1,times2)) dts = hstack((dt1, dt2)) else: numt = (tf-t0)/dt + 1 # number of time steps times = linspace(t0,tf,numt) # array of times to evaluate in seconds dts = dt * ones(len(times)) firn.t = t0 self.times = times self.dts = dts for t,dt in zip(times[1:], dts[1:]): # update timestep : firn.dt = dt firn.dt_v.assign(dt) # update boundary conditions : firn.update_Hbc() firn.update_rhoBc() firn.update_wBc() #firn.update_omegaBc() # newton's iterative method : fe.solve() fd.solve() fv.solve() if config['age']['on']: fa.solve() # update firn object : firn.update_vars(t) firn.update_height_history() if config['free_surface']['on']: if dt_list != None: if t > tm+dt: firn.update_height() else: firn.update_height() # update model parameters : if t != times[-1]: firn.H_1.assign(firn.H) firn.U_1.assign(firn.U) firn.omega_1.assign(firn.omega) firn.w_1.assign(firn.w) firn.a_1.assign(firn.a) firn.m_1.assign(firn.m) # update the plotting parameters : if config['plot']['on']: self.plot.update_plot() #plt.draw() s = '>>> Time: %i yr <<<' text = colored(s, 'red', attrs=['bold']) print text % (t / firn.spy) if config['plot']['on']: pass
# [email protected]
#
# *********************************************

import pylab as pl
import PyOFTK

LMBD = (1.0526 + 0.00047) / (2 * 1.45)
LMBD2 = 0.34485
LNGTH = 100

fbg1 = PyOFTK.apodizedFBG(3.0, 62.5, 0.04, 0.0, 1e-1, 1e-2, LMBD2)
fbg2 = PyOFTK.apodizedFBG(3.0, 62.5, 0.04, 0.0, 1e-1, 5e-3, LMBD2)
fbg3 = PyOFTK.apodizedFBG(3.0, 62.5, 0.04, 0.0, 1e-1, 1e-3, LMBD2)

print "Bragg wavelength: " + str(fbg1.braggWavelength) + " um"

wvl1 = pl.linspace(fbg1.braggWavelength - 0.00047, fbg1.braggWavelength - 0.00050, LNGTH)
wvl2 = pl.linspace(fbg1.braggWavelength + 0.010, fbg1.braggWavelength + 0.050, LNGTH)

beta2Grating1 = pl.zeros(LNGTH, float)
beta2Grating2 = pl.zeros(LNGTH, float)
beta2Grating3 = pl.zeros(LNGTH, float)
beta3Grating1 = pl.zeros(LNGTH, float)
beta3Grating2 = pl.zeros(LNGTH, float)
beta3Grating3 = pl.zeros(LNGTH, float)

for i in range(LNGTH):
    beta2Grating1[i] = fbg1.gBeta2(wvl2[i]) * 1e24
    beta2Grating2[i] = fbg2.gBeta2(wvl2[i]) * 1e24
    beta2Grating3[i] = fbg3.gBeta2(wvl2[i]) * 1e24
    return pylab.log(x) + 2*pylab.log10(x)

def t(x):
    return pylab.sin(pylab.sqrt(abs(5*x)))

def u(x):
    return pylab.maximum(pylab.sin(x), pylab.cos(x)**2)

def v(x):
    return pylab.minimum(pylab.sin(x), pylab.cos(2*x))'''

# Set the range of the functions
a, b, n = -2 * pylab.pi, 2 * pylab.pi, 1000

# generate n points in [a, b] and store them in xs
xs = pylab.linspace(a, b, n)

# plot
pylab.plot(xs, f(xs), 'blue')
pylab.plot(xs, g(xs), 'green')
pylab.grid()
pylab.xlabel("X")
pylab.ylabel("Y")
pylab.title(
    "f(x)=max( abs(x sin(x)),abs(x cos(x)) ),g(x)=min( abs(x sin(x)), abs(x cos(x)) )"
)
pylab.savefig('Output/homework.png')
def plot_redundancy(self): """ Plot the fraction of (unique) packets received from the source (0.0 - 1.0) over the total number of packets received as fraction (0.0 - infty). A cubic curve is fit to the data points. """ if self.dimension > 0: return cursor = self.options['db_conn'].cursor() max_total, = cursor.execute(''' SELECT MAX(total) FROM eval_fracsOfHosts ''').fetchone() fig_all = MyFig(self.options, xlabel='Fraction of rx Packets (incl. duplicates)', ylabel='Fraction of rx Packets sent by %s' % self.src, legend=True, aspect='auto') fig_all.ax.plot([1, 1], [0, 1], linestyle='dashed', color='grey') fig_all_median = MyFig( self.options, xlabel='Fraction of rx Packets (incl. duplicates)', ylabel='Fraction of rx Packets sent by %s' % self.src, legend=True, aspect='auto') fig_all_median.ax.plot([1, 1], [0, 1], linestyle='dashed', color='grey') colors = self.options['color'](pylab.linspace(0, 0.8, len(self.tag_keys))) markers = self.options['markers'] max_x = 0 ellipses = [] for j, tag_key in enumerate(self.tag_keys): fig = MyFig(self.options, xlabel='Fraction of rx Packets (incl. duplicates)', ylabel='Fraction of rx Packets sent by %s' % self.src, aspect='auto') results = cursor.execute( ''' SELECT total, frac FROM eval_fracsOfHosts WHERE src=? AND tag_key=? ''', (self.src, tag_key)).fetchall() assert (len(results)) fig.ax.plot([1, 1], [0, 1], linestyle='-', color='grey') xvals = [x[0] for x in results] yvals = [y[1] for y in results] label = 'p=%.2f' % self.configurations['p'][j] fig.ax.scatter(xvals, yvals, s=15, color=colors[j]) fig.ax.set_xlim((0, max_total)) max_x = max(max_x, max_total) fig.ax.set_ylim((0, 1)) z = numpy.polyfit(xvals, yvals, 3) poly = numpy.poly1d(z) median_y = scipy.median(yvals) mean_y = scipy.mean(yvals) ci_y = confidence(yvals) median_x = scipy.median(xvals) mean_x = scipy.mean(xvals) ci_x = confidence(xvals) ellipse = Ellipse((mean_x, mean_y), ci_x[1] - ci_x[0], ci_y[1] - ci_y[0], edgecolor=colors[j], facecolor=colors[j], alpha=0.5) ellipses.append(ellipse) selected_xvals = numpy.arange(min(xvals), max(xvals), 0.4) fig.ax.plot(selected_xvals, poly(selected_xvals), "-", color=colors[j]) fig.ax.plot([0.0, 10], [median_y, median_y], linestyle="dashed", color=colors[j]) fig_all.ax.plot(selected_xvals, poly(selected_xvals), "-", color=colors[j], label=label, marker=markers[j]) fig_all.ax.plot([0.0, 10], [median_y, median_y], linestyle="dashed", color=colors[j], alpha=0.6, marker=markers[j]) fig_all_median.ax.plot(ci_x, [mean_y, mean_y], color=colors[j], label=label, marker=markers[j]) fig_all_median.ax.plot([mean_x, mean_x], ci_y, color=colors[j], marker=markers[j]) fig.save('redundancy_%s_%s' % (label, str(self.data_filter))) fig_all.ax.axis((0, max(max_x, 1), 0, 1)) fig_all_median.ax.axis((0, max(max_x, 1), 0, 1)) patch_collection = PatchCollection(ellipses, match_original=True) fig_all_median.ax.add_collection(patch_collection) fig_all.save('redundancy_%s' % str(self.data_filter)) fig_all_median.save('redundancy_median_%s' % str(self.data_filter))
def evaluate(self, x, derivative=0, smooth=0, simple='auto'): """ smooth=0 is how much to smooth the spline data simple='auto' is whether we should just use straight interpolation you may want smooth > 0 for this, when derivative=1 """ if simple == 'auto': simple = self.simple # make it into an array if it isn't one, and remember that we did is_array = True if not type(x) == type(_pylab.array([])): x = _pylab.array([x]) is_array = False if simple: # loop over all supplied x data, and come up with a y for each y = [] for n in range(0, len(x)): # get a window of data around x if smooth: [xtemp, ytemp, etemp] = _fun.trim_data(self.xdata, self.ydata, None, [x[n] - smooth, x[n] + smooth]) else: i1 = _fun.index_nearest(x[n], self.xdata) # if the nearest data point is lower than x, use the next point to interpolate if self.xdata[i1] <= x[n] or i1 <= 0: i2 = i1 + 1 else: i2 = i1 - 1 # if we're at the max, extrapolate if i2 >= len(self.xdata): print x[n], "is out of range. extrapolating" i2 = i1 - 1 x1 = self.xdata[i1] y1 = self.ydata[i1] x2 = self.xdata[i2] y2 = self.ydata[i2] slope = (y2 - y1) / (x2 - x1) xtemp = _numpy.array([x[n]]) ytemp = _numpy.array([y1 + (x[n] - x1) * slope]) # calculate the slope based on xtemp and ytemp (if smoothing) # or just use the raw slope if smoothing=0 if derivative == 1: if smooth: y.append( (_numpy.average(xtemp * ytemp) - _numpy.average(xtemp) * _numpy.average(ytemp)) / (_numpy.average(xtemp * xtemp) - _numpy.average(xtemp)**2)) else: y.append(slope) # otherwise just average (even with one element) elif derivative == 0: y.append(_numpy.average(ytemp)) if is_array: return _numpy.array(y) else: return y[0] if smooth: y = [] for n in range(0, len(x)): # take 20 data points from x+/-smooth xlow = max(self.xmin, x[n] - smooth) xhi = min(self.xmax, x[n] + smooth) xdata = _pylab.linspace(xlow, xhi, 20) ydata = _interpolate.splev(xdata, self.pfit, derivative) y.append(_numpy.average(ydata)) if is_array: return _numpy.array(y) else: return y[0] else: return _interpolate.splev(x, self.pfit, derivative)
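# The non-simple branch above smooths the splev output by averaging the spline
# (or its derivative) over 20 samples within +/- smooth of each x. A standalone
# sketch, assuming self.pfit comes from scipy.interpolate.splrep:
import numpy
from scipy import interpolate

xdata = numpy.linspace(0, 10, 100)
ydata = numpy.sin(xdata)
pfit = interpolate.splrep(xdata, ydata)           # spline representation (tck)

def smoothed_splev(x, pfit, smooth=0.5, derivative=0):
    # sample the spline on 20 points in [x - smooth, x + smooth] and average
    window = numpy.linspace(x - smooth, x + smooth, 20)
    return numpy.average(interpolate.splev(window, pfit, derivative))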
def _plot_propagation(options): """ Plot the fraction of packets each router received from its neighbors. This can be used to (visually) detect routers that depend on a particular neighbors. tags: Can be used with any tag/configuration. One plot is generated for each! """ locs = options['locs'] cursor = options['db_conn'].cursor() colors = options['color2'](pylab.linspace(0, 1, 101)) units2meter = options['units2meter'] ################################################################################# ## Get mapping of hosts and interface addresses ################################################################################# cursor.execute(''' SELECT DISTINCT(host), rx_if FROM rx ''') addr2host = {} for host, rx_if in cursor.fetchall(): addr2host[rx_if] = host ################################################################################ # Evaluate for all sources ################################################################################ for i, src in enumerate(options['src']): logging.info('src=%s (%d/%d)', src, i + 1, len(options['src'])) options['prefix'] = src ################################################################################# ## Get all hostnames ################################################################################# #cursor.execute('SELECT host FROM addr') #dsts = sorted([str(d[0]) for d in cursor.fetchall()]) ################################################################################ # Evaluate received packets for each tag for each node ################################################################################ tags = cursor.execute(''' SELECT key, id FROM tag ''').fetchall() for j, (tag_key, tag_id) in enumerate(tags): logging.info('\ttag=%s (%d/%d)', tag_id, j + 1, len(tags)) results = cursor.execute( ''' SELECT host, total, frac FROM eval_fracsOfHosts WHERE src=? AND tag_key=? ''', (src, tag_key)).fetchall() ################################################################################ # Draw figure for current tag ################################################################################ fig = MyFig(options, rect=[0.1, 0.1, 0.8, 0.7], xlabel='x Coordinate', ylabel='y Coordinate') fig3d = MyFig(options, rect=[0.1, 0.1, 0.8, 0.7], xlabel='x Coordinate', ylabel='y Coordinate', zlabel='z Coordinate', ThreeD=True) fig.ax.set_autoscalex_on(False) fig.ax.set_autoscaley_on(False) min_x = min_y = numpy.infty max_x = max_y = max_z = 0 circ_max = 5 line_max = 10 floor_factor = 2 floor_skew = -0.25 line_min = 1 # first draw the links.... for host, _total, _frac in results: try: xpos, ypos, zpos = locs[host] except KeyError: logging.warning('no position found for node %s', host) continue xpos = xpos * units2meter ypos = ypos * units2meter zpos = zpos * units2meter prevs = cursor.execute( ''' SELECT prev, frac FROM eval_prevHopFraction WHERE src=? AND tag_key=? and cur=? 
''', (src, tag_key, host)).fetchall() for prev, frac in prevs: try: prev_xpos, prev_ypos, prev_zpos = locs[addr2host[prev]] except KeyError: logging.warning('no position found for node %s', prev) continue prev_xpos = prev_xpos * units2meter prev_ypos = prev_ypos * units2meter prev_zpos = prev_zpos * units2meter fig.ax.plot([ xpos + zpos * floor_skew * floor_factor, prev_xpos + prev_zpos * floor_skew * floor_factor ], [ ypos + zpos * floor_factor, prev_ypos + prev_zpos * floor_factor ], linestyle='-', color=colors[frac * 100], linewidth=max(line_max * frac, line_min), alpha=0.3) fig3d.ax.plot([xpos, prev_xpos], [ypos, prev_ypos], [zpos, prev_zpos], linestyle='-', color=colors[frac * 100], linewidth=max(line_max * frac, line_min), alpha=0.3) # ...then draw the nodes for host, _total, frac in results: try: xpos, ypos, zpos = locs[host] except KeyError: logging.warning('no position found for node %s', host) continue xpos = xpos * units2meter ypos = ypos * units2meter zpos = zpos * units2meter max_x = max(xpos, max_x) max_y = max(ypos, max_y) min_x = min(xpos, min_x) min_y = min(ypos, min_y) max_z = max(zpos, max_z) fig.ax.plot(xpos + zpos * floor_skew * floor_factor, ypos + zpos * floor_factor, 'o', color=colors[int(frac * 100)], ms=max(frac * circ_max, 1)) fig3d.ax.plot([xpos], [ypos], [zpos], 'o', color=colors[int(frac * 100)], ms=max(frac * circ_max, 1)) drawBuildingContours(fig3d.ax, options) fig.ax.axis((min_x - 10, max_x + 10, min_y - 10, max_y + 10 + max_z * floor_factor + 10)) colorbar_ax = fig.fig.add_axes([0.1, 0.875, 0.8, 0.025]) colorbar_ax3d = fig3d.fig.add_axes([0.1, 0.875, 0.8, 0.025]) alinspace = numpy.linspace(0, 1, 100) alinspace = numpy.vstack((alinspace, alinspace)) for tax in [colorbar_ax, colorbar_ax3d]: tax.imshow(alinspace, aspect='auto', cmap=options['color2']) tax.set_xticks(range(0, 101, 25)) tax.set_xticklabels(numpy.arange(0.0, 101.0, 0.25), fontsize=0.8 * options['fontsize']) tax.set_yticks([]) tax.set_title(tag_id, size=options['fontsize']) fig.save('propagation2d_%s' % tag_id) fig3d.save('propagation3d_%s' % tag_id)
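# The colorbar at the end of _plot_propagation is drawn by hand: a thin axes
# displays a 2 x 100 gradient image and its x ticks are labelled with the
# fractions (0.0 to 1.0) that the colors encode. A standalone sketch of the
# same trick, with an assumed colormap:
import numpy
import pylab

fig = pylab.figure()
colorbar_ax = fig.add_axes([0.1, 0.875, 0.8, 0.025])            # thin strip above the plot
gradient = numpy.linspace(0, 1, 100)
gradient = numpy.vstack((gradient, gradient))                   # 2 x 100 image
colorbar_ax.imshow(gradient, aspect='auto', cmap=pylab.cm.jet)  # cmap is an assumption
colorbar_ax.set_xticks(range(0, 101, 25))
colorbar_ax.set_xticklabels(['%.2f' % v for v in numpy.linspace(0.0, 1.0, 5)])
colorbar_ax.set_yticks([])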
#!/usr/local/bin/python import sys pi = 3.14159265359 def cs(eta): return (1 + eta + eta**2 - eta**3) / (1 - eta)**3 * eta * 6. / pi if __name__ == "__main__": nopt = len(sys.argv) if nopt < 2: print "\n!! Provide an input packing fraction !!" if nopt == 2: P = cs(float(sys.argv[1])) print P if nopt == 3: import pylab as pl etas = pl.linspace(float(sys.argv[1]), float(sys.argv[2]), 100) pl.plot(etas, cs(etas)) pl.show()
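# The cs() routine above appears to evaluate the Carnahan-Starling reduced
# pressure of a hard-sphere fluid, beta*P*sigma^3 = Z(eta)*6*eta/pi with
# Z = (1 + eta + eta^2 - eta^3)/(1 - eta)^3 (my reading of the formula, not
# stated in the script). Usage sketch, assuming the file is saved as cs.py:
#   python cs.py 0.3          # prints the reduced pressure at eta = 0.3 (~2.28)
#   python cs.py 0.05 0.45    # plots cs(eta) between the two packing fractions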
Energy = 10000 #struct = pyasf.unit_cell("1521772") struct = pyasf.unit_cell("cif/LiNbO3_28294.cif") #Li Nb O3 Sub = reflectivity.Substrate(struct) v_par = sp.Matrix([0, 0, 1]) v_perp = sp.Matrix([1, 0, 0]) #2,1,0 Sub.calc_orientation(v_par, v_perp) layer1 = reflectivity.Epitaxial_Layer(struct, thickness) layer1.calc_orientation(v_par, v_perp) crystal = reflectivity.Sample(Sub, layer1) crystal.set_Miller(R) crystal.calc_g0_gH(Energy) thBragg = float( layer1.calc_Bragg_angle(Energy).subs(layer1.structure.subs).evalf()) angle = pl.linspace(0.9955, 1.0045, 501) * thBragg crystal.calc_reflectivity(angle, Energy) layer1.calc_amplitudes(angle, Energy) Sub.calc_amplitudes(angle, Energy) XRl = layer1.XR XRs = Sub.XR XT = layer1.XT crystal.print_values(angle, Energy) pl.plot(data[:, 0], data[:, 1], label='GID_sl', color='red') # pl.plot(angle-thBragg,abs(XT)**2-1) pl.plot(pl.degrees(angle - thBragg), abs(XRl)**2,
def _main(): parser = argparse.ArgumentParser() parser.add_argument("-m", "--model", default=[], nargs='*', type=str, help="Frozen model file to test") parser.add_argument("-fd", "--full_dimension", default=3, type=int, help="The dimensionality of FES") parser.add_argument("-ns", "--num_step", default=1000000, type=int, help="number of mc step") parser.add_argument("-nw", "--num_walker", default=2000, type=int, help="number of walker") parser.add_argument("-cv1", "--cv1_index", default=1, type=int, help="cv1 index") parser.add_argument("-cv2", "--cv2_index", default=2, type=int, help="cv2 index") args = parser.parse_args() model = args.model fd = args.full_dimension ns = args.num_step nw = args.num_walker cv1 = args.cv1_index cv2 = args.cv2_index positons = [] energies = [] forces = [] graph = load_graph(model[0]) bins = 25 xx = pylab.linspace(0, 2 * np.pi, bins) yy = pylab.linspace(0, 2 * np.pi, bins) pp_hist = np.zeros((fd, len(xx))) pp_hist2d = np.zeros((1, len(xx), len(yy))) delta = 2.0 * np.pi / bins with tf.Session(graph=graph) as sess: walker = Walker(fd, nw, sess) for ii in range(100): pp, ee, ff = walker.sample(compute_ef) for ii in range(ns): pp, ee, ff = walker.sample(compute_ef) ##all 1d pp_hist_new = my_hist1d(pp, xx, delta, fd) pp_hist = (pp_hist * ii + pp_hist_new) / (ii + 1) ##certain 2d pp_hist_new2d = my_hist2d(pp, xx, yy, delta, cv1, cv2) pp_hist2d = (pp_hist2d * ii + pp_hist_new2d) / (ii + 1) if np.mod(ii, 50000) == 0: zz = -np.log(pp_hist + 1e-7) / beta zz *= f_cvt / 4.184 ##kcal zz = zz - np.min(zz) for jj in range(fd): fp = open("1CV_index%d.dat" % jj, "a") for temp in zz[jj]: fp.write(str(temp) + ' ') fp.write('\n') fp.close() zz2d = np.transpose(-np.log(pp_hist2d + 1e-10), (0, 2, 1)) / beta zz2d *= f_cvt / 4.184 zz2d = zz2d - np.min(zz2d) np.savetxt("2CV_step%d.dat" % ii, zz2d[0])
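# The histogram accumulation above uses a running mean, so the full history of
# per-step histograms never has to be stored: after step ii the array equals
# the average of the ii+1 histograms seen so far. A standalone toy sketch:
import numpy as np

hist = np.zeros(25)
for ii in range(1000):
    new = np.random.rand(25)                 # stand-in for my_hist1d(pp, xx, delta, fd)
    hist = (hist * ii + new) / (ii + 1)      # running mean over the first ii+1 steps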
#VMT = 4*pi*RMuffinTin**3/3. # Volume of MT VMT = (4 / 3.) * pi * pow(RMuffinTin, 3) Vinter = fcc.Volume - VMT # Volume of the interstitial region print "Muffin-Tin radius = ", RMuffinTin print "Volume of the MT sphere = ", VMT print "Volume of the unit cell = ", fcc.Volume print "Volume of the interstitial = ", Vinter fcc.GenerateReciprocalVectors( 4, CutOffK ) # Reciprocal bravais lattice is built, K points taken into account only for |K|<CutOff fcc.ChoosePointsInFBZ( nkp, 0) # Choose the path in the 1BZ or the k-points in the irreducible 1BZ # Radial mesh -- only linear mesh can be used in connection to Numerov algorithm. R0 = linspace(0, RMuffinTin, N) R0[0] = 1e-10 R = R0[::-1] # Interstital overlap does not change through iterations Olap_I = ComputeInterstitialOverlap(fcc.Km, RMuffinTin, fcc.Volume) # We interpolate atomic charge on the new mesh within Muffin-Tin sphere TotRho = interpolate.splev(R0, AtomRhoSpline) for itt in range(Nitt): # self-consistent loop print '%d) Preparing potential' % itt UHartree = SolvePoisson(Z, R0, TotRho) # Adding exchange-correlation part Vxc = [XC.Vx(rsi) + XC.Vc(rsi) for rsi in rs(TotRho)]
def plot(self, cmap, filename=None, starttime=T1, endtime=T2, show_percentiles=False, percentiles=[10, 50, 90], show_class_models=True, grid=True, title_comment=False): """ Plot the QC resume figure If a filename is specified the plot is saved to this file, otherwise a plot window is shown. :type filename: str (optional) :param filename: Name of output file :type show_percentiles: bool (optional) :param show_percentiles: Enable/disable plotting of approximated percentiles. These are calculated from the binned histogram and are not the exact percentiles. :type percentiles: list of ints :param percentiles: percentiles to show if plotting of percentiles is selected. :type show_class_models: bool (optional) :param show_class_models: Enable/disable plotting of class models. :type grid: bool (optional) :param grid: Enable/disable grid in histogram plot. :type cmap: cmap :param cmap: Colormap for PPSD. """ # COMMON PARAMETERS psd_db_limits = (-180, -110) psdh_db_limits = (-200, -90) f_limits = (5e-3, 20) per_left = (10, 1, .1) per_right = (100, 10, 1) # ----------------- # Select Time window # ----------- times_used = array(self.times_used) starttime = max(min(times_used), starttime) endtime = min(max(times_used), endtime) bool_times_select = (times_used > starttime) & (times_used < endtime) times_used = times_used[bool_times_select] psd = self.psd[bool_times_select, :] spikes = self.spikes[bool_times_select] hist_stack = self._QC__get_ppsd(time_lim=(starttime, endtime)) Hour = arange(0, 23, 1) HourUsed = array([t.hour for t in times_used]) Day_span = (endtime - starttime) / 86400. # ----------- # FIGURE and AXES fig = plt.figure(figsize=(9.62, 13.60), facecolor='w', edgecolor='k') ax_ppsd = fig.add_axes([0.1, 0.68, 0.9, 0.28]) ax_coverage = fig.add_axes([0.1, 0.56, 0.64, 0.04]) ax_spectrogram = fig.add_axes([0.1, 0.31, 0.64, 0.24]) ax_spectrogramhour = fig.add_axes([0.76, 0.31, 0.20, 0.24]) ax_freqpsd = fig.add_axes([0.1, 0.18, 0.64, 0.12]) ax_freqpsdhour = fig.add_axes([0.76, 0.18, 0.20, 0.12]) ax_spikes = fig.add_axes([0.1, 0.05, 0.64, 0.12]) ax_spikeshour = fig.add_axes([0.76, 0.05, 0.20, 0.12]) ax_col_spectrogram = fig.add_axes([0.76, 0.588, 0.20, 0.014]) ax_col_spectrogramhour = fig.add_axes([0.76, 0.57, 0.20, 0.014]) ########################### COVERAGE ax_coverage.xaxis_date() ax_coverage.set_yticks([]) # plot data coverage starts = date2num([a.datetime for a in times_used]) ends = date2num([a.datetime for a in times_used + PPSD_LENGTH]) for start, end in zip(starts, ends): ax_coverage.axvspan(start, end, 0, 0.7, alpha=0.5, lw=0) # plot data really available aa = [(start, end) for start, end in self.times_data if ( (end - start) > PPSD_LENGTH)] # avoid very small gaps otherwise very long to plot for start, end in aa: start = date2num(start.datetime) end = date2num(end.datetime) ax_coverage.axvspan(start, end, 0.7, 1, facecolor="g", lw=0) # plot gaps aa = [(start, end) for start, end in self.times_gaps if ( (end - start) > PPSD_LENGTH)] # avoid very small gaps otherwise very long to plot for start, end in aa: start = date2num(start.datetime) end = date2num(end.datetime) ax_coverage.axvspan(start, end, 0.7, 1, facecolor="r", lw=0) # Compute uncovered periods starts_uncov = ends[:-1] ends_uncov = starts[1:] # Keep only major uncovered periods ga = (ends_uncov - starts_uncov) > (PPSD_LENGTH) / 86400 starts_uncov = starts_uncov[ga] ends_uncov = ends_uncov[ga] ax_coverage.set_xlim(starttime.datetime, endtime.datetime) # labels ax_coverage.xaxis.set_ticks_position('top') 
ax_coverage.tick_params(direction='out') ax_coverage.xaxis.set_major_locator(mdates.AutoDateLocator()) if Day_span > 5: ax_coverage.xaxis.set_major_formatter(DateFormatter('%D')) else: ax_coverage.xaxis.set_major_formatter(DateFormatter('%D-%Hh')) for label in ax_coverage.get_xticklabels(): label.set_fontsize(10) for label in ax_coverage.get_xticklabels(): label.set_ha("right") label.set_rotation(-25) ########################### SPECTROGRAM ax_spectrogram.xaxis_date() t = date2num([a.datetime for a in times_used]) f = 1. / self.per_octaves T, F = np.meshgrid(t, f) spectro = ax_spectrogram.pcolormesh( T, F, transpose(psd), cmap=spectro_cmap) spectro.set_clim(*psd_db_limits) spectrogram_colorbar = colorbar(spectro, cax=ax_col_spectrogram, orientation='horizontal', ticks=linspace(psd_db_limits[0], psd_db_limits[1], 5), format='%i') spectrogram_colorbar.set_label("dB") spectrogram_colorbar.set_clim(*psd_db_limits) spectrogram_colorbar.ax.xaxis.set_ticks_position('top') spectrogram_colorbar.ax.xaxis.label.set_position((1.1, .2)) spectrogram_colorbar.ax.yaxis.label.set_horizontalalignment('left') spectrogram_colorbar.ax.yaxis.label.set_verticalalignment('bottom') ax_spectrogram.grid(which="major") ax_spectrogram.semilogy() ax_spectrogram.set_ylim(f_limits) ax_spectrogram.set_xlim(starttime.datetime, endtime.datetime) ax_spectrogram.set_xticks(ax_coverage.get_xticks()) setp(ax_spectrogram.get_xticklabels(), visible=False) ax_spectrogram.yaxis.set_major_formatter(FormatStrFormatter("%.2f")) ax_spectrogram.set_ylabel('Frequency [Hz]') ax_spectrogram.yaxis.set_label_coords(-0.08, 0.5) ########################### SPECTROGRAM PER HOUR #psdH=array([array(psd[HourUsed==h,:]).mean(axis=0) for h in Hour]) psdH = zeros((size(Hour), size(self.per_octaves))) for i, h in enumerate(Hour): a = array(psd[HourUsed == h, :]) A = ma.masked_array( a, mask=~((a > psdh_db_limits[0]) & (a < psdh_db_limits[1]))) psdH[i, :] = ma.getdata(A.mean(axis=0)) psdH = array([psdH[:, i] - psdH[:, i].mean() for i in arange(0, psdH.shape[1])]) H24, F = np.meshgrid(Hour, f) spectroh = ax_spectrogramhour.pcolormesh(H24, F, psdH, cmap=cm.RdBu_r) spectroh.set_clim(-8, 8) spectrogram_per_hour_colorbar = colorbar(spectroh, cax=ax_col_spectrogramhour, orientation='horizontal', ticks=linspace(-8, 8, 5), format='%i') spectrogram_per_hour_colorbar.set_clim(-8, 8) ax_spectrogramhour.semilogy() ax_spectrogramhour.set_xlim((0, 23)) ax_spectrogramhour.set_ylim(f_limits) ax_spectrogramhour.set_xticks(arange(0, 23, 4)) ax_spectrogramhour.set_xticklabels(arange(0, 23, 4), visible=False) ax_spectrogramhour.yaxis.set_ticks_position('right') ax_spectrogramhour.yaxis.set_label_position('right') ax_spectrogramhour.yaxis.grid(True) ax_spectrogramhour.xaxis.grid(False) ########################### PSD BY PERIOD RANGE t = date2num([a.datetime for a in times_used]) ax_freqpsd.xaxis_date() for pp in zip(per_left, per_right): mpsd = self._QC__get_psd(time_lim=(starttime, endtime), per_lim=pp) mpsdH = zeros(size(Hour)) + NaN for i, h in enumerate(Hour): a = array(mpsd[HourUsed == h]) A = ma.masked_array( a, mask=~((a > psdh_db_limits[0]) & (a < psdh_db_limits[1]))) mpsdH[i] = ma.getdata(A.mean()) ax_freqpsd.plot(t, mpsd) ax_freqpsdhour.plot(Hour, mpsdH - mpsdH.mean()) ax_freqpsd.set_ylim(psd_db_limits) ax_freqpsd.set_xlim(starttime.datetime, endtime.datetime) ax_freqpsd.set_xticks(ax_coverage.get_xticks()) setp(ax_freqpsd.get_xticklabels(), visible=False) ax_freqpsd.set_ylabel('Amplitude [dB]') ax_freqpsd.yaxis.set_label_coords(-0.08, 0.5) 
ax_freqpsd.yaxis.grid(False) ax_freqpsd.xaxis.grid(True) ########################### PSD BY PERIOD RANGE PER HOUR ax_freqpsdhour.set_xlim((0, 23)) ax_freqpsdhour.set_ylim((-8, 8)) ax_freqpsdhour.set_yticks(arange(-6, 7, 2)) ax_freqpsdhour.set_xticks(arange(0, 23, 4)) ax_freqpsdhour.set_xticklabels(arange(0, 23, 4), visible=False) ax_freqpsdhour.yaxis.set_ticks_position('right') ax_freqpsdhour.yaxis.set_label_position('right') ########################### SPIKES ax_spikes.xaxis_date() ax_spikes.bar(t, spikes, width=1. / 24) ax_spikes.set_ylim((0, 50)) ax_spikes.set_xlim(starttime.datetime, endtime.datetime) ax_spikes.set_yticks(arange(10, 45, 10)) ax_spikes.set_xticks(ax_coverage.get_xticks()) #setp(ax_spikes.get_xticklabels(), visible=False) ax_spikes.set_ylabel("Detections [#/hour]") ax_spikes.yaxis.set_label_coords(-0.08, 0.5) ax_spikes.yaxis.grid(False) ax_spikes.xaxis.grid(True) # labels ax_spikes.xaxis.set_ticks_position('bottom') ax_spikes.tick_params(direction='out') ax_spikes.xaxis.set_major_locator(mdates.AutoDateLocator()) if Day_span > 5: ax_spikes.xaxis.set_major_formatter(DateFormatter('%D')) else: ax_spikes.xaxis.set_major_formatter(DateFormatter('%D-%Hh')) for label in ax_spikes.get_xticklabels(): label.set_fontsize(10) for label in ax_spikes.get_xticklabels(): label.set_ha("right") label.set_rotation(25) ########################### SPIKES PER HOUR mspikesH = array([array(spikes[[HourUsed == h]]).mean() for h in Hour]) ax_spikeshour.bar(Hour, mspikesH - mspikesH.mean(), width=1.) ax_spikeshour.set_xlim((0, 23)) ax_spikeshour.set_ylim((-8, 8)) ax_spikeshour.set_xticks(arange(0, 23, 4)) ax_spikeshour.set_yticks(arange(-6, 7, 2)) ax_spikeshour.set_ylabel("Daily variation") ax_spikeshour.set_xlabel("Hour [UTC]") ax_spikeshour.yaxis.set_ticks_position('right') ax_spikeshour.yaxis.set_label_position('right') ax_spikeshour.yaxis.set_label_coords(1.3, 1) ########################### plot gaps for start, end in zip(starts_uncov, ends_uncov): ax_spectrogram.axvspan( start, end, 0, 1, facecolor="w", lw=0, zorder=100) ax_freqpsd.axvspan( start, end, 0, 1, facecolor="w", lw=0, zorder=100) ax_spikes.axvspan(start, end, 0, 1, facecolor="w", lw=0, zorder=100) # LEGEND leg = [str(xx) + '-' + str(yy) + ' s' for xx, yy in zip(per_left, per_right)] hleg = ax_freqpsd.legend( leg, loc=3, bbox_to_anchor=(-0.015, 0.75), ncol=size(leg)) for txt in hleg.get_texts(): txt.set_fontsize(8) # PPSD X, Y = np.meshgrid(self.xedges, self.yedges) ppsd = ax_ppsd.pcolormesh(X, Y, hist_stack.T, cmap=cmap) ppsd_colorbar = plt.colorbar(ppsd, ax=ax_ppsd) ppsd_colorbar.set_label("PPSD [%]") color_limits = (0, 30) ppsd.set_clim(*color_limits) ppsd_colorbar.set_clim(*color_limits) ax_ppsd.grid(b=grid, which="major") if show_percentiles: hist_cum = self.__get_normalized_cumulative_histogram( time_lim=(starttime, endtime)) # for every period look up the approximate place of the percentiles for percentile in percentiles: periods, percentile_values = self.get_percentile( percentile=percentile, hist_cum=hist_cum, time_lim=(starttime, endtime)) ax_ppsd.plot(periods, percentile_values, color="black") # Noise models model_periods, high_noise = get_nhnm() ax_ppsd.plot(model_periods, high_noise, '0.4', linewidth=2) model_periods, low_noise = get_nlnm() ax_ppsd.plot(model_periods, low_noise, '0.4', linewidth=2) if show_class_models: classA_periods, classA_noise, classB_periods, classB_noise = get_class() ax_ppsd.plot(classA_periods, classA_noise, 'r--', linewidth=3) ax_ppsd.plot(classB_periods, classB_noise, 'g--', linewidth=3) 
ax_ppsd.semilogx() ax_ppsd.set_xlim(1. / f_limits[1], 1. / f_limits[0]) ax_ppsd.set_ylim((-200, -80)) ax_ppsd.set_xlabel('Period [s]') ax_ppsd.get_xaxis().set_label_coords(0.5, -0.05) ax_ppsd.set_ylabel('Amplitude [dB]') ax_ppsd.xaxis.set_major_formatter(FormatStrFormatter("%.2f")) # TITLE title = "%s %s -- %s (%i segments)" title = title % (self.id, starttime.date, endtime.date, len(times_used)) if title_comment: fig.text(0.82, 0.978, title_comment, bbox=dict( facecolor='red', alpha=0.5), fontsize=15) ax_ppsd.set_title(title) # a=str(UTCDateTime().format_iris_web_service()) plt.draw() if filename is not None: plt.savefig(filename) plt.close() else: plt.show()
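# One detail worth noting from the spectrogram-per-hour block above: PSD values
# outside the accepted dB window are masked before averaging, so outliers do
# not bias the hourly mean. A standalone sketch with assumed limits and data:
import numpy as np
import numpy.ma as ma

psdh_db_limits = (-200, -90)
a = np.array([-250.0, -150.0, -140.0, -60.0])   # PSD values collected for one hour
A = ma.masked_array(a, mask=~((a > psdh_db_limits[0]) & (a < psdh_db_limits[1])))
hour_mean = ma.getdata(A.mean())                # -145.0: only the in-range values count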
def Atom_charge(Z, core, mix=0.3, RmaxAtom=10., Natom=3001, precision=1e-5, Nitt=100): #def Atom_charge(Z, core, mix=0.3, RmaxAtom=10., Natom=3001, precision=1e-5, Nitt=1000): """ Computes Atomic electronic density and atomic Energy Input: Z -- Nucleolus charge core -- States treated as core in LAPW (example: [3,2,0] # 1s,2s,3s, 1p,2p, no-d) mix -- Mixing parameter for density RmaxAtom -- The end of the radial mesh (maximum r) Natom -- Number of points in radial mesh precision -- How precise total energy we need Nitt -- Maximum number of itterations """ XC = excor.ExchangeCorrelation(5) #XC = excor.ExchangeCorrelation(3) # Exchange correlations class; VWN seems to be the best (look http://physics.nist.gov/PhysRefData/DFTdata/Tables/ptable.html) R0 = linspace(1e-10, RmaxAtom, Natom) # Radial mesh Ra = R0[::-1] # Inverse radial mesh Veff = -ones(len(Ra), dtype=float) / Ra catm = [c + 1 for c in core ] # We add one more state to core to get atomic states Etot_old = 0 # Finds bound states (coreRho, coreE, coreZ, states) = FindCoreStates(catm, Ra, Veff, Z) # Sorts them according to energy states.sort(Atom_cmpb) # Computes charge (rho, Ebs) = Atom_ChargeDensity(states, Ra, Veff, Z) rho = rho[::-1] for itt in range(Nitt): # Here we have increasing R -> # Hartree potential UHartree = SolvePoisson(Z, R0, rho) # Adding exchange-correlation part Vxc = [XC.Vx(rsi) + XC.Vc(rsi) for rsi in rs(rho)] ExcVxc = [XC.EcVc(rsi) + XC.ExVx(rsi) for rsi in rs(rho)] Veff = (UHartree - Z) / R0 + Vxc Veff = Veff[::-1] # Here we have decreasing R <- # Finds bound states (coreRho, coreE, coreZ, states) = FindCoreStates(catm, Ra, Veff, Z) # Sorts them according to energy states.sort(Atom_cmpb) # Computes charge (nrho, Ebs) = Atom_ChargeDensity(states, Ra, Veff, Z) # Total energy #pot = (ExcVxc*R0**2-0.5*UHartree*R0)*nrho[::-1]*4*pi pot = (ExcVxc * pow(R0, 2) - 0.5 * UHartree * R0) * nrho[::-1] * 4 * pi Etot = integrate.simps(pot, R0) + Ebs Ediff = abs(Etot - Etot_old) print RED, ' %d), Etot = %f, Eband = %f, Ediff = %f' % ( itt, Etot, Ebs, Ediff), DEFAULT_COLOR #print ' %d), Etot = %f, Eband = %f, Ediff = %f' % (itt, Etot, Ebs, Ediff) # Mixing rho = mix * nrho[::-1] + (1 - mix) * rho Etot_old = Etot if Ediff < precision: break return (R0, rho)
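# The SCF loop above stabilizes convergence with simple linear mixing: each
# iteration blends the freshly computed density into the previous one with
# weight mix. A toy standalone fixed-point iteration showing the same update:
import numpy as np

mix = 0.3
rho = np.ones(10)                         # starting guess
for itt in range(200):
    nrho = np.sqrt(rho + 1.0)             # stand-in for the newly computed density
    rho_new = mix * nrho + (1 - mix) * rho
    if np.abs(rho_new - rho).max() < 1e-10:
        break
    rho = rho_new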
        pass
    return my_fun2

"""
Pylab
-----------------
It can also capture matplotlib figures on the fly, maintaining all the
configuration in the appropriate way.
"""
import pylab
from pylab import show

fig, ax = pylab.subplots(1, 1, figsize=(8, 4))
x = pylab.linspace(0, 10, 101)
ax.plot(x, x**2)
show()
"""To show the plot it is necessary to call the show function explicitly; no
shortcut is available. The show function shows every figure that has not
already been shown, so calling it twice in a row does nothing the second time.
"""
pylab.show()
"""If you want to show a figure a second time, you have to call the specific
:code:`figure.show` method.
"""
fig.show()
"""If external libraries are used, they interact in the expected way.
"""
def plot_box(self): """ Plots the fraction of nodes that received a particular packet from the source as a box-and-whisker with the probability p on the x-axis. """ logging.debug('') configurations_per_variant = self.configurations_per_variant gossip_variants_count = len(self.configurations['gossip']) colors = self.options['color'](pylab.linspace( 0, 0.8, configurations_per_variant)) labels = [] for li_of_frac in self.label_info: s = str() for i, (param, value) in enumerate(li_of_frac): if i > 0: s += '\n' s += '%s=%s' % (_cname_to_latex(param), value) labels.append(s) labels *= len(self.configurations['p']) ps = list( pylab.flatten(self.configurations_per_p * [p] for p in self.configurations['p'])) ################################################################# # box plot ################################################################# array = numpy.zeros([len(self.fraction_of_nodes[0]), self.length()]) for i, fracs in enumerate(self.fraction_of_nodes): array[:, i] = fracs fig = MyFig(self.options, rect=[0.1, 0.2, 0.8, 0.75], figsize=(max(self.length(), 10), 10), xlabel='Probability p', ylabel='Fraction of Nodes', aspect='auto') fig.ax.set_ylim(0, 1) box_dict = fig.ax.boxplot(array, notch=1, sym='rx', vert=1) #box_dict = fig.ax.boxplot(array, notch=1, sym='rx', vert=1, patch_artist=False) for j, box in enumerate(box_dict['boxes']): j = (j % self.configurations_per_p) box.set_color(colors[j]) for _flier in box_dict['fliers']: _flier.set_color('lightgrey') fig.ax.set_xticklabels(ps, fontsize=self.options['fontsize'] * 0.6) # draw vertical line to visually mark different probabilities for x in range(0, self.length(), self.configurations_per_p): fig.ax.plot([x + 0.5, x + 0.5], [0.0, 1.0], linestyle='dotted', color='red', alpha=0.8) ################################################################# # create some dummy elements for the legend ################################################################# if configurations_per_variant > 1: proxies = [] for i in range(0, configurations_per_variant): r = Rectangle((0, 0), 1, 1, edgecolor=colors[i % configurations_per_variant], facecolor='white') proxies.append((r, labels[i])) fig.ax.legend([proxy for proxy, label in proxies], [label for proxy, label in proxies], loc='lower right') self.figures['boxplot'] = fig.save('boxplot_' + str(self.data_filter))
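# boxplot() returns unlabeled artists, so the legend in plot_box is built from
# proxy Rectangle patches that exist only to carry a color and a label. A
# standalone sketch of that trick, with assumed colors and labels:
import pylab
from matplotlib.patches import Rectangle

fig, ax = pylab.subplots()
colors = ['red', 'blue']                                  # assumed variant colors
proxies = [Rectangle((0, 0), 1, 1, edgecolor=c, facecolor='white') for c in colors]
ax.legend(proxies, ['variant A', 'variant B'], loc='lower right')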
def main(): import optparse from numpy import sum # Parse command line parser = optparse.OptionParser(usage=USAGE) parser.add_option("-p", "--plot", action="store_true", help="Generate pdf with IR-spectrum") parser.add_option("-i", "--info", action="store_true", help="Set up/ Calculate vibrations & quit") parser.add_option("-s", "--suffix", action="store", help="Call suffix for binary e.g. 'mpirun -n 4 '", default='') parser.add_option("-r", "--run", action="store", help="path to FHI-aims binary",default='') parser.add_option("-x", "--relax", action="store_true", help="Relax initial geometry") parser.add_option("-m", "--molden", action="store_true", help="Output in molden format") parser.add_option("-w", "--distort", action="store_true", help="Output geometry distorted along imaginary modes") parser.add_option("-t", "--submit", action="store", help="""\ Path to submission script, string <jobname> will be replaced by name + counter, string <outfile> will be replaced by filename""") parser.add_option("-d", "--delta", action="store", type="float", help="Displacement", default=0.0025) options, args = parser.parse_args() if options.info: print __doc__ sys.exit(0) if len(args) != 2: parser.error("Need exactly two arguments") AIMS_CALL=options.suffix+' '+options.run hessian_thresh = -1 name=args[0] mode=args[1] delta=options.delta run_aims=False if options.run!='': run_aims=True submit_script = options.submit is not None if options.plot: import matplotlib as mpl mpl.use('Agg') from pylab import figure if options.plot or mode=='1': from pylab import savetxt, transpose, eig, argsort, sort,\ sign, pi, dot, sum, linspace, argmin, r_, convolve # Constant from scipy.constants bohr=constants.value('Bohr radius')*1.e10 hartree=constants.value('Hartree energy in eV') at_u=constants.value('atomic mass unit-kilogram relationship') eV=constants.value('electron volt-joule relationship') c=constants.value('speed of light in vacuum') Ang=1.0e-10 hbar=constants.value('Planck constant over 2 pi') Avo=constants.value('Avogadro constant') kb=constants.value('Boltzmann constant in eV/K') hessian_factor = eV/(at_u*Ang*Ang) grad_dipole_factor=(eV/(1./(10*c)))/Ang #(eV/Ang -> D/Ang) ir_factor = 1 # Asign all filenames inputgeomerty = 'geometry.in.'+name inputcontrol = 'control.in.'+name atomicmasses = 'masses.'+name+'.dat'; xyzfile = name+'.xyz'; moldenname =name+'.molden'; hessianname = 'hessian.'+name+'.dat'; graddipolename = 'grad_dipole.'+name+'.dat'; irname = 'ir.'+name+'.dat'; deltas=array([-delta,delta]) coeff=array([-1,1]) c_zero = - 1. / (2. * delta) f=open('control.in','r') # read control.in template template_control=f.read() f.close if submit_script: f=open(options.submit,'r') # read submission script template template_job=f.read() f.close folder='' # Dummy ########### Central Point ################################################## if options.relax and mode=='0': # First relax input geometry filename=name+'.out' folder=name+'_relaxation' if not os.path.exists(folder): os.mkdir(folder) # Create folder shutil.copy('geometry.in', folder+'/geometry.in') # Copy geometry new_control=open(folder+'/control.in','w') new_control.write(template_control+'relax_geometry trm 1E-3\n') # Relax! 
new_control.close() os.chdir(folder) # Change directoy print 'Central Point' if run_aims: os.system(AIMS_CALL+' > '+filename) # Run aims and pipe the output # into a file named 'filename' if submit_script: replace_submission(template_job, name, 0, filename) os.chdir('..') ############################################################################ # Check for relaxed geometry if os.path.exists(folder+'/geometry.in.next_step'): geometry=open(folder+'/geometry.in.next_step','r') else: geometry=open('geometry.in','r') # Read input geometry n_line=0 struc=structure() lines=geometry.readlines() for line in lines: n_line= n_line+1 if line.rfind('set_vacuum_level')!=-1: # Vacuum Level struc.vacuum_level=float(split_line(line)[-1]) if line.rfind('lattice_vector')!=-1: # Lattice vectors and periodic lat=split_line(line)[1:] struc.lattic_vector=append(struc.lattic_vector,float64(array(lat)) [newaxis,:],axis=0) struc.periodic=True if line.rfind('atom')!=-1: # Set atoms line_vals=split_line(line) at=Atom(line_vals[-1],line_vals[1:-1]) if n_line<len(lines): nextline=lines[n_line] if nextline.rfind('constrain_relaxation')!=-1: # constrained? at=Atom(line_vals[-1],line_vals[1:-1],True) else: at=Atom(line_vals[-1],line_vals[1:-1]) struc.join(at) geometry.close() n_atoms= struc.n() n_constrained=n_atoms-sum(struc.constrained) # Atomic mass file mass_file=open(atomicmasses,'w') mass_vector=zeros([0]) for at_unconstrained in struc.atoms[struc.constrained==False]: mass_vector=append(mass_vector,ones(3)*1./sqrt(at_unconstrained.mass())) line='{0:10.5f}'.format(at_unconstrained.mass()) for i in range(3): line=line+'{0:11.4f}'.format(at_unconstrained.coord[i]) line=line+'{0:}\n'.format(at_unconstrained.kind) mass_file.writelines(line) mass_file.close() # Init dip = zeros([n_constrained*3,3]) hessian = zeros([n_constrained*3,n_constrained*3]) index=0 counter=1 # Set up / Read folders for displaced atoms for atom in arange(n_atoms)[struc.constrained==False]: for coord in arange(3): for delta in deltas: filename=name+'.i_atom_'+str(atom)+'.i_coord_'+str(coord)+'.displ_'+\ str(delta)+'.out' folder=name+'.i_atom_'+str(atom)+'.i_coord_'+str(coord)+'.displ_'+\ str(delta) if mode=='0': # Put new geometry and control.in into folder struc_new=copy.deepcopy(struc) struc_new.atoms[atom].coord[coord]=\ struc_new.atoms[atom].coord[coord]+delta geoname='geometry.i_atom_'+str(atom)+'.i_coord_'+str(coord)+\ '.displ_'+str(delta)+'.in' if not os.path.exists(folder): os.mkdir(folder) new_geo=open(folder+'/geometry.in','w') newline='#\n# temporary structure-file for finite-difference '+\ 'calculation of forces\n' newline=newline+'# displacement {0:8.4f} of \# atom '.format(delta)+\ '{0:5} direction {1:5}\n#\n'.format(atom,coord) new_geo.writelines(newline+struc_new.to_str()) new_geo.close() ######################### # Editing starts # Copying restart files for occupation calculation shutil.copy("path_to_restart_files"+folder+"/restart", folder) # Editing ends ######################### new_control=open(folder+'/control.in','w') template_control=template_control.replace('relax_geometry', '#relax_geometry') new_control.write(template_control+'compute_forces .true. \n'+\ 'final_forces_cleaned '+\ '.true. 
\noutput dipole \n') new_control.close() os.chdir(folder) # Change directoy print 'Processing atom: '+str(atom+1)+'/'+str(n_atoms)+', coord.: '+\ str(coord+1)+'/'+str(3)+', delta: '+str(delta) if run_aims: os.system(AIMS_CALL+' > '+filename)# Run aims and pipe the output # into a file named 'filename' if submit_script: replace_submission(template_job, name, counter, filename) # os.system('qsub job.sh') # Mind the environment variables os.chdir('..') if mode=='1': # Read output forces_reached=False atom_count=0 data=open(folder+'/'+filename) for line in data.readlines(): if line.rfind('Dipole correction potential jump')!=-1: dip_jump = float(split_line(line)[-2]) # Periodic if line.rfind('| Total dipole moment [eAng]')!=-1: dip_jump = float64(split_line(line)[-3:]) # Cluster if forces_reached and atom_count<n_atoms: # Read Forces struc.atoms[atom_count].force=float64(split_line(line)[2:]) atom_count=atom_count+1 if atom_count==n_atoms: forces_reached=False if line.rfind('Total atomic forces')!=-1: forces_reached=True data.close() if struc.periodic: dip[index,2]=dip[index,2]+dip_jump*coeff[deltas==delta]*c_zero else: dip[index,:]=dip[index,:]+dip_jump*coeff[deltas==delta]*c_zero forces=array([]) for at_unconstrained in struc.atoms[struc.constrained==False]: forces=append(forces,coeff[deltas==delta]*at_unconstrained.force) hessian[index,:]=hessian[index,:]+forces*c_zero counter=counter+1 index=index+1 if mode=='1': # Calculate vibrations print 'Entering hessian diagonalization' print 'Number of atoms = '+str(n_atoms) print 'Name of Hessian input file = '+hessianname print 'Name of grad dipole input file = '+graddipolename print 'Name of Masses input file = '+atomicmasses print 'Name of XYZ output file = '+xyzfile print 'Threshold for Matrix elements = '+str(hessian_thresh) if (hessian_thresh < 0.0): print ' All matrix elements are taken'+\ ' into account by default\n' savetxt(hessianname,hessian) savetxt(graddipolename,dip) mass_mat=mass_vector[:,newaxis]*mass_vector[newaxis,:] hessian[abs(hessian)<hessian_thresh]=0.0 hessian=hessian*mass_mat*hessian_factor hessian=(hessian+transpose(hessian))/2. # Diagonalize hessian (scipy) print 'Solving eigenvalue system for Hessian Matrix' freq, eig_vec = eig(hessian) print 'Done ... ' eig_vec=eig_vec[:,argsort(freq)] freq=sort(sign(freq)*sqrt(abs(freq))) ZPE=hbar*(freq)/(2.0*eV) freq = (freq)/(200.*pi*c) grad_dipole = dip * grad_dipole_factor eig_vec = eig_vec*mass_vector[:,newaxis]*ones(len(mass_vector))[newaxis,:] infrared_intensity = sum(dot(transpose(grad_dipole),eig_vec)**2,axis=0)*\ ir_factor reduced_mass=sum(eig_vec**2,axis=0) norm = sqrt(reduced_mass) eig_vec = eig_vec/norm # The rest is output, xyz, IR,... print 'Results\n' print 'List of all frequencies found:' print 'Mode number Frequency [cm^(-1)] Zero point energy [eV] '+\ 'IR-intensity [D^2/Ang^2]' for i in range(len(freq)): print '{0:11}{1:25.8f}{2:25.8f}{3:25.8f}'.format(i+1,freq[i],ZPE[i], infrared_intensity[i]) print '\n' print 'Summary of zero point energy for entire system:' print '| Cumulative ZPE = {0:15.8f} eV'.format(sum(ZPE)) print '| without first six eigenmodes = {0:15.8f} eV\n'.format(sum(ZPE)- sum(ZPE[:6])) print 'Stability checking - eigenvalues should all be positive for a '+\ 'stable structure. 
' print 'The six smallest frequencies should be (almost) zero:' string='' for zz in ZPE[:6]: string=string+'{0:25.8f}'.format(zz) print string print 'Compare this with the largest eigenvalue, ' print '{0:25.8f}'.format(freq[-1]) nums=arange(n_atoms)[struc.constrained==False] nums2=arange(n_atoms)[struc.constrained] newline='' newline_ir='[INT]\n' if options.molden: newline_molden='[Molden Format]\n[GEOMETRIES] XYZ\n' newline_molden=newline_molden+'{0:6}\n'.format(n_atoms)+'\n' for i_atoms in range(n_constrained): newline_molden=newline_molden+'{0:6}'.format( struc.atoms[nums[i_atoms]].kind) for i_coord in range(3): newline_molden=newline_molden+'{0:10.4f}'.format( struc.atoms[nums[i_atoms]].coord[i_coord]) newline_molden=newline_molden+'\n' newline_molden=newline_molden+'[FREQ]\n' for i in range(len(freq)): newline_molden=newline_molden+'{0:10.3f}\n'.format(freq[i]) newline_molden=newline_molden+'[INT]\n' for i in range(len(freq)): newline_molden=newline_molden+'{0:17.6e}\n'.format( infrared_intensity[i]) newline_molden=newline_molden+'[FR-COORD]\n' newline_molden=newline_molden+'{0:6}\n'.format(n_atoms)+'\n' for i_atoms in range(n_constrained): newline_molden=newline_molden+'{0:6}'.format( struc.atoms[nums[i_atoms]].kind) for i_coord in range(3): newline_molden=newline_molden+'{0:10.4f}'.format( struc.atoms[nums[i_atoms]].coord[i_coord]/bohr) newline_molden=newline_molden+'\n' newline_molden=newline_molden+'[FR-NORM-COORD]\n' for i in range(len(freq)): newline=newline+'{0:6}\n'.format(n_atoms) if freq[i]>0: newline=newline+'stable frequency at ' elif freq[i]<0: newline=newline+'unstable frequency at ' if options.distort and freq[i]<-50: struc_new=copy.deepcopy(struc) for i_atoms in range(n_constrained): for i_coord in range(3): struc_new.atoms[i_atoms].coord[i_coord]=\ struc_new.atoms[i_atoms].coord[i_coord]+\ eig_vec[(i_atoms)*3+i_coord,i] geoname=name+'.distorted.vibration_'+str(i+1)+'.geometry.in' new_geo=open(geoname,'w') newline_geo='#\n# distorted structure-file for based on eigenmodes\n' newline_geo=newline_geo+\ '# vibration {0:5} :{1:10.3f} 1/cm\n#\n'.format(i+1,freq[i]) new_geo.writelines(newline_geo+struc_new.to_str()) new_geo.close() elif freq[i]==0: newline=newline+'translation or rotation ' newline=newline+'{0:10.3f} 1/cm IR int. is '.format(freq[i]) newline=newline+'{0:10.4e} D^2/Ang^2; red. mass is '.format( infrared_intensity[i]) newline=newline+'{0:5.3f} a.m.u.; force const. 
is '.format( 1.0/reduced_mass[i]) newline=newline+'{0:5.3f} mDyne/Ang.\n'.format(((freq[i]*(200*pi*c))**2)* (1.0/reduced_mass[i])*at_u*1.e-2) if options.molden: newline_molden=newline_molden+\ 'vibration {0:6}\n'.format(i+1) for i_atoms in range(n_constrained): newline=newline+'{0:6}'.format(struc.atoms[nums[i_atoms]].kind) for i_coord in range(3): newline=newline+'{0:10.4f}'.format( struc.atoms[nums[i_atoms]].coord[i_coord]) for i_coord in range(3): newline=newline+'{0:10.4f}'.format(eig_vec[(i_atoms)*3+i_coord,i]) if options.molden: newline_molden=newline_molden+'{0:10.4f}'.format( eig_vec[(i_atoms)*3+i_coord,i]/bohr) newline=newline+'\n' if options.molden: newline_molden=newline_molden+'\n' for i_atoms in range(n_atoms-n_constrained): newline=newline+'{0:6}'.format(struc.atoms[nums2[i_atoms]].kind) for i_coord in range(3): newline=newline+'{0:10.4f}'.format( struc.atoms[nums2[i_atoms]].coord[i_coord]) for i_coord in range(3): newline=newline+'{0:10.4f}'.format(0.0) newline=newline+'\n' newline_ir=newline_ir+'{0:10.4e}\n'.format(infrared_intensity[i]) xyz=open(xyzfile,'w') xyz.writelines(newline) xyz.close() ir=open(irname,'w') ir.writelines(newline_ir) ir.close() if options.molden: molden=open(moldenname,'w') molden.writelines(newline_molden) molden.close() if mode=='1' and options.plot: x=linspace(freq.min()-500,freq.max()+500,1000) z=zeros(len(x)) for i in range(len(freq)): z[argmin(abs(x-freq[i]))]=infrared_intensity[i] window_len=150 gauss=signal.gaussian(window_len,10) s=r_[z[window_len-1:0:-1],z,z[-1:-window_len:-1]] z_convolve=convolve(gauss/gauss.sum(),s,mode='same')[ window_len-1:-window_len+1] fig=figure(0) ax=fig.add_subplot(111) ax.plot(x,z_convolve,'r',lw=2) ax.set_xlim([freq.min()-500,freq.max()+500]) ax.set_ylim([-0.01,ax.get_ylim()[1]]) ax.set_yticks([]) ax.set_xlabel('Frequency [1/cm]',size=20) ax.set_ylabel('Intensity [a.u.]',size=20) fig.savefig(name+'_IR_spectrum.pdf') print '\n Done. '
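# The IR spectrum plotted at the end is obtained by placing each mode's
# intensity on a fine frequency grid and convolving with a normalized Gaussian
# window, which broadens the stick spectrum into smooth peaks. A standalone
# sketch with assumed frequencies and intensities:
import numpy as np
from scipy import signal

freq = np.array([500.0, 1200.0, 1650.0])            # mode frequencies in 1/cm (assumed)
infrared_intensity = np.array([0.2, 1.0, 0.5])      # assumed intensities
x = np.linspace(freq.min() - 500, freq.max() + 500, 1000)
z = np.zeros(len(x))
for f, inten in zip(freq, infrared_intensity):
    z[np.argmin(abs(x - f))] = inten                # stick spectrum on the grid
gauss = signal.gaussian(150, 10)                    # 150-point window, sigma = 10
z_convolve = np.convolve(z, gauss / gauss.sum(), mode='same')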
import pylab

# Thermocouple calibration: fit temperature against voltage from the data sheet.
data = pylab.loadtxt('thermo_volts.txt')
T = data[:, 0]
V = 1.0e-3 * data[:, 1]          # scale factor 1e-3 (presumably mV -> V)

# Linear least-squares fit T(V) and a dense curve for plotting
fit = pylab.polyfit(V, T, 1)
V_fit = pylab.linspace(V.min(), V.max(), 500)
T_fit = pylab.polyval(fit, V_fit)
print 'fit poly: T = %f*V + %f' % (fit[0], fit[1])

pylab.plot(V, T, 'bo')
pylab.plot(V_fit, T_fit, 'r')
pylab.xlabel('Voltage (V)')
pylab.ylabel('Temperature (T)')
pylab.title('thermocouple calibration -- data sheet')
pylab.show()
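# A new reading can then be converted with the fitted polynomial, e.g. (assumed value):
#   T_new = pylab.polyval(fit, 2.5e-3)   # temperature at a 2.5 mV reading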