def plot_tracks(src, fakewcs, spa=None, **kwargs):
    # NOTE -- MAGIC 61 = monthly; this is ASSUMEd below.
    tt = np.linspace(2010., 2015., 61)
    t0 = TAITime(None, mjd=TAITime.mjd2k + 365.25*10)
    xx, yy = [], []
    rr, dd = [], []
    for t in tt:
        rd = src.getPositionAtTime(t0 + (t - 2010.)*365.25*24.*3600.)
        ra, dec = rd.ra, rd.dec
        rr.append(ra)
        dd.append(dec)
        ok, x, y = fakewcs.radec2pixelxy(ra, dec)
        xx.append(x - 1.)
        yy.append(y - 1.)

    if spa is None:
        # one entry whose subplot index is None, i.e. draw into the current axes
        spa = [(None, None, None)]
    for rows, cols, sub in spa:
        if sub is not None:
            plt.subplot(rows, cols, sub)
        ax = plt.axis()
        plt.plot(xx, yy, 'k-', **kwargs)
        plt.axis(ax)
    return rr, dd, tt
def plot_roc(self, roc=None):
    """Plot ROC curves

    .. plot::
        :include-source:
        :width: 80%

        from dreamtools import rocs
        r = rocs.ROC()
        r.scores = [.9,.5,.6,.7,.1,.2,.6,.4,.7,.9, .2]
        r.classes = [1,0,1,0,0,1,1,0,0,1,1]
        r.plot_roc()

    """
    if roc is None:
        roc = self.get_roc()
    from pylab import plot, xlim, ylim, grid, title, xlabel, ylabel
    x = roc['fpr']
    plot(x, roc['tpr'], '-o')
    plot([0, 1], [0, 1], 'r')   # chance line
    ylim([0, 1])
    xlim([0, 1])
    grid(True)
    title("ROC curve (AUC=%s)" % self.compute_auc(roc))
    xlabel("FPR")
    ylabel("TPR")
def test1():
    import numpy as np
    import pylab
    from scipy import sparse

    from regreg.algorithms import FISTA
    from regreg.atoms import l1norm
    from regreg.container import container
    from regreg.smooth import quadratic
    # NOTE: `blockwise` is called below but not imported here; it is assumed
    # to be available from regreg (e.g. regreg.blocks.blockwise).

    Y = np.random.standard_normal(500)
    Y[100:150] += 7
    Y[250:300] += 14

    sparsity = l1norm(500, lagrange=1.0)
    # Create D, the first-order finite-difference matrix
    D = (np.identity(500) + np.diag([-1]*499, k=1))[:-1]
    D = sparse.csr_matrix(D)
    fused = l1norm.linear(D, lagrange=19.5)

    loss = quadratic.shift(-Y, lagrange=0.5)
    p = container(loss, sparsity, fused)

    soln1 = blockwise([sparsity, fused], Y)

    solver = FISTA(p)
    solver.fit(max_its=800, tol=1e-10)
    soln2 = solver.composite.coefs

    # plot solution
    pylab.figure(num=1)
    pylab.clf()
    pylab.scatter(np.arange(Y.shape[0]), Y, c='r')
    pylab.plot(soln1, c='y', linewidth=6)
    pylab.plot(soln2, c='b', linewidth=2)
def simulationWithoutDrugNick(numViruses, maxPop, maxBirthProb, clearProb, numTrials):
    """
    Run the simulation and plot the graph for problem 3 (no drugs are used,
    viruses do not have any drug resistance).
    For each of numTrials trials, instantiates a patient, runs a simulation
    for 300 timesteps, and plots the average virus population size as a
    function of time.
    numViruses: number of SimpleVirus to create for patient (an integer)
    maxPop: maximum virus population for patient (an integer)
    maxBirthProb: Maximum reproduction probability (a float between 0-1)
    clearProb: Maximum clearance probability (a float between 0-1)
    numTrials: number of simulation runs to execute (an integer)
    """
    # Instantiate the viruses first, the patient second
    viruses = [SimpleVirus(maxBirthProb, clearProb) for i in range(numViruses)]
    patient = Patient(viruses, maxPop)
    # Execute the patient.update method 300 times for each of numTrials trials
    steps = 300
    countList = [0 for i in range(steps)]
    for trial in range(numTrials):
        for timeStep in range(steps):
            countList[timeStep] += patient.update()
    avgList = [countList[i]/float(numTrials) for i in range(steps)]
    # Plot a diagram with xAxis=timeSteps, yAxis=average virus population
    xAxis = [x for x in range(steps)]
    pylab.figure(2)
    pylab.plot(xAxis, avgList, 'ro', label='Simple Virus')
    pylab.xlabel('Number of elapsed time steps')
    pylab.ylabel('Average size of the virus population')
    pylab.title('Virus growth in a patient without the aid of any drug')
    pylab.legend()
    pylab.show()
def testTelescope(self):
    import matplotlib
    matplotlib.use('AGG')
    import pylab as pl
    import time

    w0 = 8.0
    k = 2*np.pi/3.0
    gb = GaussianBeam(w0, k)
    lens = ThinLens(150, 150)
    gb2 = lens*gb
    self.assertAlmostEqual(gb2._z0, gb._z0 + 2*150.0)
    lens2 = ThinLens(300, 600)
    gb3 = lens2*gb2
    self.assertAlmostEqual(gb3._z0, gb2._z0 + 2*300.0)
    self.assertAlmostEqual(gb._w0, gb3._w0/2.0)
    z = np.arange(0, 150)
    z2 = np.arange(150, 600)
    z3 = np.arange(600, 900)
    pl.plot(z, gb.w(z, k), z2, gb2.w(z2, k), z3, gb3.w(z3, k))
    pl.grid()
    pl.xlabel('z')
    pl.ylabel('w')
    pl.savefig('testTelescope1.png')
    time.sleep(0.1)
    pl.close('all')
def plot_xc(t_years):
    """Plot the location of the calving front."""
    x = x_c(t_years * secpera) / 1000.0   # convert to km
    _, _, y_min, y_max = axis()
    # hold(True) is unnecessary (and was removed in Matplotlib 3.0);
    # repeated plot() calls draw into the same axes by default.
    plot([x, x], [y_min, y_max], '--g')
def plotLists(xList, xLabel=None, eListTitle=None, eList=None, eLabel=None, fListTitle=None, fList=None, fLabel=None):
    if h2o.python_username != 'kevin':
        return

    import pylab as plt
    print "xList", xList
    print "eList", eList
    print "fList", fList

    font = {'family': 'normal', 'weight': 'normal', 'size': 26}
    ### plt.rc('font', **font)
    plt.rcdefaults()

    if eList:
        plt.figure()   # create the figure before titling it
        if eListTitle:
            plt.title(eListTitle)
        plt.plot(xList, eList)
        plt.xlabel(xLabel)
        plt.ylabel(eLabel)
        plt.draw()

    if fList:
        plt.figure()
        if fListTitle:
            plt.title(fListTitle)
        plt.plot(xList, fList)
        plt.xlabel(xLabel)
        plt.ylabel(fLabel)
        plt.draw()

    if eList or fList:
        plt.show()
def check_vpd_ks2_astrometry(): """ Check the VPD and quiver plots for our KS2-extracted, re-transformed astrometry. """ catFile = workDir + '20.KS2_PMA/wd1_catalog.fits' tab = atpy.Table(catFile) good = (tab.xe_160 < 0.05) & (tab.ye_160 < 0.05) & \ (tab.xe_814 < 0.05) & (tab.ye_814 < 0.05) & \ (tab.me_814 < 0.05) & (tab.me_160 < 0.05) tab2 = tab.where(good) dx = (tab2.x_160 - tab2.x_814) * ast.scale['WFC'] * 1e3 dy = (tab2.y_160 - tab2.y_814) * ast.scale['WFC'] * 1e3 py.clf() q = py.quiver(tab2.x_814, tab2.y_814, dx, dy, scale=5e2) py.quiverkey(q, 0.95, 0.85, 5, '5 mas', color='red', labelcolor='red') py.savefig(workDir + '20.KS2_PMA/vec_diffs_ks2_all.png') py.clf() py.plot(dy, dx, 'k.', ms=2) lim = 30 py.axis([-lim, lim, -lim, lim]) py.xlabel('Y Proper Motion (mas)') py.ylabel('X Proper Motion (mas)') py.savefig(workDir + '20.KS2_PMA/vpd_ks2_all.png') idx = np.where((np.abs(dx) < 10) & (np.abs(dy) < 10))[0] print('Cluster Members (within dx < 10 mas and dy < 10 mas)') print((' dx = {dx:6.2f} +/- {dxe:6.2f} mas'.format(dx=dx[idx].mean(), dxe=dx[idx].std()))) print((' dy = {dy:6.2f} +/- {dye:6.2f} mas'.format(dy=dy[idx].mean(), dye=dy[idx].std())))
def plot_stress(self, block_ids=None, fignum=0):
    block_ids = self.check_block_ids_list(block_ids)
    #
    plt.figure(fignum)
    plt.clf()
    ax0 = plt.gca()
    #
    for block_id in block_ids:
        rws = numpy.core.records.fromarrays(zip(*filter(lambda x: x['block_id']==block_id, self.shear_stress_sequences)), dtype=self.shear_stress_sequences.dtype)
        stress_seq = []
        for rw in rws:
            stress_seq += [[rw['sweep_number'], rw['shear_init']]]
            stress_seq += [[rw['sweep_number'], rw['shear_final']]]
        X, Y = zip(*stress_seq)
        #
        ax0.plot(X, Y, '.-', label='block_id: %d' % block_id)
        #
        plt.figure(fignum+1)
        # distinguish the two curves so the legend entries are not duplicates
        plt.plot(rws['sweep_number'], rws['shear_init'], '.-', label='block_id: %d (init)' % block_id)
        plt.plot(rws['sweep_number'], rws['shear_final'], '.-', label='block_id: %d (final)' % block_id)
        plt.figure(fignum)
    ax0.plot([min(self.shear_stress_sequences['sweep_number']), max(self.shear_stress_sequences['sweep_number'])], [0., 0.], 'k-')
    ax0.legend(loc=0, numpoints=1)
    plt.figure(fignum)
    plt.title('Block shear_stress sequences')
    plt.xlabel('sweep number')
    plt.ylabel('shear stress')
def plotKerasExperimentcifar10():
    index = 5
    for experiment_number in range(1, index+1):
        outputPath_part_final = os.path.realpath(
            "/home/jie/docker_folder/random_keras/output_cifar10_mlp/errorFile/hyperopt_experiment_withoutparam_accuracy" + str(experiment_number) + ".txt")
        output_plot = os.path.realpath(
            "/home/jie/docker_folder/random_keras/output_cifar10_mlp/errorFile/plotErrorCurve" + str(experiment_number) + ".pdf")

        df = pd.read_csv(outputPath_part_final, delimiter='\t', header=None)
        df.drop(df.columns[[600]], axis=1, inplace=True)

        epochnum = list(range(1, 251))
        for i in range(10):
            df_1 = df[df.columns[0:250]].iloc[i]   # .ix is deprecated; use .iloc
            plt.plot(epochnum, df_1)
        # plt.show()
        plt.savefig(output_plot)
        plt.close()
def plot_data(x,y,Amp,freq): """ Plot the actual data point x,y along with the fit Amp*sin(freq*x) """ plb.plot(x,y,'b',linestyle=':') y_fit = Amp*np.sin(freq*x) plb.plot(x,y_fit,'r')
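# A minimal usage sketch (not from the original source); assumes `import numpy as np`
# and `import pylab as plb` as used by the function above. Amp/freq values are made up.
x = np.linspace(0, 2*np.pi, 200)
y = 1.5*np.sin(3.0*x) + 0.1*np.random.randn(len(x))   # noisy synthetic sine
plot_data(x, y, Amp=1.5, freq=3.0)
plb.show()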
def createPlot(dataY, dataX, ticksX, annotations, axisY, axisX, dostep, doannotate):
    if not ticksX:
        ticksX = dataX

    if dostep:
        py.step(dataX, dataY, where='post', linestyle='-', label=axisY)   # where=post steps after point
    else:
        py.plot(dataX, dataY, marker='o', ms=5.0, linestyle='-', label=axisY)

    if annotations and doannotate:
        for note, x, y in zip(annotations, dataX, dataY):
            py.annotate(note, (x, y), xytext=(2, 2), xycoords='data', textcoords='offset points')

    # Set X axis tick labels as rungs
    py.xticks(np.arange(1, len(dataX)+1), ticksX, horizontalalignment='left', rotation=30)

    leg = py.legend()
    leg.set_draggable(True)   # Legend.draggable() is deprecated; use set_draggable
    py.xlabel(axisX)
    py.ylabel('time (s)')

    py.draw()
    py.show()
    return
def drawPrfastscore(tp, fp, scr, tot, show=True):
    tp = numpy.cumsum(tp)
    fp = numpy.cumsum(fp)
    rec = tp/tot
    prec = tp/(fp+tp)
    dif = numpy.abs(prec[::-1]-rec[::-1])
    pos = dif.argmin()
    pos = len(dif)-pos-1
    ap = 0
    for t in numpy.linspace(0, 1, 11):
        pr = prec[rec >= t]
        if pr.size == 0:
            pr = 0
        p = numpy.max(pr)
        ap = ap+p/11
    if show:
        pylab.plot(rec, prec, '-g')
        pylab.title("AP=%.3f EPRthr=%.3f" % (ap, scr[pos]))
        pylab.xlabel("Recall")
        pylab.ylabel("Precision")
        pylab.grid()
        pylab.show()
        pylab.draw()
    return rec, prec, scr, ap, scr[pos]
def drawPr(tp, fp, tot, show=True):
    """
        draw the precision recall curve
    """
    det = numpy.array(sorted(tp+fp))
    atp = numpy.array(tp)
    afp = numpy.array(fp)
    rc = numpy.zeros(len(det))
    pr = numpy.zeros(len(det))
    for i, p in enumerate(det):
        pr[i] = float(numpy.sum(atp >= p))/numpy.sum(det >= p)
        rc[i] = float(numpy.sum(atp >= p))/tot
    # 11-point interpolated average precision
    ap = 0
    for c in numpy.linspace(0, 1, num=11):
        if len(pr[rc >= c]) > 0:
            p = numpy.max(pr[rc >= c])
        else:
            p = 0
        ap = ap+p/11
    if show:
        pylab.plot(rc, pr, '-g')
        pylab.title("AP=%.3f" % (ap))
        pylab.xlabel("Recall")
        pylab.ylabel("Precision")
        pylab.grid()
        pylab.show()
        pylab.draw()
    return rc, pr, ap
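# A minimal usage sketch (not from the original source): tp and fp hold detection
# scores of true and false positives, tot is the number of ground-truth objects.
# The toy values below are made up for illustration.
rc, pr, ap = drawPr(tp=[0.9, 0.8, 0.6], fp=[0.7, 0.3], tot=4, show=False)
print("11-point AP: %.3f" % ap)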
def simulation1(numTrials, numSteps, loc):
    results = {'UsualDrunk': [], 'ColdDrunk': [], 'EDrunk': [], 'PhotoDrunk': [], 'DDrunk': []}
    drunken_types = {'UsualDrunk': UsualDrunk, 'ColdDrunk': ColdDrunk, 'EDrunk': EDrunk,
                     'PhotoDrunk': PhotoDrunk, 'DDrunk': DDrunk}
    for drunken in drunken_types.keys():
        # Create field
        initial_loc = Location(loc[0], loc[1])
        field = Field()
        print "Simu", drunken
        drunk = Drunk(drunken)
        drunk_man = drunken_types[drunken](drunk)
        field.addDrunk(drunk_man, initial_loc)
        for trial in range(numTrials):
            distance = walkVector(field, drunk_man, numSteps)
            results[drunken].append((round(distance[0], 1), round(distance[1], 1)))
        print drunken, "=", results[drunken]
    for result in results.keys():
        pylab.figure()   # open the figure before plotting, not after
        pylab.plot(*zip(*results[result]), marker='o', color='r', ls='')
        pylab.title(result)
        pylab.xlabel('X coordinates')
        pylab.ylabel('Y coordinates')
        pylab.xlim(-100, 100)
        pylab.ylim(-100, 100)
    pylab.show()   # show() needs parentheses, otherwise the call is a no-op
def yfromx(self, newtimeaxis, doplot=False, debug=False):
    if debug:
        print('fastresampler: yfromx called with following parameters')
        print('    padvalue: ', self.padvalue)
        print('    initstep, hiresstep:', self.initstep, self.hiresstep)
        print('    initial axis limits:', self.initstart, self.initend)
        print('    hires axis limits:', self.hiresstart, self.hiresend)
        print('    requested axis limits:', newtimeaxis[0], newtimeaxis[-1])
    outindices = ((newtimeaxis - self.hiresstart) // self.hiresstep).astype(int)
    if debug:
        print('len(self.hires_y):', len(self.hires_y))
    try:
        out_y = self.hires_y[outindices]
    except IndexError:
        print('')
        print('indexing out of bounds in fastresampler')
        print('    padvalue: ', self.padvalue)
        print('    initstep, hiresstep:', self.initstep, self.hiresstep)
        print('    initial axis limits:', self.initstart, self.initend)
        print('    hires axis limits:', self.hiresstart, self.hiresend)
        print('    requested axis limits:', newtimeaxis[0], newtimeaxis[-1])
        sys.exit()
    if doplot:
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_title('fastresampler timecourses')
        pl.plot(self.hires_x, self.hires_y, newtimeaxis, out_y)
        pl.legend(('hires', 'output'))
        pl.show()
    return out_y
def qinit(x, y):
    """
    Gaussian hump:
    """
    from numpy import where, sqrt, cosh, zeros, shape
    from pylab import plot, show

    nxpoints = 2001
    nypoints = 2001
    zmin = 1000.0
    g = 9.81
    A = 10.0   # wave height
    k = sqrt(3*A/(4*zmin**3))
    z = zeros(shape=shape(y))
    u = zeros(shape=shape(y))
    hu = zeros(shape=shape(y))
    y0 = 180000
    c = sqrt(g*(A+zmin))
    rho = 1.0e3
    for i in range(nxpoints):
        for j in range(nypoints):
            z[i, j] = A*cosh(k*(y[i, j]-y0))**(-2)   # sech^2 soliton profile
            u[i, j] = -sqrt(g/zmin)*z[i, j]
            hu[i, j] = (z[i, j]+zmin)*u[i, j]
    plot(y, u, '-')
    show()
    #ze = -((y+0e0)**2)/10.
    #z = where(ze>-400000., 400.e0*exp(ze), 0.)
    return hu
def plotEventTime(library, num, eventNames, sizes, times, events, filename = None): from pylab import close, legend, plot, savefig, show, title, xlabel, ylabel import numpy as np close() arches = sizes.keys() bs = events[arches[0]].keys()[0] data = [] names = [] for event, color in zip(eventNames, ['b', 'g', 'r', 'y']): for arch, style in zip(arches, ['-', ':']): if event in events[arch][bs]: names.append(arch+'-'+str(bs)+' '+event) data.append(sizes[arch][bs]) data.append(np.array(events[arch][bs][event])[:,0]) data.append(color+style) else: print 'Could not find %s in %s-%d events' % (event, arch, bs) print data plot(*data) title('Performance on '+library+' Example '+str(num)) xlabel('Number of Dof') ylabel('Time (s)') legend(names, 'upper left', shadow = True) if filename is None: show() else: savefig(filename) return
def plot_sphere_x( s, fname ): """ put plot of ionization fractions from sphere `s` into fname """ plt.figure() s.Edges.units = 'kpc' s.r_c.units = 'kpc' xx = s.r_c L = s.Edges[-1] plt.plot( xx, np.log10( s.xHe1 ), color='green', ls='-', label = r'$x_{\rm HeI}$' ) plt.plot( xx, np.log10( s.xHe2 ), color='green', ls='--', label = r'$x_{\rm HeII}$' ) plt.plot( xx, np.log10( s.xHe3 ), color='green', ls=':', label = r'$x_{\rm HeIII}$' ) plt.plot( xx, np.log10( s.xH1 ), color='red', ls='-', label = r'$x_{\rm HI}$' ) plt.plot( xx, np.log10( s.xH2 ), color='red', ls='--', label = r'$x_{\rm HII}$' ) plt.xlim( -L/20, L+L/20 ) plt.xlabel( 'r_c [kpc]' ) plt.ylim( -4.5, 0.2 ) plt.ylabel( 'log 10 ( x )' ) plt.grid() plt.legend(loc='best', ncol=2) plt.tight_layout() plt.savefig( 'doc/img/x_' + fname )
def run(self, props, globdat):
    a = []

    for i, col in enumerate(self.columndata):
        if col.type in globdat.outputNames:
            data = globdat.getData(col.type, col.node)
        elif hasattr(globdat, col.type):
            b = getattr(globdat, col.type)
            if type(b) is ndarray:
                data = b[globdat.dofs.getForType(col.node, col.dof)]
            else:
                data = b
        else:
            data = globdat.getData(col.type, col.node)

        data = data * col.factor
        a.append(data)
        self.outfile.write(str(data)+' ')

    self.outfile.write('\n')

    if self.onScreen:
        self.output.append(a)
        plot([x[0] for x in self.output], [x[1] for x in self.output], 'ro-')
        draw()

    if not globdat.active:
        self.outfile.close()   # close() needs parentheses, otherwise this is a no-op
def cmap_plot(cmdLine): pylab.figure(figsize=[5,10]) a=outer(ones(10),arange(0,1,0.01)) subplots_adjust(top=0.99,bottom=0.00,left=0.01,right=0.8) maps=[m for m in cm.datad if not m.endswith("_r")] maps.sort() l=len(maps)+1 for i, m in enumerate(maps): print m subplot(l,1,i+1) pylab.setp(pylab.gca(),xticklabels=[],xticks=[],yticklabels=[],yticks=[]) imshow(a,aspect='auto',cmap=get_cmap(m),origin="lower") pylab.text(100.85,0.5,m,fontsize=10) # render plot if cmdLine: pylab.show(block=True) else: pylab.ion() pylab.plot([]) pylab.ioff() status = 1 return status
def create_figure():
    # Assumes the pylab namespace (linspace, plot, log10, savefig) plus
    # `test_correlog` and `cshift` (e.g. from the spectrum package) are in scope.
    psd = test_correlog()
    f = linspace(-0.5, 0.5, len(psd))
    psd = cshift(psd, len(psd)//2)   # integer shift; len(psd)/2 is a float on Python 3
    plot(f, 10*log10(psd/max(psd)))
    savefig('psd_corr.png')
def plot(self):
    """Plot the scores"""
    from pylab import plot, xlabel, ylabel, ylim
    plot(self.xdata, self.ydata)
    xlabel("Number of computeScore calls")
    ylabel("Score")
    ylim([0, ylim()[1]])
def plot_cost(self): if self.show_cost not in self.train_outputs[0][0]: raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost) train_errors = [o[0][self.show_cost][self.cost_idx] for o in self.train_outputs] test_errors = [o[0][self.show_cost][self.cost_idx] for o in self.test_outputs] numbatches = len(self.train_batch_range) test_errors = numpy.row_stack(test_errors) test_errors = numpy.tile(test_errors, (1, self.testing_freq)) test_errors = list(test_errors.flatten()) test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors)) test_errors = test_errors[:len(train_errors)] numepochs = len(train_errors) / float(numbatches) pl.figure(1) x = range(0, len(train_errors)) pl.plot(x, train_errors, 'k-', label='Training set') pl.plot(x, test_errors, 'r-', label='Test set') pl.legend() ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches) epoch_label_gran = int(ceil(numepochs / 20.)) # aim for about 20 labels epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) # but round to nearest 10 ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs)) pl.xticks(ticklocs, ticklabels) pl.xlabel('Epoch') # pl.ylabel(self.show_cost) pl.title(self.show_cost)
def plot_matches(self, name, show_below=True, match_maximum=None):
    """ Display the two images with lines connecting matched points.
        Input: im1, im2 (images as arrays), locs1, locs2 (feature point coordinates),
        matchscores (output of match()),
        show_below (if True, show the images below the match figure). """
    im1 = self._image_1.get_array_image()
    im2 = self._image_2.get_array_image()
    self.appendimages()
    im3 = self._append_image
    if self._match_score is None:
        self.match()
    locs1 = self._image_1.get_shift_location()
    locs2 = self._image_2.get_shift_location()
    if show_below:
        im3 = numpy.vstack((im3, im3))
    pylab.figure(dpi=160)
    pylab.gray()

    pylab.imshow(im3, aspect='auto')
    cols1 = im1.shape[1]
    match_num = 0
    for i, m in enumerate(self._match_score):
        if m > 0:
            pylab.plot([locs1[i][0], locs2[m][0]+cols1],
                       [locs1[i][1], locs2[m][1]], 'c')
            match_num = match_num + 1
        if match_maximum is not None and match_num >= match_maximum:
            break
    pylab.axis('off')
    pylab.savefig(name, dpi=160)
def plotForce(): figure(size=3,aspect=0.5) subplot(1,2,1) from EvalTraj import plotFF plotFF(vp=351,t=28,f=900,cm=0.6,foffset=8) subplot_annotate() subplot(1,2,2) for i in [1,2,3,4]: R=np.squeeze(np.load('Rdpse%d.npy'%i)) R=stats.nanmedian(R,axis=2)[:,1:,:] dps=np.linspace(-1,1,201)[1:] plt.plot(dps,R[:,:,2].mean(0)); plt.legend([0,0.1,0.2,0.3],loc=3) i=2 R=np.squeeze(np.load('Rdpse%d.npy'%i)) R=stats.nanmedian(R,axis=2)[:,1:,:] mn=np.argmin(R,axis=1) y=np.random.randn(mn.shape[0])*0.00002+0.0438 plt.plot(np.sort(dps[mn[:,2]]),y,'+',mew=1,ms=6,mec=[ 0.39 , 0.76, 0.64]) plt.xlabel('Displacement of Force Origin') plt.ylabel('Average Net Force Magnitude') hh=dps[mn[:,2]] err=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.975,hh.shape[0]) err2=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.75,hh.shape[0]) m=np.mean(hh) print m, m-err,m+err np.save('force',[m, m-err,m+err,m-err2,m+err2]) plt.xlim([-0.5,0.5]) plt.ylim([0.0435,0.046]) plt.grid(b=True,axis='x') subplot_annotate()
def plot_heatingrate(data_dict, filename, do_show=True):
    pl.figure(201)
    color_list = ['b','r','g','k','y','r','g','b','k','y','r']
    fmtlist = ['s','d','o','s','d','o','s','d','o','s','d','o']
    result_dict = {}
    for key in data_dict.keys():
        x = data_dict[key][0]
        y = data_dict[key][1][:,0]
        y_err = data_dict[key][1][:,1]

        p0 = np.polyfit(x, y, 1)
        fit = LinFit(np.array([x, y, y_err]).transpose(), show_graph=False)
        p1 = [0, 0]
        p1[0] = fit.param_dict[0]['Slope'][0]
        p1[1] = fit.param_dict[0]['Offset'][0]
        print fit
        x0 = np.linspace(0, max(x))
        cstr = color_list.pop(0)
        fstr = fmtlist.pop(0)
        lstr = key + " heating: {0:.2f} ph/ms".format((p1[0]*1e3))
        pl.errorbar(x/1e3, y, y_err, fmt=fstr + cstr, label=lstr)
        pl.plot(x0/1e3, np.polyval(p0, x0), cstr)
        pl.plot(x0/1e3, np.polyval(p1, x0), cstr)
        result_dict[key] = 1e3*np.array(fit.param_dict[0]['Slope'])
    pl.xlabel('Heating time (ms)')
    pl.ylabel('nbar')
    if do_show:
        pl.legend()
        pl.show()
    if filename is not None:
        pl.savefig(filename)
    return result_dict
def plotB2reg(prefix=''):
    w=loadStanFit(prefix+'revE2B2LHregCa.fit')
    px=np.array(np.linspace(-0.5,0.5,101),ndmin=2)
    a1=np.array(w['ma'][:,4],ndmin=2).T+1
    a0=np.array(w['ma'][:,3],ndmin=2).T
    printCI(w,'ma')
    y=np.concatenate([sap(a0+a1*px,97.5,axis=0),sap(a0+a1*px[:,::-1],2.5,axis=0)])
    x=np.squeeze(np.concatenate([px,px[:,::-1]],axis=1))
    man=np.array([-0.4,-0.2,0,0.2,0.4])
    plt.plot(px[0,:],np.median(a0)+np.median(a1)*px[0,:],'red')
    #plt.plot([-1,1],[0.5,0.5],'grey')
    ax=plt.gca()
    ax.set_aspect(1)
    ax.add_patch(plt.Polygon(np.array([x,y]).T,alpha=0.2,fill=True,fc='red',ec='w'))
    y=np.concatenate([sap(a0+a1*px,75,axis=0),sap(a0+a1*px[:,::-1],25,axis=0)])
    ax.add_patch(plt.Polygon(np.array([x,y]).T,alpha=0.2,fill=True,fc='red',ec='w'))
    mus=[]
    for m in range(len(man)):
        mus.append(loadStanFit(prefix+'revE2B2LHC%d.fit'%m)['ma4']+man[m])
    mus=np.array(mus).T
    errorbar(mus,x=man)
    ax.set_xticks(man)
    plt.xlim([-0.5,0.5])
    plt.ylim([-0.6,0.8])
    plt.xlabel('Pivot Displacement')
    plt.ylabel('Perceived Displacement')
def plotB3reg():
    w=loadStanFit('revE2B3BHreg.fit')
    printCI(w,'mmu')
    printCI(w,'mr')
    for b in range(2):
        subplot(1,2,b+1)
        plt.title('')
        px=np.array(np.linspace(-0.5,0.5,101),ndmin=2)
        a0=np.array(w['mmu'][:,b],ndmin=2).T
        a1=np.array(w['mr'][:,b],ndmin=2).T
        y=np.concatenate([sap(a0+a1*px,97.5,axis=0),sap(a0+a1*px[:,::-1],2.5,axis=0)])
        x=np.squeeze(np.concatenate([px,px[:,::-1]],axis=1))
        plt.plot(px[0,:],np.median(a0)+np.median(a1)*px[0,:],'red')
        #plt.plot([-1,1],[0.5,0.5],'grey')
        ax=plt.gca()
        ax.set_aspect(1)
        ax.add_patch(plt.Polygon(np.array([x,y]).T,alpha=0.2,fill=True,fc='red',ec='w'))
        y=np.concatenate([sap(a0+a1*px,75,axis=0),sap(a0+a1*px[:,::-1],25,axis=0)])
        ax.add_patch(plt.Polygon(np.array([x,y]).T,alpha=0.2,fill=True,fc='red',ec='w'))
        man=np.array([-0.4,-0.2,0,0.2,0.4])
        mus=[]
        for m in range(len(man)):
            mus.append(loadStanFit('revE2B3BH%d.fit'%m)['mmu'][:,b])
        mus=np.array(mus).T
        errorbar(mus,x=man)
        ax.set_xticks(man)
        plt.xlim([-0.5,0.5])
        plt.ylim([-0.4,0.8])
        #plt.xlabel('Manipulated Displacement')
        if b==0:
            plt.ylabel('Perceived Displacement')
        else:
            plt.gca().set_yticklabels([])
    subplot_annotate()
    plt.text(-1.1,-0.6,'Pivot Displacement',fontsize=8)
def Doplots_monthly(mypathforResults,PlottingDF,variable_to_fill, Site_ID,units,item):
    ANN_label=str(item+"_NN")     #Do Monthly Plots
    print "Doing Monthly plot"
    #t = arange(1, 54, 1)
    NN_label='Fc'
    Plottemp = PlottingDF[[NN_label,item]][PlottingDF['day_night']!=1]
    #Plottemp = PlottingDF[[NN_label,item]].dropna(how='any')
    figure(1)
    pl.title('Nighttime ANN v Tower by year-month for '+item+' at '+Site_ID)

    try:
        xdata1a=Plottemp[item].groupby([lambda x: x.year,lambda x: x.month]).mean()
        plotxdata1a=True
    except:
        plotxdata1a=False
    try:
        xdata1b=Plottemp[NN_label].groupby([lambda x: x.year,lambda x: x.month]).mean()
        plotxdata1b=True
    except:
        plotxdata1b=False

    if plotxdata1a==True:
        pl.plot(xdata1a,'r',label=item)
    if plotxdata1b==True:
        pl.plot(xdata1b,'b',label=NN_label)
    pl.ylabel('Flux')
    pl.xlabel('Year - Month')
    pl.legend()
    pl.savefig(mypathforResults+'/ANN and Tower plots by year and month for variable '+item+' at '+Site_ID)
    #pl.show()
    pl.close()
    time.sleep(1)
----------------------------------------------''' # ->> lower data resolution <<- # #zoom_factor=0.5 zoom_factor=1 #0.5 #n_pts=500 n_pts=50 ncol=10 #_dmap_=d[:100,10]-np.mean(d[:100,10]) _dmap_=d[:n_pts,ncol]-np.mean(d[:n_pts,ncol]) dmap=sp.ndimage.interpolation.zoom(_dmap_, zoom_factor) if False: dk=np.fft.fft(dmap) pk=np.abs(np.fft.fftshift(dk))**2. pl.plot(pk) pl.show() #->> data initialization <<- # qe_dict={'calculate_dcov': True, 'fname_dcov': root+'result/r1d/dcov_r1d_fft_24.npz', 'map_zoom_factor': zoom_factor, 'get_bp_type': 'FFT', } ''' #->> Initialzing quadratic estimator class <<- # #->> parafname='same as parameter file' #->> calculating dcov '''
def process_surface_adjoint(config_filename,
                            filter_type='LAPLACE',
                            marker_name='airfoil',
                            chord_length=1.0):

    print('')
    print('-------------------------------------------------------------------------')
    print('|                SU2 Suite (Process Surface Adjoint)                    |')
    print('-------------------------------------------------------------------------')
    print('')

    # some other defaults
    c_clip = 0.01     # percent chord to truncate
    fft_copy = 5      # number of times to copy the fft signal
    smth_len = 0.05   # percent chord smoothing window length
    lapl_len = 1e-4   # laplace smoothing parameter

    # read config file
    config_data = libSU2.Get_ConfigParams(config_filename)
    surface_filename = config_data['SURFACE_ADJ_FILENAME'] + '.csv'
    print(surface_filename)
    mesh_filename = config_data['MESH_FILENAME']
    gradient = config_data['OBJECTIVE_FUNCTION']

    print('Config filename = %s' % config_filename)
    print('Surface filename = %s' % surface_filename)
    print('Filter Type = %s' % filter_type)

    # read adjoint data
    adj_data = np.genfromtxt(surface_filename, dtype=float, delimiter=',', skip_header=1)

    # read mesh data
    mesh_data = libSU2_mesh.Read_Mesh(mesh_filename)

    # process adjoint data
    P = map(int, adj_data[:, 0])
    X = adj_data[:, 6].copy()
    Y = adj_data[:, 7].copy()
    Sens = adj_data[:, 1].copy()
    PsiRho = adj_data[:, 2].copy()
    I = range(0, len(P))   # important - for unsorting during write

    # store in dict by point index
    adj_data_dict = dict(zip(P, zip(X, Y, Sens, PsiRho, I)))

    # sort airfoil points
    iP_sorted, _ = libSU2_mesh.sort_Airfoil(mesh_data, marker_name)
    assert (len(iP_sorted) == len(P))

    # rebuild airfoil loop
    i = 0
    for this_P in iP_sorted:
        # the adjoint data entry
        this_adj_data = adj_data_dict[this_P]
        # re-sort
        P[i] = this_P
        X[i] = this_adj_data[0]
        Y[i] = this_adj_data[1]
        Sens[i] = this_adj_data[2]
        PsiRho[i] = this_adj_data[3]
        I[i] = this_adj_data[4]
        # next
        i = i + 1
    #: for each point

    # calculate arc length
    S = np.sqrt(np.diff(X)**2 + np.diff(Y)**2) / chord_length
    S = np.cumsum(np.hstack([0, S]))

    # tail truncating, by arc length
    I_clip_lo = S < S[0] + c_clip
    I_clip_hi = S > S[-1] - c_clip
    S_clip = S.copy()
    Sens_clip = Sens.copy()
    Sens_clip[I_clip_hi] = Sens_clip[I_clip_hi][0]
    Sens_clip[I_clip_lo] = Sens_clip[I_clip_lo][-1]

    # some edge length statistics
    dS_clip = np.diff(S_clip)
    min_dS = np.min(dS_clip)
    mean_dS = np.mean(dS_clip)
    max_dS = np.max(dS_clip)
    #print 'min_dS = %.4e ; mean_dS = %.4e ; max_dS = %.4e' % ( min_dS , mean_dS , max_dS )

    # --------------------------------------------
    #  APPLY FILTER

    if filter_type == 'FOURIER':
        Freq_notch = [1 / max_dS, np.inf]   # the notch frequencies
        Sens_filter, Frequency, Power = fft_filter(S_clip, Sens_clip, Freq_notch, fft_copy)
        #Sens_filter = smooth(S_clip,Sens_filter, 0.03,'blackman')   # post smoothing
    elif filter_type == 'WINDOW':
        Sens_filter = window(S_clip, Sens_clip, smth_len, 'blackman')
    elif filter_type == 'LAPLACE':
        Sens_filter = laplace(S_clip, Sens_clip, lapl_len)
    elif filter_type == 'SHARPEN':
        Sens_smooth = smooth(S_clip, Sens_clip, smth_len / 5, 'blackman')   # pre smoothing
        Sens_smoother = smooth(S_clip, Sens_smooth, smth_len, 'blackman')
        Sens_filter = Sens_smooth + (Sens_smooth - Sens_smoother)           # sharpener
    else:
        raise Exception('unknown filter type')

    # --------------------------------------------
    #  PLOTTING

    if pylab_imported:

        # start plot
        fig = plt.figure(gradient)
        plt.clf()
        #if not fig.axes:            # for comparing two filter calls
            #plt.subplot(1,1,1)
        #ax = fig.axes[0]
        #if len(ax.lines) == 4:
            #ax.lines.pop(0)
            #ax.lines.pop(0)

        # SENSITIVITY
        plt.plot(S, Sens, color='b')   # original
        plt.plot(S_clip, Sens_filter, color='r')   # filtered

        plt.xlim(-0.1, 2.1)
        plt.ylim(-5, 5)
        plt.xlabel('Arc Length')
        plt.ylabel('Surface Sensitivity')

        #if len(ax.lines) == 4:
            #seq = [2, 2, 7, 2]
            #ax.lines[0].set_dashes(seq)
            #ax.lines[1].set_dashes(seq)

        plot_filename = os.path.splitext(surface_filename)[0] + '.png'
        plt.savefig('Sens_' + plot_filename, dpi=300)

        # zoom in
        plt.ylim(-0.4, 0.4)
        plt.savefig('Sens_zoom_' + plot_filename, dpi=300)

        # SPECTRAL
        if filter_type == 'FOURIER':
            plt.figure('SPECTRAL')
            plt.clf()
            plt.plot(Frequency, Power)
            #plt.xlim(0,Freq_notch[0]+10)
            plt.xlim(0, 200)
            plt.ylim(0, 0.15)
            plt.xlabel('Frequency (1/C)')
            plt.ylabel('Surface Sensitivity Spectral Power')
            plt.savefig('Spectral_' + plot_filename, dpi=300)
        #: if spectral plot
    #: if plot

    # --------------------------------------------
    #  SAVE SURFACE FILE

    # reorder back to input surface points
    Sens_out = np.zeros(len(S))
    Sens_out[I] = Sens_filter   # left over from sort
    adj_data[:, 1] = Sens_out

    # get surface header
    surface_orig = open(surface_filename, 'r')
    header = surface_orig.readline()
    surface_orig.close()

    # get list of prefix names
    prefix_names = libSU2.get_AdjointPrefix(None)
    prefix_names = prefix_names.values()

    # add filter prefix, before adjoint prefix
    surface_filename_split = surface_filename.rstrip('.csv').split('_')
    if surface_filename_split[-1] in prefix_names:
        surface_filename_split = surface_filename_split[0:-1] + ['filtered'] + [surface_filename_split[-1]]
    else:
        surface_filename_split = surface_filename_split + ['filtered']
    surface_filename_new = '_'.join(surface_filename_split) + '.csv'

    # write filtered surface file (only updates Sensitivity)
    surface_new = open(surface_filename_new, 'w')
    surface_new.write(header)
    for row in adj_data:
        for i, value in enumerate(row):
            if i > 0:
                surface_new.write(', ')
            if i == 0:
                surface_new.write('%i' % value)
            else:
                surface_new.write('%.16e' % value)
        surface_new.write('\n')
    surface_new.close()

    print('')
    print('----------------- Exit Success (Process Surface Adjoint) ----------------')
    print('')
    return
INPUT = (S0, I0, R0)

def diff_eqs(INP, t):
    '''The main set of equations'''
    Y = np.zeros((3))
    V = INP
    Y[0] = -a*V[0]*V[1] + e*V[1]*V[2] - d*V[0]
    Y[1] = a*V[0]*V[1] - b*V[1] + c*V[1]*V[2]
    Y[2] = b*V[1] + d*V[0] - c*V[1]*V[2]
    return Y

# For odeint
t_start = 0.0; t_end = ND; t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = spi.odeint(diff_eqs, INPUT, t_range)

print RES

# Plotting
pl.subplot(111)
pl.plot(RES[:, 1], '-r', label='Infectious')
pl.plot(RES[:, 0], '-g', label='Susceptibles')
pl.plot(RES[:, 2], '-k', label='Recovereds')
pl.legend(loc=0)
pl.title('dy_SIR.py')
pl.xlabel('Time')
pl.ylabel('Infectious, Susceptibles and Recovereds')
pl.show()
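# A self-contained parameter sketch (not from the original source): the fragment
# above relies on globals defined elsewhere (rate constants a..e, initial values
# S0/I0/R0, duration ND and step TS). Made-up example values such as these would
# have to appear before the fragment runs:
import numpy as np
import scipy.integrate as spi
import pylab as pl

a, b, c, d, e = 0.5, 0.2, 0.1, 0.05, 0.1   # hypothetical rate constants
S0, I0, R0 = 0.99, 0.01, 0.0               # hypothetical initial fractions
ND, TS = 100.0, 1.0                        # hypothetical duration and time step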
print "Deldatashape", Deldata.shape #import IPython; IPython.embed() #print "shapes of arrays:", data1.shape, data2.shape #Bootstrap resampling B = 100 bootmean, booterr = boot_simple.bootstrap(B, Deldata) #print bootmean #plotting fig = p.figure() ax = fig.add_subplot(411) #plotp.P_v_Eta(ax,kz,P) ax.set_xlabel('kz') ax.set_ylabel(r'$P(k) K^{2} (h^{-1} Mpc)^{3}$') p.plot(kz, P, 'bo') p.plot(kz, Q, 'go') p.plot(kz, (10 * 2 * n.pi**2) / n.abs(kz)**3, 'ro') #input ax.set_yscale('log') ax = fig.add_subplot(412) #ax.errorbar(kz, n.abs(bootmean), yerr=booterr, fmt='ok', ecolor='gray', alpha=0.5) ax.errorbar(k, n.abs(bootmean), yerr=booterr, fmt='ok', ecolor='gray', alpha=0.5) #ax.set_ylim([0,0.5]) #ax.set_yscale('log') ax.set_xlabel('kz') ax.set_ylabel(r'$P(k) K^{2} (h^{-1} Mpc)^{3}$')
# define font properties using a dict
font = {'family': 'serif',
        'color':  'red',
        'weight': 'normal',
        'size': 11
        }

# importing data from a file
data = genfromtxt('data_plot.txt')

# separating data [row : column]
mass = data[:, 0]
radius = data[:, 1]

print(mass)

# plot data
plot(mass, radius, 'r', label=r'data plot')

# add title and axis labels
plt.title(r'Example of the stellar mass-radius relation', fontdict=font)
plt.xlabel(r'$M_{\odot}$', fontdict=font)
plt.ylabel(r'$R_{\odot}$', fontdict=font)

plt.legend(loc='upper right')   # add a legend
plt.grid(True)                  # turn on the plot grid

# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.10)
plt.show()
if __name__ == "__main__": import pylab as pl a = np.random.power(0.01, size=1000) N_bin = 9 bins = to_constant_bin_number(a, N_bin) weight_sums = [np.sum(b) for b in bins] #show max values of a and weight sums of the bins print(np.sort(a)[-1:-11:-1], weight_sums) #plot distribution pl.plot(np.arange(N_bin), [np.sum(b) for b in bins]) pl.ylim([0, max([np.sum(b) for b in bins]) + 0.1]) b = {'a': 10, 'b': 10, 'c': 11, 'd': 1, 'e': 2, 'f': 7} bins = to_constant_bin_number(b, 4) print("===== dict\n", b, "\n", bins) lower_bound = None upper_bound = None b = (('a', 10), ('b', 10), ('c', 11), ('d', 1), ('e', 2), ('f', 7)) bins = to_constant_bin_number(b, 4, weight_pos=1, lower_bound=lower_bound, upper_bound=upper_bound)
c = connection(pre, post, [1, 1]) sim = simulation(2, dt=0.0001) sim.monitor(post, [ 'u', ], 0.001) run_sim(sim, [pre, post], [c]) figure(figsize=(15, 5)) m = sim.monitors['u'] m.plot() for t, n in pre.saved_spikes: plot([t, t], [0, 0.1], 'g', linewidth=3) # In[7]: pre.saved_spikes # In[8]: pre = neurons.poisson_pattern([10]) post = neurons.srm0(1) post.smoothed = True post.tau = 0.1 post.a = 10 post2 = neurons.srm0(1)
print("Delta: ", deltaMax, " isConverged: ", isConverged, " isPolicyStable: ", isPolicyStable) if (isPolicyStable): print("Stable policy achieved!") pl.figure() for e in visualizationSweeps: if e < len(agent.valueTables): printMatrix_pol = np.zeros(env.nStates - 2, dtype=np.int) printMatrix_val = np.zeros(env.nStates - 2, dtype=np.float) for i in range(env.nStates - 2): action = agent.selectAction(i + 1, env.getAvailableActions()) printMatrix_val[i] = agent.valueTables[e - 1][i] printMatrix_pol[i] = action pl.plot(printMatrix_val, label="Sweep " + str(e)) pl.xlabel("Capital") pl.ylabel("Value estimates") pl.title("p=" + str(prob_heads)) pl.legend() printMatrix_pol = np.zeros(env.nStates - 2, dtype=np.int) printMatrix_val = np.zeros(env.nStates - 2, dtype=np.float) for i in range(env.nStates - 2): action = agent.selectAction(i + 1, env.getAvailableActions()) printMatrix_val[i] = agent.valueTable[i] printMatrix_pol[i] = action print("Final value estimates and policy") print(printMatrix_val) print() print(printMatrix_pol)
zbeg = int(zoomints[l][0] / dx) zend = int(zoomints[l][1] / dx) xt = x[xbeg:zbeg:igap] + x[zbeg:zend:zoomgap[l]] + x[ zend:xend:igap] ht = h[xbeg:zbeg:igap] + h[zbeg:zend:zoomgap[l]] + h[ zend:xend:igap] else: xbeg = int(cxlim[0] / dx) xend = int(cxlim[1] / dx) xt = x[xbeg:xend:igap] ht = h[xbeg:xend:igap] x = array(xt) h = array(ht) s = str(wdirords[k]) plot(x, h, label=s) ylim(cylim) xlim(cxlim) xlabel("$x$ ($m$)") ylabel("$h$ ($m$)") #legend() gap = gap * gapf[l] """ plot(x,u ,'-b') ylim([-0.1,2]) xlim([0,1000]) s = "Dam Break: " + wdirord + " dx = " + str(dx) title(s) xlabel("x (m)") ylabel("u (m/s)") """
u0 = [0, 0]
print 'f(x0) = ', f(0., x0, u0)

# part 1:
t_, x_, x1_, x2_ = sim(f, total_t, x0, u0, dt=4e-2)

for i in range(1, 6):
    xinit = x0 + np.array([0, 0, 0, 0, 10 * i * math.pi / 180.0])
    t_, x_, x1_, x2_ = sim(f, total_t, xinit, u0, dt=4e-2)
    print "a=" + str(i) + " : x(t)=", x_[int(60 / 4e-2) - 1, :]   # index must be an integer

# Here is the result for part 1
xta1 = np.array([1.82100427, 5.65061528, -0.87326504, 0.46588623, 2.79551686])
xta2 = np.array([0.81212008, 5.88098381, -0.94089848, 0.30716749, 2.97004978])
xta3 = np.array([-0.22143997, 5.93266163, -0.97994319, 0.13911562, -3.1386026])
xta4 = np.array([-1.24827168, 5.80407852, -0.98921283, -0.0331632, -2.96406967])
xta5 = np.array([-2.23717529, 5.49914142, -0.96842573, -0.20443439, -2.78953675])

# part 2:
a_ = np.array([1, 2, 3, 4])
for i in range(5):
    plt.figure()
    lineObjects = plt.plot(
        a_,
        np.array([(xta2[i] - xta1[i]) / 1.0,
                  (xta3[i] - xta2[i]) / 2.0,
                  (xta4[i] - xta3[i]) / 3.0,
                  (xta5[i] - xta4[i]) / 4.0]), '.-')
    plt.title('x' + str(i + 1))
    plt.show()
def validate(label, target, predictions, baseline=0.5, compute_auc=False, quiet=True): """ Validates binary predictions, computes confusion matrix and AUC. Given a vector of predictions and actual values, scores how well we did on a prediction. Args: label: label of what we're validating target: vector of actual results predictions: predicted results. May be a probability vector, in which case we'll sort it and take the most confident values where baseline is the proportion that we want to take as True predictions. If a prediction is 1.0 or 0.0, however, we'll take it to be a true or false prediction, respectively. compute_auc: If true, will compute the AUC for the predictions. If this is true, predictions must be a probability vector. """ if len(target) != len(predictions): raise Exception('Length mismatch %d vs %d' % (len(target), len(predictions))) if baseline > 1.0: # Baseline number is expected count, not proportion. Get the proportion. baseline = baseline * 1.0 / len(target) #Make an iterator that aggregates elements from each of the iterables. zipped = sorted(zip(target, predictions), key=lambda tup: -tup[1]) expect = len(target) * baseline (true_pos, true_neg, false_pos, false_neg) = (0, 0, 0, 0) for index in xrange(len(target)): (yval, prob) = zipped[index] if float(prob) == 0.0: predicted = False elif float(prob) == 1.0: predicted = True else: predicted = index < expect if predicted: if yval: true_pos += 1 else: false_pos += 1 else: if yval: false_neg += 1 else: true_neg += 1 pos = true_pos + false_neg neg = true_neg + false_pos # P(1 | predicted(1)) and P(0 | predicted(f)) pred_t = true_pos + false_pos pred_f = true_neg + false_neg prob1_t = true_pos * 1.0 / pred_t if pred_t > 0.0 else -1.0 prob0_f = true_neg * 1.0 / pred_f if pred_f > 0.0 else -1.0 # Lift = P(1 | t) / P(1) prob_1 = pos * 1.0 / (pos + neg) lift = prob1_t / prob_1 if prob_1 > 0 else 0.0 accuracy = (true_pos + true_neg) * 1.0 / len(target) if compute_auc: y_bool = [True if yval else False for (yval, _) in zipped] x_vec = [xval for (_, xval) in zipped] auc_value = roc_auc_score(y_bool, x_vec) fpr, tpr, _ = roc_curve(y_bool, x_vec) pl.plot(fpr, tpr, lw=1.5, label='ROC %s (area = %0.2f)' % (label, auc_value)) pl.xlabel('False Positive Rate', fontsize=18) pl.ylabel('True Positive Rate', fontsize=18) pl.title('ROC curve', fontsize=18) auc_value = '%0.03g' % auc_value else: auc_value = 'NA' print '(%s) Lift: %0.03g Auc: %s' % (label, lift, auc_value) if not quiet: print ' Base: %0.03g Acc: %0.03g P(1|t): %0.03g P(0|f): %0.03g' % ( baseline, accuracy, prob1_t, prob0_f) print ' Fp/Fn/Tp/Tn p/n/c: %d/%d/%d/%d %d/%d/%d' % ( false_pos, false_neg, true_pos, true_neg, pos, neg, len(target))
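# A minimal usage sketch (not from the original source): toy labels and scores;
# compute_auc=False keeps it free of the sklearn/pylab plotting path.
validate('toy model',
         target=[1, 0, 1, 1, 0, 0],
         predictions=[0.9, 0.2, 0.7, 0.6, 0.4, 0.1],
         baseline=0.5, compute_auc=False, quiet=False)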
for line in mse: xy = line.split() X.append(float(xy[0])) Y.append(float(xy[1])) for line in res: RES.append(float(line.split()[1])) for line in des: DES.append(float(line)) for line in mse_test: MSE_TEST.append(float(line.split()[1])) pl.figure() pl.xlabel('epochs') pl.ylabel('MSE') pl.title('MSE for training data (60% of total data)') pl.plot(X,Y, label='Training MSE') pl.plot(X,MSE_TEST, label='Test MSE') pl.grid() pl.figure() pl.xlabel('Years') pl.ylabel('Scaled Output') pl.title('Desired and Forward Pass Output on test data (40% of total data)') pl.plot(RES[187:], label='result') pl.plot(DES[187:], label='desired') pl.legend() pl.grid() pl.figure() pl.xlabel('Years') pl.ylabel('Scaled Output') pl.title('Desired and Forward Pass Output on training data (to check overfitting)') pl.plot(RES[0:186], label='result')
# -*- coding: utf-8 -*- """ Created on Thu Sep 10 11:27:28 2015 @author: NigmatullinR """ import scipy.io.wavfile as sw import numpy as np import pylab as plb name = 'voice.wav' f = open(name, 'rb') [fr, dti] = sw.read(f) f.close() outN = dti[5:300] print 'length', len(dti) print 'sampling frequency', fr print 'signal example', np.float32(dti[5:300]) plb.figure() plb.plot(outN)
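# A small hedged addition (not from the original source): label the axes and
# report the clip duration implied by the sampling frequency read above.
print 'duration (s)', len(dti) / float(fr)
plb.xlabel('sample index')
plb.ylabel('amplitude')
plb.show()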
transOffset = offset_copy(ax.transData, fig=fig, x=0.05, y=0.10, units='inches')

# for i in xrange(len(data['code'])):
#     if data['price'][i] <= 0 or data['gdp'][i] <= 0:
#         continue
#     if data['oilout'][i] > data['oilin'][i]:
#         fuel = False
#     else:
#         fuel = True
#     symbol = "kx" if fuel else 'ko'
#     pl.plot(np.log(data['gdp'][i]), data['price'][i], symbol)
#     # pl.text(data[i,0], data[i,4], '%.1f' % (fuel), transform=transOffset)
#     pl.text(np.log(data['gdp'][i]), data['price'][i], data['name'][i], transform=transOffset)

total = []
for i in xrange(len(data['code'])):
    if data['price'][i] > 0:
        total += [(data['code'][i], data['price'][i])]

total2 = sorted(total, key=lambda x: x[1])
for j, v in enumerate(total2):
    pl.plot(j, v[1], 'ko')   # a lone point needs a marker, or nothing is drawn
    pl.text(j, v[1], v[0], transform=transOffset)

pl.show()
if ar1 == ar2: nlth = interpolate_dict(lb, nbs_ar1xar1, lth, spectra) else: Rlth = interpolate_dict(lb, Rb, lth, spectra) nlth_ar1xar1 = interpolate_dict(lb, nbs_ar1xar1, lth, spectra) nlth_ar2xar2 = interpolate_dict(lb, nbs_ar2xar2, lth, spectra) nlth = {} for spec in spectra: nlth[spec] = Rlth[spec] * np.sqrt( nlth_ar1xar1[spec] * nlth_ar2xar2[spec]) for spec in spectra: plt.figure(figsize=(12, 12)) plt.plot(lth, nlth[spec], label="interpolate", color="lightblue") if ar1 == ar2: nbs = nbs_ar1xar1[spec] else: nbs = nbs_ar1xar2[spec] plt.plot(lb, nbs, ".", label="%s %sx%s" % (sv, ar1, ar2), color="red") plt.legend(fontsize=20) plt.savefig("%s/noise_interpolate_%sx%s_%s_%s.png" % (plot_dir, ar1, ar2, sv, spec), bbox_inches="tight")
model_path = os.path.join(save_dir, model_name) model.save(model_path) print('Saved trained model at %s ' % model_path) # Score trained model. scores = model.evaluate(test_tensors, test_targets, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) model.load_weights(file_path) mushroom_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors] test_accuracy = 100*np.sum(np.array(mushroom_predictions)==np.argmax(test_targets, axis=1))/len(mushroom_predictions) print('Test accuracy: %.4f%%' % test_accuracy) print(history.history.keys()) # summarize history for accuracy plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show()
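# Note (environment assumption, not from the original source): TF2-era Keras names
# these history keys 'accuracy'/'val_accuracy' instead of 'acc'/'val_acc'.
# A version-agnostic lookup for the accuracy curves:
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
plt.plot(history.history[acc_key])
plt.plot(history.history['val_' + acc_key])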
print 'stopped early' break win.logOnFlip(msg='frame=%i' %frameN, level=logging.EXP) win.flip() win.close() #calculate some values intervalsMS = pylab.array(win.frameIntervals[1:])*1000 m=pylab.mean(intervalsMS) sd=pylab.std(intervalsMS) # se=sd/pylab.sqrt(len(intervalsMS)) # for CI of the mean distString= "Mean=%.1fms, s.d.=%.1f, 99%%CI(frame)=%.2f-%.2f" %(m,sd,m-2.58*sd,m+2.58*sd) nTotal=len(intervalsMS) nDropped=sum(intervalsMS>(1.5*m)) droppedString = "Dropped/Frames = %i/%i = %.3f%%" %(nDropped,nTotal, 100*nDropped/float(nTotal)) #plot the frameintervals pylab.figure(figsize=[20,10]) pylab.subplot(1,2,1) pylab.plot(intervalsMS, '-') pylab.ylabel('t (ms)') pylab.xlabel('frame N') pylab.title(droppedString) # pylab.subplot(1,2,2) pylab.hist(intervalsMS, 50, normed=0, histtype='stepfilled') pylab.xlabel('t (ms)') pylab.ylabel('n frames') pylab.title(distString) pylab.show()
def displayFibs(n): (xvals, yvals) = gatherFacts(n) plt.figure('fibs') plt.plot(xvals, yvals, label='fibonacci') plt.show()
                    p=p, dview=dview, Ain=None, method_deconvolution='oasis', skip_refinement=False)
    cnm = cnm.fit(images)
    crd = plot_contours(cnm.A, Cn, thr=0.9)
    C_dff = extract_DF_F(Yr, cnm.A, cnm.C, cnm.bl, quantileMin=8, frames_window=200, dview=dview)
    pl.figure()
    pl.plot(C_dff.T)
else:
    rf = 14             # half-size of the patches in pixels. rf=25, patches are 50x50
    stride = 6          # amount of overlap between the patches in pixels
    K = 6               # number of neurons expected per patch
    gSig = [6, 6]       # expected half size of neurons
    merge_thresh = 0.8  # merging threshold, max correlation allowed
    p = 1               # order of the autoregressive system
    save_results = False
    cnm = cnmf.CNMF(n_processes, k=K, gSig=gSig, merge_thresh=0.8, p=0, dview=dview,
som.random_weights_init(X) som.train_random(data = X, num_iteration = 100) # Visualizing the results from pylab import bone, pcolor, colorbar, plot, show bone() pcolor(som.distance_map().T) colorbar() markers = ['o', 'x'] colors = ['r', 'g'] for i, x in enumerate(X): w = som.winner(x) plot(w[0] + 0.5, w[1] + 0.5, markers[1], markeredgecolor = colors[1], markerfacecolor = 'None', markersize = 10, markeredgewidth = 2) show() # Finding the frauds mappings = som.win_map(X) grp1 = np.concatenate((mappings[(6,2)], mappings[(3,5)], mappings[(1,6)], mappings[(2,7)]), axis = 0) grp1 = sc.inverse_transform(grp1) grp2 = np.concatenate((mappings[(0,0)], mappings[(0,9)], mappings[(9,0)], mappings[(8,9)], mappings[(9,8)], mappings[(9,4)]), axis = 0) grp2 = sc.inverse_transform(grp2) grp3 = np.concatenate((mappings[(1,0)], mappings[(5,5)], mappings[(8,3)], mappings[(5,9)]), axis = 0) grp3 = sc.inverse_transform(grp3) grp4 = np.concatenate((mappings[(0,4)], mappings[(0,2)], mappings[(8,2)], mappings[(3,9)], mappings[(0,7)]), axis = 0) grp4 = sc.inverse_transform(grp4)
def main(xyz):
    #########################
    # sanity checks on the input acceleration data
    ##############
    xyz_acc = xyz
    mod_acc_mean = np.mean(np.sqrt((xyz_acc * xyz_acc).sum(axis=1)))
    assert isinstance(xyz_acc, np.ndarray)
    assert xyz_acc.shape[0] >= 10
    assert xyz_acc.shape[1] == 3
    assert mod_acc_mean >= 5 and mod_acc_mean <= 50

    ##########################################
    # feature visualisation -> valid data
    #################################################
    xyz_abs = np.abs(xyz)

    ######### visualise the raw axes
    ind = np.arange(xyz.shape[0])
    plt.figure()
    plt.subplot(211)
    plt.title('xyz')
    plt.plot(ind, xyz[:, 0], 'r-', ind, xyz[:, 1], 'y.', ind, xyz[:, 2], 'b--')

    ###########################
    # generate observations: [n,3] -> [n_obs,10,3] -> [n_obs,4x3]
    ############################
    fea = xyz
    kernel_sz = 10   # integer, so the slice indices below are valid
    stride = kernel_sz
    obs_list = []
    num = int((xyz.shape[0] - kernel_sz) / stride) + 1
    for i in range(num):
        obs = fea[i * stride:i * stride + kernel_sz, :]   # [10,3]
        if obs.shape[0] == kernel_sz:
            v = np.array([fea4(obs[:, c]) for c in range(obs.shape[1])]).flatten()
            obs_list.append(v)   # [10,3] -> [3x4,]
    x_arr = np.array(obs_list)
    print 'x', x_arr.shape   # [n_obs,12]

    # append a dummy label column: [n_obs,12] -> [n_obs,13]
    xy = np.concatenate((x_arr, np.zeros((x_arr.shape[0], 1))), axis=1)
    print xy.shape
    dataSet = xy

    # load model trees
    many_stumps = load_pickle(dataPath + 'rf-para-watchphone')

    # test
    X_test = dataSet   # [n,13]

    ##### ensemble test: [[tree, dim_list, accuracy], ...]
    n_val = X_test.shape[0]
    dim_val = X_test.shape[1] - 1   # 12 features + 1 label
    f_label_mat = np.zeros((n_val, len(many_stumps)))   # [n, n_stumps]
    for stump_idx in range(len(many_stumps)):
        dim_sample = many_stumps[stump_idx][1]
        tree = many_stumps[stump_idx][0]
        for obs in range(n_val):
            pred = classify(tree, X_test[obs, :])
            f_label_mat[obs, stump_idx] = pred

    ## majority vote over the per-stump labels
    maj_vote = np.zeros((n_val,))   # [n,]
    for i in range(f_label_mat.shape[0]):
        vote1 = f_label_mat[i, :].sum()
        vote0 = (1 - f_label_mat[i, :]).sum()
        maj_vote[i] = 1 if vote1 > vote0 else 0

    plt.subplot(2, 1, 2)
    plt.title('predict watch or not watch over time')
    plt.plot(np.where(maj_vote == 0)[0],
             np.zeros((np.where(maj_vote == 0)[0].shape[0],)), 'bo', label='watch')
    plt.plot(np.where(maj_vote == 1)[0],
             np.ones((np.where(maj_vote == 1)[0].shape[0],)), 'y^', label='notwatch')
    plt.legend()
    plt.xlabel('time')
    plt.ylabel('predicted class')

    class_dic = {0: 'watch', 1: 'notwatch'}
    label_list = [class_dic[i] for i in maj_vote]
    print 'predict', label_list
    return label_list   # [string, ...] predicted labels
normalize_dataset(dataset, minmax) inputs = [] outputs = [] for row in dataset: inputs.append(row[0:-1]) outputs.append(row[2:]) #X = np.array([[0,0,1], # [0,1,1], # [1,0,1], # [1,1,1]]) X = np.array(inputs) #y = np.array([[0], # [1], # [1], # [0]]) y = np.array(outputs) cols = X.T xlabel("Instances") ylabel("Inputs and Outputs") title("Data Visualization") plot(cols[0], label='Show') plot(cols[1], label='Network') #, 'b') plot(outputs, label='Fall-off') #, 'r') legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) show()
from vendor.nfb.pynfb.signal_processing.helpers import get_outliers_mask
import numpy as np
from PyQt5 import QtGui, QtWidgets
from vendor.nfb.pynfb.protocols.ssd.topomap_selector_ica import ICADialog

fs = 1000
band = (8, 14)
channels = ['Fp1', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'Ft9', 'Fc5', 'Fc1', 'Fc2', 'Fc6',
            'Ft10', 'T3', 'C3', 'Cz', 'C4', 'T4', 'Tp9', 'Cp5', 'Cp1', 'Cp2', 'Cp6', 'Tp10',
            'T5', 'P3', 'Pz', 'P4', 'T6', 'O1', 'Oz', 'O2']

data = [loadmat(r'C:\Users\Nikolai\Desktop\Liza_diplom_data\Liza_diplom_data\treatment\p25\day2\proba{}.mat'.format(k))['X1Topo'].T
        for k in range(1, 16)]
df = pd.DataFrame(data=np.concatenate(data), columns=channels)

for ch in ['Pz', 'Cz']:
    channels.remove(ch)
    del df[ch]

df = df.loc[~get_outliers_mask(df[channels])]
#plt.plot(*welch(df['C3'], fs, nperseg=4*fs))
plt.plot(df[channels])
plt.show()

#df['C3env'] = np.abs(hilbert(fft_filter(df['C3'], fs, band)))
#df['C4env'] = np.abs(hilbert(fft_filter(df['C4'], fs, band)))
#plt.plot(df['C3env']+df['C4env'])

a = QtWidgets.QApplication([])
(rej, filt, topo, _unmix, _bandpass, _) = ICADialog.get_rejection(df.iloc[:fs*60*3], channels, fs)
df['SMR'] = np.dot(df.to_numpy(), filt)   # DataFrame.as_matrix() was removed in pandas 1.0
df.to_pickle('p4')

plt.plot(df['SMR'])
plt.show()
ydata = np.array([0.1, 0.81, 4.03, 9.1, 15.99, 24.2, 37.2]) #fit a third order polynomial from pylab import polyfit, plot, xlabel, ylabel, show, legend, savefig pars = polyfit(xdata, ydata, 3) print 'pars from polyfit: {0}'.format(pars) ## numpy method returns more data A = np.column_stack([xdata**3, xdata**2, xdata, np.ones(len(xdata), np.float)]) pars_np, resids, rank, s = np.linalg.lstsq(A, ydata) print 'pars from np.linalg.lstsq: {0}'.format(pars_np) ''' we are trying to solve Ax = b for x in the least squares sense. There are more rows in A than elements in x so, we can left multiply each side by A^T, and then solve for x with an inverse. A^TAx = A^Tb x = (A^TA)^-1 A^T b ''' # not as pretty but equivalent! pars_man = np.dot(np.linalg.inv(np.dot(A.T, A)), np.dot(A.T, ydata)) print 'pars from linear algebra: {0}'.format(pars_man) #but, it is easy to fit an exponential function to it! # y = a*exp(x)+b Aexp = np.column_stack([np.exp(xdata), np.ones(len(xdata), np.float)]) pars_exp = np.dot(np.linalg.inv(np.dot(Aexp.T, Aexp)), np.dot(Aexp.T, ydata)) plot(xdata, ydata, 'ro') fity = np.dot(A, pars) plot(xdata, fity, 'k-', label='poly fit') plot(xdata, np.dot(Aexp, pars_exp), 'b-', label='exp fit') xlabel('x') ylabel('y') legend() savefig('images/curve-fit-1.png')
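# A quick hedged cross-check (not from the original source): the normal-equation
# solution should agree with polyfit to numerical precision.
print 'polyfit and normal equations agree: {0}'.format(np.allclose(pars, pars_man))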
            hexNeighbours[1] = oldUniverseList[currentRow - 1][currentColumn - 1]
        if (currentColumn + 1) < cellCountX:
            # CELL 4 ODD
            hexNeighbours[4] = oldUniverseList[currentRow][currentColumn + 1]
        if (currentRow + 1) < cellCountY:
            # CELL 5 ODD
            hexNeighbours[5] = oldUniverseList[currentRow + 1][currentColumn + 1]

        # Get the new state by sending the currentCell value + string of all neighbours
        hexNeighbours = "".join(hexNeighbours)   # join the characters into 1 string
        newUniverseRow += getNewState2DHex(oldUniverseList[currentRow][currentColumn], hexNeighbours)
    universeList[currentRow] = newUniverseRow

#print RES
# Plotting
pl.subplot(3, 1, 1)
pl.plot(map(itemgetter(3), RES), map(itemgetter(2), RES), '-r', label='Infected')
pl.plot(map(itemgetter(3), RES), map(itemgetter(0), RES), '-b', label='Normal')
pl.legend(loc=0)
pl.title('Infected and Normal')
pl.xlabel('Time')
pl.ylabel('Count')

pl.subplot(3, 1, 2)
pl.plot(map(itemgetter(3), RES), map(itemgetter(1), RES), '-r', label='Susceptibles')
pl.plot(map(itemgetter(3), RES), map(itemgetter(0), RES), '-b', label='Normal')
pl.legend(loc=0)
pl.title('Susceptibles and Normal')
pl.xlabel('Time')
pl.ylabel('Count')

pl.subplot(3, 1, 3)
for i in xrange(X.size): griddata.addSample([X.ravel()[i], Y.ravel()[i]], [0]) griddata._convertToOneOfMany( ) # this is still needed to make the fnn feel comfy for i in range(20): trainer.trainEpochs(1) trnresult = percentError(trainer.testOnClassData(), trndata['class']) tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class']) print "epoch: %4d" % trainer.totalepochs, \ " train error: %5.2f%%" % trnresult, \ " test error: %5.2f%%" % tstresult out = fnn.activateOnDataset(griddata) out = out.argmax(axis=1) # the highest output activation gives the class out = out.reshape(X.shape) figure(1) ioff() # interactive graphics off clf() # clear the plot hold(True) # overplot on for c in [0, 1, 2]: here, _ = where(tstdata['class'] == c) plot(tstdata['input'][here, 0], tstdata['input'][here, 1], 'o') if out.max() != out.min(): # safety check against flat field contourf(X, Y, out) # plot the contour ion() # interactive graphics on draw() # update the plot ioff() show()
from pylab import plot,show,xlabel from numpy import linspace,sin,loadtxt,cos x=linspace(0,10,100) print(x) y=sin(x) #plot(x,y) #show() a=open("test.dat","w") for i in range(len(x)): a.write("%.2f %.2f\n" % (x[i],y[i])) a.close() b=loadtxt("test.dat",float) plot(b[:,0],b[:,1],"r--") t=cos(x) xlabel("Radians") plot(x,t,"b--") show()
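# A hedged alternative (not from the original source): numpy can write and round
# the two columns in one call, replacing the manual write loop above.
from numpy import savetxt, column_stack
savetxt("test2.dat", column_stack((x, y)), fmt="%.2f")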
# -*- coding: utf-8 -*-
import scipy.signal as signal
import numpy as np
import pylab as pl

h1 = signal.remez(201, (0, 0.18, 0.2, 0.50), (0.01, 1))
h2 = signal.remez(201, (0, 0.38, 0.4, 0.50), (1, 0.01))
h3 = np.convolve(h1, h2)

w, h = signal.freqz(h3, 1)
pl.plot(w/2/np.pi, 20*np.log10(np.abs(h)))
pl.xlabel(u"Normalized frequency (cycles/sample)")
pl.ylabel(u"Magnitude (dB)")
pl.title(u"Low-pass and high-pass cascaded into a band-pass filter")
pl.show()
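# A hedged usage sketch (not from the original source): apply the cascaded
# band-pass FIR with scipy.signal.lfilter to a synthetic two-tone signal.
t = np.arange(0, 2000)
x = np.sin(0.1*np.pi*t) + np.sin(0.6*np.pi*t)   # out-of-band and in-band tones
y = signal.lfilter(h3, 1, x)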
# plt.ylabel('loss') # plt.legend(['greedy']) # #plt.title(fn) # plt.tight_layout() for res in results[1:]: im = cc.wire_network(res['network'],give_dense=True).detach().numpy().reshape(im_size,order='F') params=res['num_params'] error=np.linalg.norm((image-im).ravel())/np.linalg.norm(image.ravel())*100 if xp == 'einstein': plt.subplot(nploty,nplotx,subplot_index) subplot_index += 1 plt.imshow(im/255) plt.axis('off') plt.title(f"#param.:{params},\ntest error:{error:.2f}%",fontsize=4) plt.tight_layout() plt.style.use('ggplot') plt.figure() plt.plot(greedy_params,greedy_errors,'-') plt.plot(tr_als_params,tr_als_errors,'-') plt.plot(tt_als_params,tt_als_errors,'-') plt.legend("Greedy TR-ALS TT-ALS".split()) plt.xlabel("parameters") plt.ylabel("relative error") plt.tight_layout() plt.show()
woman = woman[:shortest] man = man[:shortest] np.random.seed(101) noise = np.random.uniform(-1, 1, len(man)) sources = np.stack((woman, man, noise)) A = np.random.uniform(-1, 1, (3, 3)) linear_mix = np.dot(A, sources) pnl_mix = linear_mix.copy() pnl_mix[0] = np.tanh(pnl_mix[0]) pnl_mix[1] = (pnl_mix[1] + pnl_mix[1]**3) / 2 pnl_mix[2] = np.exp(pnl_mix[2]) return linear_mix, pnl_mix, A, sources if __name__ == '__main__': linear_mix, pnl_mix, A, sources = get_data() wavfile.write('./mixtape1.wav', rate=rate, data=linear_mix[0]) wavfile.write('./mixtape2.wav', rate=rate, data=linear_mix[1]) wavfile.write('./mixtape3.wav', rate=rate, data=linear_mix[2]) wavfile.write('./pnlmixtape1.wav', rate=rate, data=pnl_mix[0]) wavfile.write('./pnlmixtape2.wav', rate=rate, data=pnl_mix[1]) wavfile.write('./pnlmixtape3.wav', rate=rate, data=pnl_mix[2]) pl.subplot(311) pl.plot(man) pl.subplot(312) pl.plot(woman) pl.subplot(313) pl.plot(noise) pl.show()