def main(): SAMPLE_NUM = 10 degree = 9 x, y = sin_wgn_sample(SAMPLE_NUM) fig = pylab.figure(1) pylab.grid(True) pylab.xlabel('x') pylab.ylabel('y') pylab.axis([-0.1,1.1,-1.5,1.5]) # sin(x) + noise # markeredgewidth mew # markeredgecolor mec # markerfacecolor mfc # markersize ms # linewidth lw # linestyle ls pylab.plot(x, y,'bo',mew=2,mec='b',mfc='none',ms=8) # sin(x) x2 = linspace(0, 1, 1000) pylab.plot(x2,sin(2*x2*pi),'#00FF00',lw=2,label=r'$y = \sin(x)$') # polynomial fit reg = exp(-18) w = curve_poly_fit(x, y, degree,reg) #w = polyfit(x, y, 3) po = poly1d(w) xx = linspace(0, 1, 1000) pylab.plot(xx, po(xx),'-r',label=r'$M = 9, \ln\lambda = -18$',lw=2) pylab.legend() pylab.show() fig.savefig("poly_fit9_10_reg.pdf")
def TestOverAlpha(): nDim = 5 numOfParticles = 10 maxIteration = 2000 minX = array([-100.0]*nDim) maxX = array([100.0]*nDim) maxV = 1.0*(maxX - minX) minV = -1.0*maxV numOfTrial = 10 intDim = 4 alpha = 0.3 while alpha<1.0: gBest = array([0.0]*maxIteration) for i in xrange(numOfTrial): p1 = AUPSO.PSOProblem(nDim, numOfParticles, maxIteration, minX, maxX, minV, maxV, AUPSO.Sphere,intDim,alpha) p1.run() gBest = gBest + p1.gBestArray[:maxIteration] gBest = gBest / numOfTrial pylab.plot(range(maxIteration), gBest,label='alpha='+str(alpha)) print 'alpha = ', alpha alpha += 0.3 print 'now drawing' pylab.title('$G_{best}$ over '+str(numOfTrial)+' trials'+' intDim='+str(intDim)) pylab.xlabel('The $N^{th}$ Iteration') pylab.ylabel('Average gBest over '+str(numOfTrial)+' runs') pylab.grid(True) pylab.yscale('log') ylim = [-6, 1] ystep = 1.0 # pylab.ylim(ylim[0], ylim[1]) # yticks = linspace(ylim[0], ylim[1], int((ylim[1]-ylim[0])/ystep+1)) # pylab.yticks(tuple(yticks), tuple(map(str,yticks))) pylab.legend(loc='lower left') pylab.show()
def simulationWithoutDrugNick(numViruses, maxPop, maxBirthProb, clearProb, numTrials): """ Run the simulation and plot the graph for problem 3 (no drugs are used, viruses do not have any drug resistance). For each of numTrials trial, instantiates a patient, runs a simulation for 300 timesteps, and plots the average virus population size as a function of time. numViruses: number of SimpleVirus to create for patient (an integer) maxPop: maximum virus population for patient (an integer) maxBirthProb: Maximum reproduction probability (a float between 0-1) clearProb: Maximum clearance probability (a float between 0-1) numTrials: number of simulation runs to execute (an integer) """ #Execute the patient.update method 300 times for each of the numTrials trials steps = 300 countList = [0 for i in range(steps)] for trial in range(numTrials): #Instantiate a fresh set of viruses and a new patient for every trial viruses = [ SimpleVirus(maxBirthProb, clearProb) for i in range(numViruses) ] patient = Patient(viruses, maxPop) for timeStep in range(steps): countList[timeStep] += patient.update() avgList = [ countList[i]/float(numTrials) for i in range(steps) ] #Plot a diagram with xAxis=timeSteps, yAxis=average virus population xAxis = [ x for x in range(steps) ] pylab.figure(2) pylab.plot(xAxis, avgList, 'ro', label='Simple Virus') pylab.xlabel('Number of elapsed time steps') pylab.ylabel('Average size of the virus population') pylab.title('Virus growth in a patient without the aid of any drug') pylab.legend() pylab.show()
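# Hypothetical demo call (not part of the original source). It assumes the SimpleVirus
# and Patient classes from this problem set are defined above and pylab is imported.
if __name__ == '__main__':
    simulationWithoutDrugNick(numViruses=100, maxPop=1000,
                              maxBirthProb=0.1, clearProb=0.05, numTrials=10)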
def time_delays_plot(env, **kwargs): models = kwargs.pop('models', env.models) obj_index = kwargs.pop('obj_index', 0) src_index = kwargs.pop('src_index', 0) key = kwargs.pop('key', 'accepted') d = defaultdict(list) for m in models: obj,data = m['obj,data'][obj_index] t0 = data['arrival times'][src_index][0] for i,t in enumerate(data['arrival times'][src_index][1:]): d[i].append( float('%0.6f'%convert('arcsec^2 to days', t-t0, obj.dL, obj.z, data['nu'])) ) t0 = t s = product(range(1,1+len(d)), ['solid', 'dashed', 'dashdot', 'dotted']) for k,v in d.iteritems(): #print 'td plot', k, len(v) #print v lw,ls = s.next() pl.hist(v, bins=25, histtype='step', color='k', ls=ls, lw=lw, label='%s - %s' % (str(k+1),str(k+2)), **kwargs) #pl.xlim(xmin=0) pl.ylim(ymin=0) pl.xlim(xmin=pl.xlim()[0] - 0.01*(pl.xlim()[1] - pl.xlim()[0])) pl.legend() pl.xlabel(_time_delays_xlabel) pl.ylabel(r'Count')
def TestOverIntDim(): nDim = 10 numOfParticles = 20 maxIteration = 200 minX = array([-100.0]*nDim) maxX = array([100.0]*nDim) maxV = 0.2*(maxX - minX) minV = -1.0*maxV numOfTrial = 20 alpha = 0.0 for intDim in xrange(0,11,2): gBest = array([0.0]*maxIteration) for i in xrange(numOfTrial): p1 = AUPSO.PSOProblem(nDim, numOfParticles, maxIteration, minX, maxX, minV, maxV, AUPSO.Griewank,intDim,alpha) p1.run() gBest = gBest + p1.gBestArray[:maxIteration] gBest = gBest / numOfTrial pylab.plot(range(maxIteration), log10(gBest),label='intDim='+str(intDim)) pylab.title('$G_{best}$ over 20 trials'+' alpha='+str(alpha)) pylab.xlabel('The $N^{th}$ Iteration') pylab.ylabel('Average gBest over '+str(numOfTrial)+' runs') pylab.grid(True) # pylab.yscale('log') ylim = [-6, 1] ystep = 1.0 # pylab.ylim(ylim[0], ylim[1]) # yticks = linspace(ylim[0], ylim[1], int((ylim[1]-ylim[0])/ystep+1)) # pylab.yticks(tuple(yticks), tuple(map(str,yticks))) pylab.legend(loc='lower left') pylab.show()
def chisq_plot(env, **kwargs): _hist(env, 'sigp:chisq', xlabel=r'$\chi^2$') return models = kwargs.pop('models', env.models) objects = kwargs.pop('objects', None) key = kwargs.pop('key', 'accepted') # select a list to append to based on the 'accepted' property. l = [[], [], []] for m in models: # For H0 we only have to look at one model because the others are the same obj, data = m['obj,data'][0] l[m.get(key,2)].append(data['sigp:chisq']) not_accepted, accepted, notag = l for d,s in zip(l, _styles): if d: pl.hist(d, histtype='step', edgecolor=s['c'], zorder=s['z'], label=s['label'], log=False, **kwargs) if not_accepted or accepted: pl.legend() pl.xlabel(_chisq_xlabel) pl.ylabel(r'Count')
def yfromx(self, newtimeaxis, doplot=False, debug=False): if debug: print('fastresampler: yfromx called with following parameters') print(' padvalue:, ', self.padvalue) print(' initstep, hiresstep:', self.initstep, self.hiresstep) print(' initial axis limits:', self.initstart, self.initend) print(' hires axis limits:', self.hiresstart, self.hiresend) print(' requested axis limits:', newtimeaxis[0], newtimeaxis[-1]) outindices = ((newtimeaxis - self.hiresstart) // self.hiresstep).astype(int) if debug: print('len(self.hires_y):', len(self.hires_y)) try: out_y = self.hires_y[outindices] except IndexError: print('') print('indexing out of bounds in fastresampler') print(' padvalue:, ', self.padvalue) print(' initstep, hiresstep:', self.initstep, self.hiresstep) print(' initial axis limits:', self.initstart, self.initend) print(' hires axis limits:', self.hiresstart, self.hiresend) print(' requested axis limits:', newtimeaxis[0], newtimeaxis[-1]) sys.exit() if doplot: fig = pl.figure() ax = fig.add_subplot(111) ax.set_title('fastresampler timecourses') pl.plot(self.hires_x, self.hires_y, newtimeaxis, out_y) pl.legend(('hires', 'output')) pl.show() return out_y
def param_set_averages_plot(results): averages_ocr = [ a[1] for a in sorted( param_set_averages(results, metric='ocr').items(), key=lambda x: int(x[0].split('-')[1])) ] averages_q = [ a[1] for a in sorted( param_set_averages(results, metric='q').items(), key=lambda x: int(x[0].split('-')[1])) ] averages_mse = [ a[1] for a in sorted( param_set_averages(results, metric='mse').items(), key=lambda x: int(x[0].split('-')[1])) ] fig = plt.figure(figsize=(6, 4)) # plt.tight_layout() plt.plot(averages_ocr, label='OCR', linewidth=2.0) plt.plot(averages_q, label='Q', linewidth=2.0) plt.plot(averages_mse, label='MSE', linewidth=2.0) plt.ylim([0, 1]) plt.xlabel(u'Number of hidden neurons') plt.ylabel(u'Mean value of the Q-metric change') plt.grid(True) plt.tight_layout() plt.legend(loc='lower right') plt.show()
def plot_max_avg_Rho_lev012(lev0,lev1,lev2, ic, spath): '''lev -- TimeProfQs() object, whose lev.convert() attribute has been called''' #avgRho Tratio = lev0.t / ic.tCr fig, ax = plt.subplots() #initial plt.hlines(ic.rho0, Tratio.min(),Tratio.max(), colors="magenta", linestyles='dashed',label="initial") #lev0 plt.plot(Tratio,lev0.maxRho,ls="-",c="black",lw=2.,label="max Lev0") plt.plot(Tratio,lev0.minRho,ls="-",c="green",lw=2.,label="min Lev0") plt.plot(Tratio,lev0.avgRho,ls="-",c="blue",lw=2.,label="avg Lev0") plt.plot(Tratio,lev0.avgRho_HiPa,ls="-",c="red",lw=2.,label=r"avg Lev0 ($\rho > \rho_0$)") #lev1 plt.plot(Tratio,lev1.maxRho,ls="--",c="black",lw=2.,label="max Lev1") plt.plot(Tratio,lev1.minRho,ls="--",c="green",lw=2.,label="min Lev1") plt.plot(Tratio,lev1.avgRho,ls="--",c="blue",lw=2.,label="avg Lev1") plt.plot(Tratio,lev1.avgRho_HiPa,ls="--",c="red",lw=2.,label=r"avg Lev1 ($\rho > \rho_0$)") #lev2 plt.plot(Tratio,lev2.maxRho,ls=":",c="black",lw=2.,label="max Lev2") plt.plot(Tratio,lev2.minRho,ls=":",c="green",lw=2.,label="min Lev2") plt.plot(Tratio,lev2.avgRho,ls=":",c="blue",lw=2.,label="avg Lev2") plt.plot(Tratio,lev2.avgRho_HiPa,ls=":",c="red",lw=2.,label=r"avg Lev2 ($\rho > \rho_0$)") #finish plt.yscale("log") plt.xlabel("t / t_cross") plt.ylabel("Densities [g/cm^3]") plt.title(r"Max & Avg Densities, Lev0-2") plt.legend(loc=4, fontsize="small") name = spath+"max_avg_Rho_Lev012.pdf" plt.savefig(name,format="pdf") plt.close()
def plotEventFlop(library, num, eventNames, sizes, times, events, filename = None): from pylab import legend, plot, savefig, semilogy, show, title, xlabel, ylabel import numpy as np arches = sizes.keys() bs = events[arches[0]].keys()[0] data = [] names = [] for event, color in zip(eventNames, ['b', 'g', 'r', 'y']): for arch, style in zip(arches, ['-', ':']): if event in events[arch][bs]: names.append(arch+'-'+str(bs)+' '+event) data.append(sizes[arch][bs]) data.append(1e-3*np.array(events[arch][bs][event])[:,1]) data.append(color+style) else: print 'Could not find %s in %s-%d events' % (event, arch, bs) semilogy(*data) title('Performance on '+library+' Example '+str(num)) xlabel('Number of Dof') ylabel('Computation Rate (GF/s)') legend(names, 'upper left', shadow = True) if filename is None: show() else: savefig(filename) return
def plot_sphere_x( s, fname ): """ put plot of ionization fractions from sphere `s` into fname """ plt.figure() s.Edges.units = 'kpc' s.r_c.units = 'kpc' xx = s.r_c L = s.Edges[-1] plt.plot( xx, np.log10( s.xHe1 ), color='green', ls='-', label = r'$x_{\rm HeI}$' ) plt.plot( xx, np.log10( s.xHe2 ), color='green', ls='--', label = r'$x_{\rm HeII}$' ) plt.plot( xx, np.log10( s.xHe3 ), color='green', ls=':', label = r'$x_{\rm HeIII}$' ) plt.plot( xx, np.log10( s.xH1 ), color='red', ls='-', label = r'$x_{\rm HI}$' ) plt.plot( xx, np.log10( s.xH2 ), color='red', ls='--', label = r'$x_{\rm HII}$' ) plt.xlim( -L/20, L+L/20 ) plt.xlabel( 'r_c [kpc]' ) plt.ylim( -4.5, 0.2 ) plt.ylabel( 'log 10 ( x )' ) plt.grid() plt.legend(loc='best', ncol=2) plt.tight_layout() plt.savefig( 'doc/img/x_' + fname )
def test_mask_LUT(self): """ The masked image has a masked ring around 1.5deg with value -10 without mask the pixels should be at -10 ; with mask they are at 0 """ x1 = self.ai.xrpd_LUT(self.data, 1000) # print self.ai._lut_integrator.lut_checksum x2 = self.ai.xrpd_LUT(self.data, 1000, mask=self.mask) # print self.ai._lut_integrator.lut_checksum x3 = self.ai.xrpd_LUT(self.data, 1000, mask=numpy.zeros(shape=self.mask.shape, dtype="uint8"), dummy= -20.0, delta_dummy=19.5) # print self.ai._lut_integrator.lut_checksum res1 = numpy.interp(1.5, *x1) res2 = numpy.interp(1.5, *x2) res3 = numpy.interp(1.5, *x3) if logger.getEffectiveLevel() == logging.DEBUG: pylab.plot(*x1, label="nomask") pylab.plot(*x2, label="mask") pylab.plot(*x3, label="dummy") pylab.legend() pylab.show() raw_input() self.assertAlmostEqual(res1, -10., 1, msg="Without mask the bad pixels are around -10 (got %.4f)" % res1) self.assertAlmostEqual(res2, 0., 4, msg="With mask the bad pixels are actually at 0 (got %.4f)" % res2) self.assertAlmostEqual(res3, -20., 4, msg="Without mask but dummy=-20 the dummy pixels are actually at -20 (got % .4f)" % res3)
def plot_cost(self): if self.show_cost not in self.train_outputs[0][0]: raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost) train_errors = [o[0][self.show_cost][self.cost_idx] for o in self.train_outputs] test_errors = [o[0][self.show_cost][self.cost_idx] for o in self.test_outputs] numbatches = len(self.train_batch_range) test_errors = numpy.row_stack(test_errors) test_errors = numpy.tile(test_errors, (1, self.testing_freq)) test_errors = list(test_errors.flatten()) test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors)) test_errors = test_errors[:len(train_errors)] numepochs = len(train_errors) / float(numbatches) pl.figure(1) x = range(0, len(train_errors)) pl.plot(x, train_errors, 'k-', label='Training set') pl.plot(x, test_errors, 'r-', label='Test set') pl.legend() ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches) epoch_label_gran = int(ceil(numepochs / 20.)) # aim for about 20 labels epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) # but round to nearest 10 ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs)) pl.xticks(ticklocs, ticklabels) pl.xlabel('Epoch') # pl.ylabel(self.show_cost) pl.title(self.show_cost)
def Doplots_monthly(mypathforResults,PlottingDF,variable_to_fill, Site_ID,units,item): ANN_label=str(item+"_NN") #Do Monthly Plots print "Doing Monthly plot" #t = arange(1, 54, 1) NN_label='Fc' Plottemp = PlottingDF[[NN_label,item]][PlottingDF['day_night']!=1] #Plottemp = PlottingDF[[NN_label,item]].dropna(how='any') figure(1) pl.title('Nighttime ANN v Tower by year-month for '+item+' at '+Site_ID) try: xdata1a=Plottemp[item].groupby([lambda x: x.year,lambda x: x.month]).mean() plotxdata1a=True except: plotxdata1a=False try: xdata1b=Plottemp[NN_label].groupby([lambda x: x.year,lambda x: x.month]).mean() plotxdata1b=True except: plotxdata1b=False if plotxdata1a==True: pl.plot(xdata1a,'r',label=item) if plotxdata1b==True: pl.plot(xdata1b,'b',label=NN_label) pl.ylabel('Flux') pl.xlabel('Year - Month') pl.legend() pl.savefig(mypathforResults+'/ANN and Tower plots by year and month for variable '+item+' at '+Site_ID) #pl.show() pl.close() time.sleep(1)
def plotForce(): figure(size=3,aspect=0.5) subplot(1,2,1) from EvalTraj import plotFF plotFF(vp=351,t=28,f=900,cm=0.6,foffset=8) subplot_annotate() subplot(1,2,2) for i in [1,2,3,4]: R=np.squeeze(np.load('Rdpse%d.npy'%i)) R=stats.nanmedian(R,axis=2)[:,1:,:] dps=np.linspace(-1,1,201)[1:] plt.plot(dps,R[:,:,2].mean(0)); plt.legend([0,0.1,0.2,0.3],loc=3) i=2 R=np.squeeze(np.load('Rdpse%d.npy'%i)) R=stats.nanmedian(R,axis=2)[:,1:,:] mn=np.argmin(R,axis=1) y=np.random.randn(mn.shape[0])*0.00002+0.0438 plt.plot(np.sort(dps[mn[:,2]]),y,'+',mew=1,ms=6,mec=[ 0.39 , 0.76, 0.64]) plt.xlabel('Displacement of Force Origin') plt.ylabel('Average Net Force Magnitude') hh=dps[mn[:,2]] err=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.975,hh.shape[0]) err2=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.75,hh.shape[0]) m=np.mean(hh) print m, m-err,m+err np.save('force',[m, m-err,m+err,m-err2,m+err2]) plt.xlim([-0.5,0.5]) plt.ylim([0.0435,0.046]) plt.grid(b=True,axis='x') subplot_annotate()
def simulationWithDrug(): """ Runs simulations and plots graphs for problem 4. Instantiates a patient, runs a simulation for 150 timesteps, adds guttagonol, and runs the simulation for an additional 150 timesteps. total virus population vs. time and guttagonol-resistant virus population vs. time are plotted """ maxBirthProb = .1 clearProb = .05 resistances = {'guttagonol': False} mutProb = .005 total = [100] g = [0] badVirus = ResistantVirus(maxBirthProb, clearProb, resistances, mutProb) viruses = [badVirus]*total[0] maxPop = 1000 Bob = Patient(viruses, maxPop) for i in range(150): Bob.update() gVirus = 0 for v in Bob.viruses: if v.isResistantTo('guttagonol'): gVirus += 1 #print "g = ", gVirus #print "t = ", len(Bob.viruses) #print g += [gVirus] total += [len(Bob.viruses)] Bob.addPrescription('guttagonol') for i in range(150): Bob.update() gVirus = 0 for v in Bob.viruses: if v.isResistantTo('guttagonol'): gVirus += 1 g += [gVirus] total += [len(Bob.viruses)] pylab.title("Number of Viruses with Different Resistances to Guttagonol") pylab.xlabel("Number of Timesteps") pylab.ylabel("Number of Viruses") pylab.plot(g, '-r', label = 'Resistant') pylab.plot(total, '-b', label = 'Total') pylab.legend(loc = 'lower right') pylab.show()
def plot_heatingrate(data_dict, filename, do_show=True): pl.figure(201) color_list = ['b','r','g','k','y','r','g','b','k','y','r',] fmtlist = ['s','d','o','s','d','o','s','d','o','s','d','o'] result_dict = {} for key in data_dict.keys(): x = data_dict[key][0] y = data_dict[key][1][:,0] y_err = data_dict[key][1][:,1] p0 = np.polyfit(x,y,1) fit = LinFit(np.array([x,y,y_err]).transpose(), show_graph=False) p1 = [0,0] p1[0] = fit.param_dict[0]['Slope'][0] p1[1] = fit.param_dict[0]['Offset'][0] print fit x0 = np.linspace(0,max(x)) cstr = color_list.pop(0) fstr = fmtlist.pop(0) lstr = key + " heating: {0:.2f} ph/ms".format((p1[0]*1e3)) pl.errorbar(x/1e3,y,y_err,fmt=fstr + cstr,label=lstr) pl.plot(x0/1e3,np.polyval(p0,x0),cstr) pl.plot(x0/1e3,np.polyval(p1,x0),cstr) result_dict[key] = 1e3*np.array(fit.param_dict[0]['Slope']) pl.xlabel('Heating time (ms)') pl.ylabel('nbar') if do_show: pl.legend() pl.show() if filename != None: pl.savefig(filename) return result_dict
def PlotLine(type): i=j=0 pylab.figure(type) if type==0: pylab.title("Geomagnetism") pylab.xlabel("Distance") pylab.ylabel("Value") elif type==1: pylab.title("Compass") pylab.xlabel("Distance") pylab.ylabel("Value") for path in pathlist: f = open(path) f.readline() data = np.loadtxt(f) dataAfterfilter = filters.median(data,10) if type == 0: pylab.plot(dataAfterfilter, color[i] ,label =lablelist[i]) i=i+1 pylab.legend() elif type == 1: pylab.plot(data[:,1], color[i] ,label =lablelist[i]) i=i+1 pylab.legend() pass
def plotFeatureImportance(featureImportance, title, originalImage=None, lim=0.06, colorate=None): """ originalImage : the index of the original image. If None, ignore """ indices = featureImportanceIndices(len(featureImportance), originalImage) pl.figure() pl.title(title) if colorate is not None: nbType = len(colorate) X = [[] for i in range(nbType)] Y = [[] for i in range(nbType)] for j, f in enumerate(featureImportance): X[j % nbType].append(j) Y[j % nbType].append(f) for i in range(nbType): pl.bar(X[i], Y[i], align="center", label=colorate[i][0], color=colorate[i][1]) pl.legend() else: pl.bar(range(len(featureImportance)), featureImportance, align="center") #pl.xticks(pl.arange(len(indices)), indices, rotation=-90) pl.xlim([-1, len(indices)]) pl.ylabel("Feature importance") pl.xlabel("Filter indices") pl.ylim(0, lim) pl.show()
def exercise_4_1(): exp_t = np.load('exp_t.npy') exp_somav = np.load('exp_v.npy') exp_somav -= exp_somav[0] exp_somav /= abs(exp_somav.max()) soma_rall, dend_rall = return_ball_and_stick_soma() stim = insert_current_clamp(soma_rall(0.5)) t, v_rall = run_simulation(soma_rall(0.5)) v_rall -= v_rall[0] v_rall /= abs(v_rall.max()) soma_ball = return_ball_soma() stim_ball = insert_current_clamp(soma_ball(0.5)) t_ball, v_ball = run_simulation(soma_ball(0.5)) v_ball -= v_ball[0] v_ball /= abs(v_ball.max()) fig = plt.figure() ax1 = fig.add_subplot(111, xlabel="Time [ms]", ylabel="Voltage [mV]") ax1.plot(t, exp_somav, 'gray', label='"Experiment"') ax1.plot(t, v_rall, 'g', label='Rall') ax1.plot(t_ball, v_ball, 'b', label='ball') plt.legend(loc=4, frameon=False) plt.savefig('exercise_4_1_.png') plt.show()
def showExamplePolyFit(xs,ys,fitDegree1 = 1,fitDegree2 = 2): pylab.figure() pylab.plot(xs,ys,'r.',ms=2.0,label = "measured") # poly fit to noise coeff = numpy.polyfit(xs, ys, fitDegree1) # Predict the curve pys = numpy.polyval(numpy.poly1d(coeff), xs) se = mse(ys, pys) r2 = rSquared(ys, pys) pylab.plot(xs,pys, 'g--', lw=5,label="%d degree fit, SE = %0.10f, R2 = %0.10f" %(fitDegree1,se,r2)) # Poly fit to noise coeffs = numpy.polyfit(xs, ys, fitDegree2) # Predict the curve pys = numpy.polyval(numpy.poly1d(coeffs), xs) se = mse(ys, pys) r2 = rSquared(ys, pys) pylab.plot(xs,pys, 'b--', lw=5,label="%d degree fit, SE = %0.10f, R2 = %0.10f" %(fitDegree2,se,r2)) pylab.legend()
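# Minimal demo sketch (an addition, not from the original source): assumes the mse and
# rSquared helpers used above exist in this module and numpy/pylab are imported. It
# compares a linear and a quadratic fit on a noisy quadratic.
if __name__ == '__main__':
    xs = numpy.linspace(0, 10, 50)
    ys = 3 * xs ** 2 - 2 * xs + 1 + numpy.random.normal(0, 5, size=len(xs))
    showExamplePolyFit(xs, ys, fitDegree1=1, fitDegree2=2)
    pylab.show()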
def simulationDelayedTreatment(numTrials, condition=75): """ Runs simulations and make histograms for problem 1. Runs numTrials simulations to show the relationship between delayed treatment and patient outcome using a histogram. Histograms of final total virus populations are displayed for delays of 300, 150, 75, 0 timesteps (followed by an additional 150 timesteps of simulation). numTrials: number of simulation runs to execute (an integer) """ trialResults = {trialNum: 0 for trialNum in range(numTrials)} for trial in range(numTrials): viruses = [ResistantVirus(0.1, 0.05, {'guttagonol': False}, 0.005) for x in range(100)] treatedPatient = TreatedPatient(viruses, 1000) for timeStep in range(0,condition+150): treatedPatient.update() if timeStep == condition: treatedPatient.addPrescription('guttagonol') print str(trial) + " Completed" trialResults[trial] = treatedPatient.update() print trialResults pylab.hist(trialResults.values(), bins=20) pylab.title("Final Resistant Population - Prescription Given After " + str(condition) + " Time Steps for " + str(numTrials) + " Trials") pylab.xlabel("Final Total Virus Population") pylab.ylabel("Number of Trials") pylab.legend(loc='best') pylab.show()
def PlotNCodonMuts(allmutations, plotfile, title): """Plots number of nucleotide changes per codon mutation. allmutations -> list of all mutations as tuples (wtcodon, r, mutcodon) plotfile -> name of the plot file we create. title -> string giving the plot title. """ pylab.figure(figsize=(3.5, 2.25)) (lmargin, rmargin, bmargin, tmargin) = (0.16, 0.01, 0.21, 0.07) pylab.axes([lmargin, bmargin, 1.0 - lmargin - rmargin, 1.0 - bmargin - tmargin]) nchanges = {1:0, 2:0, 3:0} nmuts = len(allmutations) for (wtcodon, r, mutcodon) in allmutations: assert 3 == len(wtcodon) == len(mutcodon) diffs = len([i for i in range(3) if wtcodon[i] != mutcodon[i]]) nchanges[diffs] += 1 barwidth = 0.6 xs = [1, 2, 3] nactual = [nchanges[x] for x in xs] nexpected = [nmuts * 9. / 63., nmuts * 27. / 63., nmuts * 27. / 63.] bar = pylab.bar([x - barwidth / 2.0 for x in xs], nactual, width=barwidth) pred = pylab.plot(xs, nexpected, 'rx', markersize=6, mew=3) pylab.gca().set_xlim([0.5, 3.5]) pylab.gca().set_ylim([0, max(nactual + nexpected) * 1.1]) pylab.gca().xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(4)) pylab.gca().yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(5)) pylab.xlabel('nucleotide changes in codon') pylab.ylabel('number of mutations') pylab.legend((bar[0], pred[0]), ('actual', 'expected'), loc='upper left', numpoints=1, handlelength=0.9, borderaxespad=0, handletextpad=0.4) pylab.title(title, fontsize=12) pylab.savefig(plotfile) time.sleep(0.5) pylab.show()
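# Hypothetical call sketch (made-up data, not from the original source); each mutant codon
# must differ from its wild-type codon in at least one base, otherwise the counting above
# raises a KeyError on nchanges[0]:
# allmutations = [('GCA', 12, 'GTA'), ('TTG', 47, 'CAG'), ('AAA', 80, 'AAC'), ('GGG', 95, 'CCC')]
# PlotNCodonMuts(allmutations, 'ncodonmuts.pdf', 'example library')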
def main(): amps = [0.167e-9, 0.25e-9, 0.333e-9] model_dict = setup_model() for ii, a in enumerate(amps): do_sim(model_dict['stimulus'], a) config.logger.info('##### %d' % (model_dict['tab_vm'].size)) vm = model_dict['tab_vm'].vector inject = model_dict['tab_stim'].vector.copy() t = np.linspace(0, simtime, len(vm)) fname = 'data_fig_a3_%s.txt' % (chr(ord('A')+ii)) np.savetxt(fname, np.vstack((t, inject, vm)).transpose()) msg = 'Saved data for %g A current pulse in %s' % (a, fname) config.logger.info(msg) print(msg) pylab.subplot(3,1,ii+1) pylab.title('%g nA' % (a*1e9)) pylab.plot(t, vm, label='soma-Vm (mV)') stim_boundary = np.flatnonzero(np.diff(inject)) pylab.plot((t[stim_boundary[0]]), (vm.min()), 'r^', label='stimulus start') pylab.plot((t[stim_boundary[-1]]), (vm.min()), 'gv', label='stimulus end') pylab.legend() pylab.savefig('fig_a3.png') pylab.show()
def rmsdSpreadSubplot(multiplier=1.0, layout=(-1, -1)): rmsd_data = dict( (e, rad_data[e]['innov'][quant]) for e in rad_data.iterkeys() ) spread_data = dict( (e, rad_data[e]['spread'][quant]) for e in rad_data.iterkeys() ) times = temp.getTimes() n_t = len(times) for exp, exp_name in exp_names.iteritems(): pylab.plot(sawtooth(times, times)[:(n_t + 1)], rmsd_data[exp][:(n_t + 1)], color=colors[exp], linestyle='-') pylab.plot(times[(n_t / 2):], rmsd_data[exp][n_t::2], color=colors[exp], linestyle='-') for exp, exp_name in exp_names.iteritems(): pylab.plot(sawtooth(times, times)[:(n_t + 1)], spread_data[exp][:(n_t + 1)], color=colors[exp], linestyle='--') pylab.plot(times[(n_t / 2):], spread_data[exp][n_t::2], color=colors[exp], linestyle='--') ylim = pylab.ylim() pylab.plot(times, -1 * np.ones((len(times),)), color='#999999', linestyle='-', label="RMS Innovation") pylab.plot(times, -1 * np.ones((len(times),)), color='#999999', linestyle='--', label="Spread") pylab.axhline(y=7, color='k', linestyle=':') pylab.axvline(x=14400, color='k', linestyle=':') pylab.ylabel("RMS Innovation/Spread (dBZ)", size='large') pylab.xlim(times[0], times[-1]) pylab.ylim(ylim) pylab.legend(loc=4) pylab.xticks(times[::2], [ "" for t in times[::2] ]) pylab.yticks(size='x-large') return
def plot_datasets(dataset_ids, title=None, legend=True, labels=True): """ Plots one or more dataset. :param dataset_ids: list of datasets to plot :type dataset_ids: list of integers :param title: title of the plot :type title: string :param legend: whether or not to show legend :type legend: boolean :param labels: whether or not to plot point labels :type labels: boolean """ title = title if title else "Datasets " + ",".join( [str(d) for d in dataset_ids]) pl.title(title) data = {k: v for k, v in npoints.items() if k in dataset_ids} lines = [pl.plot(zip(*p)[0], zip(*p)[1], 'o-')[0] for p in data.values()] if legend: pl.legend(lines, data.keys()) if labels: for x, y, l in [i for s in data.values() for i in s]: pl.annotate(str(l), xy=(x, y), xytext=(x, y + 0.1)) pl.grid(True) return pl
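# Hypothetical usage sketch: `npoints` is assumed to be a module-level dict mapping a
# dataset id to a list of (x, y, label) tuples (an assumption; the real structure is
# defined elsewhere in this module). The zip(*p)[0] indexing above implies Python 2.
# npoints = {1: [(0.0, 0.1, 'a'), (1.0, 0.9, 'b')], 2: [(0.0, 0.5, 'c'), (1.0, 0.4, 'd')]}
# plot_datasets([1, 2], title="Example datasets").show()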
def simulationWithoutDrug(numViruses, maxPop, maxBirthProb, clearProb, numTrials): """ Run the simulation and plot the graph for problem 3 (no drugs are used, viruses do not have any drug resistance). For each of numTrials trial, instantiates a patient, runs a simulation for 300 timesteps, and plots the average virus population size as a function of time. numViruses: number of SimpleVirus to create for patient (an integer) maxPop: maximum virus population for patient (an integer) maxBirthProb: Maximum reproduction probability (a float between 0-1) clearProb: Maximum clearance probability (a float between 0-1) numTrials: number of simulation runs to execute (an integer) """ # TODO steps = 300 trialResults = [[] for s in range(steps)] for i in range(numTrials): viruses = [SimpleVirus(maxBirthProb,clearProb) for v in range(numViruses)] patient = Patient(viruses, maxPop) for step in range(300): trialResults[step].append(patient.update()) resultsSummary = [sum(l) / float(numTrials) for l in trialResults] pylab.plot(resultsSummary, label="Total Virus Population") pylab.title("SimpleVirus simulation") pylab.xlabel("Time Steps") pylab.ylabel("Average Virus Population") pylab.legend() pylab.show()
def RosenbrockTest(): nDim = 3 numOfParticles = 20 maxIteration = 200 minX = array([-5.0]*nDim) maxX = array([5.0]*nDim) maxV = 0.2*(maxX - minX) minV = -1.0*maxV numOfTrial = 20 gBest = array([0.0]*maxIteration) for i in xrange(numOfTrial): p1 = RPSO.PSOProblem(nDim, numOfParticles, maxIteration, minX, maxX, minV, maxV, RPSO.Rosenbrock) p1.run() gBest = gBest + p1.gBestArray[:maxIteration] gBest = gBest / numOfTrial pylab.title('$G_{best}$ over 20 trials') pylab.xlabel('The $N^{th}$ Iteration') pylab.ylabel('Average gBest over '+str(numOfTrial)+' runs (logscale)') pylab.grid(True) # pylab.yscale('log') ymin, ymax = -1.5, 2.5 ystep = 0.5 pylab.ylim(ymin, ymax) yticks = linspace(ymin, ymax, (ymax-ymin)/ystep+1) pylab.yticks(tuple(yticks),tuple(map(str,yticks))) pylab.plot(range(maxIteration), log10(gBest),'-', label='Global best') pylab.legend() pylab.show()
def plotMonthlyTrend(keywords, title, monthList): db = mysql(host, user, passwd, dbName) db.connect() allKeywordTrend = [] for k in keywords: allCount = [] for m in monthList: rows = db.getMonthlyKeywordCount(k, m) print rows count = 0 for r in rows: count += r[0] percent = count*1.0 cc = db.getMonthlyTweetCount(m) if cc == 0: percent = 0.0 else: percent /= cc allCount.append(percent) allKeywordTrend.append(allCount) db.close() for p in allKeywordTrend: pylab.plot(range(1, len(p)+1), p) pylab.title(title) pylab.legend(keywords) pylab.xlabel("month") pylab.ylabel("frequency of occurrence") pylab.show()
def plot_dat(ax, file_name): with open(file_name, 'rb') as datfile: l=[] for row in datfile: if len(row.split('|')[-1].split()): l.append(row.split('|')[-1].split()) # print row legend_names=l[1] l=l[2:] data=[] for row in l: for i in range(len(row)): try: type=row[i][-1] row[i]=float(row[i][:-1]) if type=='G': row[i]*=1000.0 except: # print i row[i]=0. data.append([row[0]]) data=zip(*data) data=numpy.array(data) shape=data.transpose().shape ax.plot(numpy.mgrid[0:shape[0]*10:10,0:1][0], 100*(data.transpose()-data.transpose()[0,0])/(1533.0+59900.0)) pylab.legend([legend_names[0]]) pylab.ylabel('Memory (MB)') pylab.xlabel('Time (sec)') pylab.show()
def over_fitting(self, x_train, y_train, x_test, y_test, n_estimator_range=range(1, 301, 10)): """ Calculate overfitting using accuracy for ERT Class model. We get the training Acc and the test Acc, and also plot it. """ accuracy_test_list = [] accuracy_train_list = [] if self.sampling is None: class_weight = self.class_weight elif self.sampling == 'ALLKNN': x_train, y_train = under_sampling(x_train, y_train) class_weight = None else: x_train, y_train = over_sampling(x_train, y_train, model=self.sampling) class_weight = None if isinstance(x_train, pd.DataFrame): x_train = x_train.values if isinstance(y_train, (pd.DataFrame, pd.Series)): y_train = y_train.values if isinstance(x_test, pd.DataFrame): x_test = x_test.values if isinstance(y_test, (pd.DataFrame, pd.Series)): y_test = y_test.values min_sample_leaf = round(x_train.shape[0] * 0.01) min_sample_split = min_sample_leaf * 10 max_features = 'sqrt' for iter in n_estimator_range: print('iter nº: ', iter) file_model = ensemble.ExtraTreesClassifier(criterion='entropy', bootstrap=self.bootstrap, min_samples_leaf=min_sample_leaf, min_samples_split=min_sample_split, n_estimators=iter, max_depth=self.max_depth, max_features=max_features, oob_score=self.oob_score, random_state=531, verbose=1, class_weight=class_weight, n_jobs=1) file_model.fit(x_train, y_train) predictions = file_model.predict_proba(x_test) predictions = np.delete(predictions, 0, axis=1) predictions = (predictions > self.final_threshold).astype(int) accuracy_test_list.append(accuracy_score(y_test, predictions)) predictions = file_model.predict_proba(x_train) predictions = np.delete(predictions, 0, axis=1) predictions = (predictions > self.final_threshold).astype(int) accuracy_train_list.append(accuracy_score(y_train, predictions)) plot.figure() plot.plot(n_estimator_range, accuracy_train_list, label='Training Set Accuracy') plot.plot(n_estimator_range, accuracy_test_list, label='Test Set Accuracy') plot.legend(loc='upper right') plot.xlabel('Number of Trees in Ensamble') plot.ylabel('Accuracy') plot.show()
def Metallicity(self, G): print('Plotting the metallicities') seed(2222) plt.figure() # New figure ax = plt.subplot(111) # 1 plot on the figure w = np.where((G.Type == 0) & (G.ColdGas / (G.StellarMass + G.ColdGas) > 0.1) & (G.StellarMass > 0.01))[0] if (len(w) > dilute): w = sample(w, dilute) mass = np.log10(G.StellarMass[w] * 1.0e10 / self.Hubble_h) Z = np.log10((G.MetalsColdGas[w] / G.ColdGas[w]) / 0.02) + 9.0 plt.scatter(mass, Z, marker='o', s=1, c='k', alpha=0.5, label='Model galaxies') # overplot Tremonti et al. 2003 (h=0.7) w = np.arange(7.0, 13.0, 0.1) Zobs = -1.492 + 1.847 * w - 0.08026 * w * w if (whichimf == 0): # Conversion from Kroupa IMF to Slapeter IMF plt.plot(np.log10((10**w * 1.5)), Zobs, 'b-', lw=2.0, label='Tremonti et al. 2003') elif (whichimf == 1): # Conversion from Kroupa IMF to Slapeter IMF to Chabrier IMF plt.plot(np.log10((10**w * 1.5 / 1.8)), Zobs, 'b-', lw=2.0, label='Tremonti et al. 2003') plt.ylabel(r'$12\ +\ \log_{10}[\mathrm{O/H}]$') # Set the y... plt.xlabel(r'$\log_{10} M_{\mathrm{stars}}\ (M_{\odot})$' ) # and the x-axis labels # Set the x and y axis minor ticks ax.xaxis.set_minor_locator(plt.MultipleLocator(0.05)) ax.yaxis.set_minor_locator(plt.MultipleLocator(0.25)) plt.axis([8.0, 12.0, 8.0, 9.5]) leg = plt.legend(loc='lower right') leg.draw_frame(False) # Don't want a box frame for t in leg.get_texts(): # Reduce the size of the text t.set_fontsize('medium') outputFile = OutputDir + '7.Metallicity' + OutputFormat plt.savefig(outputFile) # Save the figure print('Saved file to', outputFile) plt.close() # Add this plot to our output list OutputList.append(outputFile)
def Detect_Logo_SIFT_Video(args): Debug = int(args.debug) logo = cv2.imread(args.logo) # logo image scale = 4 logo = cv2.resize(logo, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) vinput = args.input # input video if not os.path.isfile(vinput): logging.error('---video does not exist---') sys.exit(1) cap = cv2.VideoCapture(vinput) logging.warning( '***************************************Opening the video: ' + args.input + ' for TV Logo detection**********************************************') fintv = float(args.frequency) fps = cap.get(5) # frame per second in video frn = int(cap.get(7)) # frame number outputpath = args.outputpath if outputpath != '' and not os.path.exists(outputpath): os.makedirs(outputpath) # verify beginning and end time if args.beginning is None: bese = 0 else: bese = args.beginning if args.end is None: endse = (frn / fps) else: endse = args.end if bese >= endse or bese < 0 or endse > (frn / fps): logging.error('wrong arguments of beginning and end time') sys.exit(1) logging.info('process each segment of video {0}'.format(args.input)) befr = int(bese * fps) # begining frame endfr = int(endse * fps) # ending frame n_matches = [] frames = [] if cap.isOpened(): # if video is opened, process frames ret, frame = cap.read() counter = 0 #print('endfr = %d' % endfr + 'endse %d ' % endse + 'fps %d' %fps + 'frn %d' %frn ) for i in xrange(befr, endfr, int(np.round(fps / fintv))): #print('i = %d' %i + '/ %d' %frn) while (counter != i): #print('counter = %d' %counter) ret, frame = cap.read() counter += 1 #Crop the image to the ROI and zoom for better detection performances x1 = int(args.x1) x2 = int(args.x2) y1 = int(args.y1) y2 = int(args.y2) #print('Crop image to x1 = %d' %x1 + 'x2 = %d' %x2+'y1 = %d' %y1 + 'y2 = %d' %y2) frame_ROI = frame[x1:x2, y1:y2] cv2.imwrite('frame_ROI.png', frame_ROI) scale = 4 #frame_ROI = cv2.resize(frame_ROI, None, fx= scale, fy= scale, interpolation=cv2.INTER_CUBIC) #n_matches.append(Detect_Logo_SIFT_Frame(logo,frame_ROI,Debug, i)) #n_matches.append(Detect_Logo_SURF_Frame(logo,frame_ROI,Debug, i)) #n_matches.append(Detect_Logo_ORB_Frame(logo,frame_ROI,Debug, i)) n_matches.append(Detect_Logo_BRISK_Frame(logo, frame_ROI, Debug, i)) #n_matches.append(Detect_Logo_FREAK_Frame(logo,frame_ROI,Debug, i)) frames.append(int(i / fps)) pl.figure(figsize=(30, 4)) chunckname_wextension = os.path.basename(args.input) chunckname = chunckname_wextension.split('.')[0] if not os.path.isfile('/opt/exe/textocr/demo/Chunks/GroundTruth/' + chunckname + '_Pub_GroundTruth.txt'): logging.warning( 'No ground Truth file found for commercial adds detection') pl.plot(frames, n_matches, 'r') else: GT = np.loadtxt('/opt/exe/textocr/demo/Chunks/GroundTruth/' + chunckname + '_Pub_GroundTruth.txt') GT = GT * (max(n_matches)) print('GT dimension %d' % np.shape(GT)) print('histo_array dimension %d' % np.shape(n_matches)) pl.plot(frames, n_matches, 'r', label='Logo match') #pl.plot(frames, GT, 'g', label='Ground Truth') pl.legend() pl.savefig(os.path.join(outputpath, args.outputname + "_logoMatch.jpg"), dpi=50) pl.show()
}) kde = gaussian_kde(height) #, bw_method=bandwidth / height.std(ddof=1)) heightGrid = np.linspace(0, 300, 100) heightKde = kde.evaluate(heightGrid) pylab.subplot(4, 5, i) pylab.plot(heightGrid, heightKde) axLim = pylab.axis() h = pylab.plot([50, 50], [0, heightKde.max()], 'r') pylab.grid('on') pylab.xlabel('Plant Height (cm)', fontdict={'size': fontSize}) pylab.ylabel('Prob.(height)', fontdict={'size': fontSize}) pylab.title('Height dist. for %s' % (plotKey), fontdict={'size': fontSize}) if i >= plots.__len__(): pylab.legend(h, ['50cm threshold'], loc='upper left', bbox_to_anchor=(1.2, 1), prop={'size': fontSize}) pylab.axis([axLim[0], axLim[1], 0, heightKde.max()]) i += 1 i = 1 pylab.figure() for plotKey, plot in plots.iteritems(): yc = np.array([record['yc'] for record in plot]) height = np.float64([record['height'] for record in plot]) idx = np.argsort(height) ycCumSum = np.cumsum(yc[idx]) pylab.subplot(4, 5, i) pylab.plot(height[idx], ycCumSum) axLim = pylab.axis()
if first_only[i] and fi != 0: x[i] = [] y[i] = [] if labels: lab = labels[fi*len(fields):(fi+1)*len(fields)] else: lab = fields[:] if args.multi: col = colors[:] else: col = colors[fi*len(fields):] fig = plotit(x, y, lab, colors=col) for i in range(0, len(x)): x[i] = [] y[i] = [] if args.output is None: pylab.show() pylab.draw() input('press enter to exit....') else: fname, fext = os.path.splitext(args.output) if fext == '.html': import mpld3 html = mpld3.fig_to_html(fig) f_out = open(args.output, 'w') f_out.write(html) f_out.close() else: pylab.legend(loc=2,prop={'size':8}) pylab.savefig(args.output, bbox_inches='tight', dpi=200)
def multi_way(A=64): pylab.ion() pylab.figure(2, figsize=figsize) pylab.clf() qvec = [0.001, 0.01, 0.1] cvec = ['b', 'c', 'g'] m = np.logspace(0, 5) pylab.loglog(m, m, 'k:', linewidth=lw) for (q, c) in zip(qvec, cvec): print q p = 1 - q R = np.floor(np.log(q) / np.log(1 - q)) B = p**np.arange(1, R) D = np.ones(len(B)) B = np.concatenate([B, q * p**np.arange(0, R)]) D = np.concatenate([D, np.arange(R)]) for pow in range(2, 88): B = np.concatenate([B, q**pow * p**np.arange(0, R)]) D = np.concatenate([D, sm.comb(np.arange(R), pow)]) assert len(B) == len(D), 'len(B) != len(D)' if len(B) > 10**8: print pow, 'breaking' break B = np.concatenate(([1], B)) D = np.concatenate(([1], D)) i = B.argsort()[::-1] B = (D[i] * B[i]).cumsum() D = D[i].cumsum() j = np.nonzero((D <= 10**5))[0] #pylab.loglog(np.arange(A, 100001), A*C[np.arange(A-1, 100000)/A]) pylab.loglog(np.concatenate(([1], A * D[j])), np.concatenate(([1], A * B[j])), c, linewidth=lw) pylab.draw() pylab.loglog(np.concatenate(([1], m * A)), np.concatenate(([1], np.log2(m + 1) * A)), 'purple', linewidth=lw) pylab.xlabel('Number of cores', fontsize=fs) pylab.ylabel('Expected speedup', fontsize=fs) pylab.title('Expected speedup with %d-way parallelism' % A, fontsize=fs) pylab.legend(['$E[S_J] = J$'] + [('$q = %1.4f' % q).strip('0') + '$' for q in qvec] + ['$q = 0.5$'], loc='upper left', fontsize=fs) pylab.xticks(fontsize=fs) pylab.yticks(fontsize=fs) pylab.axis((1, 10**4, 1, 10**4)) pylab.savefig('../figs/speedup-%d.pdf' % A)
def weak_bounds(): pylab.ion() pylab.figure(1, figsize=figsize) pylab.clf() qvec = [0.001, 0.01, 0.1] m = np.logspace(0, 5) pylab.loglog(m, m, 'k:', linewidth=lw) cvec = ['b', 'c', 'g'] """ for (q, c) in zip(qvec, cvec): K = np.log(q)/np.log(1-q) M = np.linspace(1, K) MM = np.logspace(np.log10(M[-1]), 5) SS = np.zeros(len(MM)) + K SS[MM < K] = MM[MM < K] SS[MM > 2**K] += np.log2(MM[MM > 2**K] - K) pylab.loglog(MM, SS, c + '-', linewidth=1) """ for (q, c) in zip(qvec, cvec): K = np.log(q) / np.log(1 - q) print q, K M = np.linspace(1, K) S = (1 - q - (1 - q)**M) / q MM = np.array(M.tolist() + np.logspace(np.log10(M[-1]), 5).tolist()) SS = np.array(S.tolist() + [S[-1]] * 50) SL = SS * 0 + K SL[MM < K] = MM[MM < K] SL[MM > 2**K] += np.log2(MM[MM > 2**K] - K) pylab.fill_between(MM, SL, SS, color=c, alpha=0.2) A = 1 for (q, c) in zip(qvec, cvec): p = 1 - q R = np.log(q) / np.log(1 - q) B = p**np.arange(1, R) D = np.ones(len(B)) B = np.concatenate([B, q * p**np.arange(0, R)]) D = np.concatenate([D, np.arange(R)]) for pow in range(2, 88): B = np.concatenate([B, q**pow * p**np.arange(0, R)]) D = np.concatenate([D, sm.comb(np.arange(R), pow)]) assert len(B) == len(D), 'len(B) != len(D)' if len(B) > 10**8: print pow, 'breaking' break B = np.concatenate(([1], B)) D = np.concatenate(([1], D)) i = B.argsort()[::-1] B = (D[i] * B[i]).cumsum() D = D[i].cumsum() j = np.nonzero((D >= A) & (D <= 10**5))[0] #pylab.loglog(np.arange(A, 100001), A*C[np.arange(A-1, 100000)/A]) pylab.loglog(D[j], A * B[j / A], c, linewidth=lw) pylab.draw() pylab.loglog(m, np.log2(m + 1), 'purple', linewidth=lw) pylab.yscale('log') pylab.xscale('log') #pylab.loglog(MM, SS, c + '-', linewidth=1) pylab.xlabel('Number of cores $(J)$', fontsize=fs) pylab.ylabel('Expected speedup $(E[S_J])$', fontsize=fs) pylab.title('Expected speedup with simple bounds', fontsize=fs) pylab.legend(['$E[S_J] = J$'] + [('$q = %1.4f' % q).strip('0') + '$' for q in qvec] + ['$E[S_J] = \log_2 (J+1)$'], loc='upper left', fontsize=fs) pylab.xticks(fontsize=fs) pylab.yticks(fontsize=fs) pylab.axis((1, 10**4, 1, 10**4)) pylab.savefig('../figs/expected-speedup.pdf')
def MassReservoirScatter(self, G): print( 'Plotting the mass in stellar, cold, hot, ejected, ICS reservoirs') seed(2222) plt.figure() # New figure ax = plt.subplot(111) # 1 plot on the figure w = np.where((G.Type == 0) & (G.Mvir > 1.0) & (G.StellarMass > 0.0))[0] if (len(w) > dilute): w = sample(w, dilute) mvir = np.log10(G.Mvir[w] * 1.0e10) plt.scatter(mvir, np.log10(G.StellarMass[w] * 1.0e10), marker='o', s=0.3, c='k', alpha=0.5, label='Stars') plt.scatter(mvir, np.log10(G.ColdGas[w] * 1.0e10), marker='o', s=0.3, color='blue', alpha=0.5, label='Cold gas') plt.scatter(mvir, np.log10(G.HotGas[w] * 1.0e10), marker='o', s=0.3, color='red', alpha=0.5, label='Hot gas') plt.scatter(mvir, np.log10(G.EjectedMass[w] * 1.0e10), marker='o', s=0.3, color='green', alpha=0.5, label='Ejected gas') plt.scatter(mvir, np.log10(G.IntraClusterStars[w] * 1.0e10), marker='o', s=10, color='yellow', alpha=0.5, label='Intracluster stars') plt.ylabel(r'$\mathrm{stellar,\ cold,\ hot,\ ejected,\ ICS\ mass}$' ) # Set the y... plt.xlabel(r'$\log\ M_{\mathrm{vir}}\ (h^{-1}\ M_{\odot})$' ) # and the x-axis labels plt.axis([10.0, 14.0, 7.5, 12.5]) leg = plt.legend(loc='upper left') leg.draw_frame(False) # Don't want a box frame for t in leg.get_texts(): # Reduce the size of the text t.set_fontsize('medium') plt.text(13.5, 8.0, r'$\mathrm{All}$') outputFile = OutputDir + '9.MassReservoirScatter' + OutputFormat plt.savefig(outputFile) # Save the figure print('Saved file to', outputFile) plt.close() # Add this plot to our output list OutputList.append(outputFile)
def BaryonicMassFunction(self, G): print('Plotting the baryonic mass function') plt.figure() # New figure ax = plt.subplot(111) # 1 plot on the figure binwidth = 0.1 # mass function histogram bin width # calculate BMF w = np.where(G.StellarMass + G.ColdGas > 0.0)[0] mass = np.log10( (G.StellarMass[w] + G.ColdGas[w]) * 1.0e10 / self.Hubble_h) mi = np.floor(min(mass)) - 2 ma = np.floor(max(mass)) + 2 NB = (ma - mi) / binwidth (counts, binedges) = np.histogram(mass, range=(mi, ma), bins=NB) # Set the x-axis values to be the centre of the bins xaxeshisto = binedges[:-1] + 0.5 * binwidth # Bell et al. 2003 BMF (h=1.0 converted to h=0.73) M = np.arange(7.0, 13.0, 0.01) Mstar = np.log10(5.3 * 1.0e10 / self.Hubble_h / self.Hubble_h) alpha = -1.21 phistar = 0.0108 * self.Hubble_h * self.Hubble_h * self.Hubble_h xval = 10.0**(M - Mstar) yval = np.log(10.) * phistar * xval**(alpha + 1) * np.exp(-xval) if (whichimf == 0): # converted diet Salpeter IMF to Salpeter IMF plt.plot(np.log10(10.0**M / 0.7), yval, 'b-', lw=2.0, label='Bell et al. 2003') # Plot the SMF elif (whichimf == 1): # converted diet Salpeter IMF to Salpeter IMF, then to Chabrier IMF plt.plot(np.log10(10.0**M / 0.7 / 1.8), yval, 'g--', lw=1.5, label='Bell et al. 2003') # Plot the SMF # Overplot the model histograms plt.plot(xaxeshisto, counts / self.volume * self.Hubble_h * self.Hubble_h * self.Hubble_h / binwidth, 'k-', label='Model') plt.yscale('log', nonposy='clip') plt.axis([8.0, 12.5, 1.0e-6, 1.0e-1]) # Set the x-axis minor ticks ax.xaxis.set_minor_locator(plt.MultipleLocator(0.1)) plt.ylabel( r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1})$') # Set the y... plt.xlabel(r'$\log_{10}\ M_{\mathrm{bar}}\ (M_{\odot})$' ) # and the x-axis labels leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1) leg.draw_frame(False) # Don't want a box frame for t in leg.get_texts(): # Reduce the size of the text t.set_fontsize('medium') outputFile = OutputDir + '2.BaryonicMassFunction' + OutputFormat plt.savefig(outputFile) # Save the figure print('Saved file to', outputFile) plt.close() # Add this plot to our output list OutputList.append(outputFile)
def GasMassFunction(self, G): print('Plotting the cold gas mass function') plt.figure() # New figure ax = plt.subplot(111) # 1 plot on the figure binwidth = 0.1 # mass function histogram bin width # calculate all w = np.where(G.ColdGas > 0.0)[0] mass = np.log10(G.ColdGas[w] * 1.0e10 / self.Hubble_h) sSFR = (G.SfrDisk[w] + G.SfrBulge[w]) / (G.StellarMass[w] * 1.0e10 / self.Hubble_h) mi = np.floor(min(mass)) - 2 ma = np.floor(max(mass)) + 2 NB = (ma - mi) / binwidth (counts, binedges) = np.histogram(mass, range=(mi, ma), bins=NB) # Set the x-axis values to be the centre of the bins xaxeshisto = binedges[:-1] + 0.5 * binwidth # additionally calculate red w = np.where(sSFR < 10.0**sSFRcut)[0] massRED = mass[w] (countsRED, binedges) = np.histogram(massRED, range=(mi, ma), bins=NB) # additionally calculate blue w = np.where(sSFR > 10.0**sSFRcut)[0] massBLU = mass[w] (countsBLU, binedges) = np.histogram(massBLU, range=(mi, ma), bins=NB) # Baldry+ 2008 modified data used for the MCMC fitting Zwaan = np.array([[6.933, -0.333], [7.057, -0.490], [7.209, -0.698], [7.365, -0.667], [7.528, -0.823], [7.647, -0.958], [7.809, -0.917], [7.971, -0.948], [8.112, -0.927], [8.263, -0.917], [8.404, -1.062], [8.566, -1.177], [8.707, -1.177], [8.853, -1.312], [9.010, -1.344], [9.161, -1.448], [9.302, -1.604], [9.448, -1.792], [9.599, -2.021], [9.740, -2.406], [9.897, -2.615], [10.053, -3.031], [10.178, -3.677], [10.335, -4.448], [10.492, -5.083]], dtype=np.float32) ObrRaw = np.array([[7.300, -1.104], [7.576, -1.302], [7.847, -1.250], [8.133, -1.240], [8.409, -1.344], [8.691, -1.479], [8.956, -1.792], [9.231, -2.271], [9.507, -3.198], [9.788, -5.062]], dtype=np.float32) ObrCold = np.array([[8.009, -1.042], [8.215, -1.156], [8.409, -0.990], [8.604, -1.156], [8.799, -1.208], [9.020, -1.333], [9.194, -1.385], [9.404, -1.552], [9.599, -1.677], [9.788, -1.812], [9.999, -2.312], [10.172, -2.656], [10.362, -3.500], [10.551, -3.635], [10.740, -5.010]], dtype=np.float32) ObrCold_xval = np.log10(10**(ObrCold[:, 0]) / self.Hubble_h / self.Hubble_h) ObrCold_yval = (10**(ObrCold[:, 1]) * self.Hubble_h * self.Hubble_h * self.Hubble_h) Zwaan_xval = np.log10(10**(Zwaan[:, 0]) / self.Hubble_h / self.Hubble_h) Zwaan_yval = (10**(Zwaan[:, 1]) * self.Hubble_h * self.Hubble_h * self.Hubble_h) ObrRaw_xval = np.log10(10**(ObrRaw[:, 0]) / self.Hubble_h / self.Hubble_h) ObrRaw_yval = (10**(ObrRaw[:, 1]) * self.Hubble_h * self.Hubble_h * self.Hubble_h) plt.plot(ObrCold_xval, ObrCold_yval, color='black', lw=7, alpha=0.25, label='Obr. \& Raw. 2009 (Cold Gas)') plt.plot(Zwaan_xval, Zwaan_yval, color='cyan', lw=7, alpha=0.25, label='Zwaan et al. 2005 (HI)') plt.plot(ObrRaw_xval, ObrRaw_yval, color='magenta', lw=7, alpha=0.25, label='Obr. \& Raw. 2009 (H2)') # Overplot the model histograms plt.plot(xaxeshisto, counts / self.volume * self.Hubble_h * self.Hubble_h * self.Hubble_h / binwidth, 'k-', label='Model - Cold Gas') plt.yscale('log', nonposy='clip') plt.axis([8.0, 11.5, 1.0e-6, 1.0e-1]) # Set the x-axis minor ticks ax.xaxis.set_minor_locator(plt.MultipleLocator(0.1)) plt.ylabel( r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1})$') # Set the y... 
plt.xlabel(r'$\log_{10} M_{\mathrm{X}}\ (M_{\odot})$' ) # and the x-axis labels leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1) leg.draw_frame(False) # Don't want a box frame for t in leg.get_texts(): # Reduce the size of the text t.set_fontsize('medium') outputFile = OutputDir + '3.GasMassFunction' + OutputFormat plt.savefig(outputFile) # Save the figure print('Saved file to', outputFile) plt.close() # Add this plot to our output list OutputList.append(outputFile)
def VelocityDistribution(self, G): print('Plotting the velocity distribution of all galaxies') seed(2222) mi = -40.0 ma = 40.0 binwidth = 0.5 NB = (ma - mi) / binwidth # set up figure plt.figure() ax = plt.subplot(111) pos_x = G.Pos[:, 0] / self.Hubble_h pos_y = G.Pos[:, 1] / self.Hubble_h pos_z = G.Pos[:, 2] / self.Hubble_h vel_x = G.Vel[:, 0] vel_y = G.Vel[:, 1] vel_z = G.Vel[:, 2] dist_los = np.sqrt(pos_x * pos_x + pos_y * pos_y + pos_z * pos_z) vel_los = (pos_x / dist_los) * vel_x + (pos_y / dist_los) * vel_y + ( pos_z / dist_los) * vel_z dist_red = dist_los + vel_los / (self.Hubble_h * 100.0) tot_gals = len(pos_x) (counts, binedges) = np.histogram(vel_los / (self.Hubble_h * 100.0), range=(mi, ma), bins=NB) xaxeshisto = binedges[:-1] + 0.5 * binwidth plt.plot(xaxeshisto, counts / binwidth / tot_gals, 'k-', label='los-velocity') (counts, binedges) = np.histogram(vel_x / (self.Hubble_h * 100.0), range=(mi, ma), bins=NB) xaxeshisto = binedges[:-1] + 0.5 * binwidth plt.plot(xaxeshisto, counts / binwidth / tot_gals, 'r-', label='x-velocity') (counts, binedges) = np.histogram(vel_y / (self.Hubble_h * 100.0), range=(mi, ma), bins=NB) xaxeshisto = binedges[:-1] + 0.5 * binwidth plt.plot(xaxeshisto, counts / binwidth / tot_gals, 'g-', label='y-velocity') (counts, binedges) = np.histogram(vel_z / (self.Hubble_h * 100.0), range=(mi, ma), bins=NB) xaxeshisto = binedges[:-1] + 0.5 * binwidth plt.plot(xaxeshisto, counts / binwidth / tot_gals, 'b-', label='z-velocity') plt.yscale('log', nonposy='clip') plt.axis([mi, ma, 1e-5, 0.5]) # plt.axis([mi, ma, 0, 0.13]) plt.ylabel(r'$\mathrm{Box\ Normalised\ Count}$') # Set the y... plt.xlabel(r'$\mathrm{Velocity / H}_{0}$') # and the x-axis labels leg = plt.legend(loc='upper left', numpoints=1, labelspacing=0.1) leg.draw_frame(False) # Don't want a box frame for t in leg.get_texts(): # Reduce the size of the text t.set_fontsize('medium') outputFile = OutputDir + '11.VelocityDistribution' + OutputFormat plt.savefig(outputFile) # Save the figure print('Saved file to', outputFile) plt.close() # Add this plot to our output list OutputList.append(outputFile)
def StellarMassFunction(self, G): print('Plotting the stellar mass function') plt.figure() # New figure ax = plt.subplot(111) # 1 plot on the figure binwidth = 0.1 # mass function histogram bin width # calculate all w = np.where(G.StellarMass > 0.0)[0] mass = np.log10(G.StellarMass[w] * 1.0e10 / self.Hubble_h) sSFR = (G.SfrDisk[w] + G.SfrBulge[w]) / (G.StellarMass[w] * 1.0e10 / self.Hubble_h) mi = np.floor(min(mass)) - 2 ma = np.floor(max(mass)) + 2 NB = (ma - mi) / binwidth (counts, binedges) = np.histogram(mass, range=(mi, ma), bins=NB) # Set the x-axis values to be the centre of the bins xaxeshisto = binedges[:-1] + 0.5 * binwidth # additionally calculate red w = np.where(sSFR < 10.0**sSFRcut)[0] massRED = mass[w] (countsRED, binedges) = np.histogram(massRED, range=(mi, ma), bins=NB) # additionally calculate blue w = np.where(sSFR > 10.0**sSFRcut)[0] massBLU = mass[w] (countsBLU, binedges) = np.histogram(massBLU, range=(mi, ma), bins=NB) # Baldry+ 2008 modified data used for the MCMC fitting Baldry = np.array([ [7.05, 1.3531e-01, 6.0741e-02], [7.15, 1.3474e-01, 6.0109e-02], [7.25, 2.0971e-01, 7.7965e-02], [7.35, 1.7161e-01, 3.1841e-02], [7.45, 2.1648e-01, 5.7832e-02], [7.55, 2.1645e-01, 3.9988e-02], [7.65, 2.0837e-01, 4.8713e-02], [7.75, 2.0402e-01, 7.0061e-02], [7.85, 1.5536e-01, 3.9182e-02], [7.95, 1.5232e-01, 2.6824e-02], [8.05, 1.5067e-01, 4.8824e-02], [8.15, 1.3032e-01, 2.1892e-02], [8.25, 1.2545e-01, 3.5526e-02], [8.35, 9.8472e-02, 2.7181e-02], [8.45, 8.7194e-02, 2.8345e-02], [8.55, 7.0758e-02, 2.0808e-02], [8.65, 5.8190e-02, 1.3359e-02], [8.75, 5.6057e-02, 1.3512e-02], [8.85, 5.1380e-02, 1.2815e-02], [8.95, 4.4206e-02, 9.6866e-03], [9.05, 4.1149e-02, 1.0169e-02], [9.15, 3.4959e-02, 6.7898e-03], [9.25, 3.3111e-02, 8.3704e-03], [9.35, 3.0138e-02, 4.7741e-03], [9.45, 2.6692e-02, 5.5029e-03], [9.55, 2.4656e-02, 4.4359e-03], [9.65, 2.2885e-02, 3.7915e-03], [9.75, 2.1849e-02, 3.9812e-03], [9.85, 2.0383e-02, 3.2930e-03], [9.95, 1.9929e-02, 2.9370e-03], [10.05, 1.8865e-02, 2.4624e-03], [10.15, 1.8136e-02, 2.5208e-03], [10.25, 1.7657e-02, 2.4217e-03], [10.35, 1.6616e-02, 2.2784e-03], [10.45, 1.6114e-02, 2.1783e-03], [10.55, 1.4366e-02, 1.8819e-03], [10.65, 1.2588e-02, 1.8249e-03], [10.75, 1.1372e-02, 1.4436e-03], [10.85, 9.1213e-03, 1.5816e-03], [10.95, 6.1125e-03, 9.6735e-04], [11.05, 4.3923e-03, 9.6254e-04], [11.15, 2.5463e-03, 5.0038e-04], [11.25, 1.4298e-03, 4.2816e-04], [11.35, 6.4867e-04, 1.6439e-04], [11.45, 2.8294e-04, 9.9799e-05], [11.55, 1.0617e-04, 4.9085e-05], [11.65, 3.2702e-05, 2.4546e-05], [11.75, 1.2571e-05, 1.2571e-05], [11.85, 8.4589e-06, 8.4589e-06], [11.95, 7.4764e-06, 7.4764e-06], ], dtype=np.float32) # Finally plot the data # plt.errorbar( # Baldry[:, 0], # Baldry[:, 1], # yerr=Baldry[:, 2], # color='g', # linestyle=':', # lw = 1.5, # label='Baldry et al. 2008', # ) Baldry_xval = np.log10(10**Baldry[:, 0] / self.Hubble_h / self.Hubble_h) if (whichimf == 1): Baldry_xval = Baldry_xval - 0.26 # convert back to Chabrier IMF Baldry_yvalU = (Baldry[:, 1] + Baldry[:, 2] ) * self.Hubble_h * self.Hubble_h * self.Hubble_h Baldry_yvalL = (Baldry[:, 1] - Baldry[:, 2] ) * self.Hubble_h * self.Hubble_h * self.Hubble_h plt.fill_between(Baldry_xval, Baldry_yvalU, Baldry_yvalL, facecolor='purple', alpha=0.25, label='Baldry et al. 2008 (z=0.1)') # This next line is just to get the shaded region to appear correctly in the legend plt.plot(xaxeshisto, counts / self.volume * self.Hubble_h * self.Hubble_h * self.Hubble_h / binwidth, label='Baldry et al. 
2008', color='purple', alpha=0.3) # # Cole et al. 2001 SMF (h=1.0 converted to h=0.73) # M = np.arange(7.0, 13.0, 0.01) # Mstar = np.log10(7.07*1.0e10 /self.Hubble_h/self.Hubble_h) # alpha = -1.18 # phistar = 0.009 *self.Hubble_h*self.Hubble_h*self.Hubble_h # xval = 10.0 ** (M-Mstar) # yval = np.log(10.) * phistar * xval ** (alpha+1) * np.exp(-xval) # plt.plot(M, yval, 'g--', lw=1.5, label='Cole et al. 2001') # Plot the SMF # Overplot the model histograms plt.plot(xaxeshisto, counts / self.volume * self.Hubble_h * self.Hubble_h * self.Hubble_h / binwidth, 'k-', label='Model - All') plt.plot(xaxeshisto, countsRED / self.volume * self.Hubble_h * self.Hubble_h * self.Hubble_h / binwidth, 'r:', lw=2, label='Model - Red') plt.plot(xaxeshisto, countsBLU / self.volume * self.Hubble_h * self.Hubble_h * self.Hubble_h / binwidth, 'b:', lw=2, label='Model - Blue') plt.yscale('log', nonposy='clip') plt.axis([8.0, 12.5, 1.0e-6, 1.0e-1]) # Set the x-axis minor ticks ax.xaxis.set_minor_locator(plt.MultipleLocator(0.1)) plt.ylabel( r'$\phi\ (\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1})$') # Set the y... plt.xlabel(r'$\log_{10} M_{\mathrm{stars}}\ (M_{\odot})$' ) # and the x-axis labels plt.text(12.2, 0.03, whichsimulation, size='large') leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1) leg.draw_frame(False) # Don't want a box frame for t in leg.get_texts(): # Reduce the size of the text t.set_fontsize('medium') outputFile = OutputDir + '1.StellarMassFunction' + OutputFormat plt.savefig(outputFile) # Save the figure print('Saved file to', outputFile) plt.close() # Add this plot to our output list OutputList.append(outputFile)
from pylab import plot, show, legend from random import normalvariate x = [normalvariate(0, 1) for i in range(250)] plot(x, 'b-', label="white noise changed") legend() show()
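# Optional reproducibility sketch (an addition, not in the original): seeding the random
# module before building x makes the white-noise trace above repeatable across runs.
# from random import seed
# seed(0)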
def threshold(self, x_train, y_train, x_valid, y_valid, plot_graph=True): """ Obtain optimal threshold using FBeta as parameter using a range (0.1, 1.0, 200) for evaluation """ if self.sampling is None: class_weight = self.class_weight elif self.sampling == 'ALLKNN': x_train, y_train = under_sampling(x_train, y_train) class_weight = None else: x_train, y_train = over_sampling(x_train, y_train, model=self.sampling) class_weight = None if isinstance(x_train, pd.DataFrame): x_train = x_train.values if isinstance(y_train, (pd.DataFrame, pd.Series)): y_train = y_train.values if isinstance(x_valid, pd.DataFrame): x_valid = x_valid.values if isinstance(y_valid, (pd.DataFrame, pd.Series)): y_valid = y_valid.values min_sample_leaf = round(x_train.shape[0] * 0.01) min_sample_split = min_sample_leaf * 10 max_features = None file_model = ensemble.ExtraTreesClassifier(criterion='gini', bootstrap=self.bootstrap, min_samples_leaf=min_sample_leaf, min_samples_split=min_sample_split, n_estimators=self.n_estimators, max_depth=self.max_depth, max_features=max_features, oob_score=self.oob_score, random_state=531, verbose=1, class_weight=class_weight, n_jobs=1) cv = StratifiedKFold(n_splits=10, random_state=None) file_model.fit(x_train, y_train) thresholds = np.linspace(0.1, 1.0, 200) scores = [] y_pred_score = cross_val_predict(file_model, x_valid, y_valid, cv=cv, method='predict_proba') y_pred_score = np.delete(y_pred_score, 0, axis=1) for threshold in thresholds: y_hat = (y_pred_score > threshold).astype(int) y_hat = y_hat.tolist() y_hat = [item for sublist in y_hat for item in sublist] scores.append([ recall_score(y_pred=y_hat, y_true=y_valid), precision_score(y_pred=y_hat, y_true=y_valid), fbeta_score(y_pred=y_hat, y_true=y_valid, beta=self.beta, average=self.metric_weight)]) scores = np.array(scores) if plot_graph: plot.plot(thresholds, scores[:, 0], label='$Recall$') plot.plot(thresholds, scores[:, 1], label='$Precision$') plot.plot(thresholds, scores[:, 2], label='$F_2$') plot.ylabel('Score') plot.xlabel('Threshold') plot.legend(loc='best') plot.close() self.final_threshold = thresholds[scores[:, 2].argmax()] print(self.final_threshold) return self.final_threshold
def add_zeroline(current_data): from pylab import plot, legend t = current_data.t legend(('surface', 'topography'), loc='lower left') plot(t, 0 * t, 'k')
timefile = open('gabls4s3.time', 'w') timefile.write('{0:^20s} {1:^20s} \n'.format('t', 'sbot[th]')) for t in range(s3.t.size): timefile.write('{0:1.14E} {1:1.14E} \n'.format(s3.t[t], s3.ths[t])) timefile.close() # Plot pl.close('all') pl.figure() pl.subplot(221) pl.plot(th, z, 'k-', label='mhh') pl.plot(s3.th, s3.z, 'go', mfc='none', label='s3') pl.ylim(0, 1100) pl.xlim(270, 285) pl.legend(frameon=False, loc=2) pl.subplot(222) pl.plot(u, z, 'k-', label='mhh') pl.plot(s3.u, s3.z, 'go', mfc='none', label='s3') pl.plot(ug, z, 'k--', label='mhh') pl.plot(s3.ug, s3.z, 'bo', mfc='none', label='s3') pl.ylim(0, 1100) pl.xlim(0, 10) pl.legend(frameon=False, loc=2) pl.subplot(223) pl.plot(v, z, 'k-', label='mhh') pl.plot(s3.v, s3.z, 'go', mfc='none', label='s3') pl.plot(vg, z, 'k--', label='mhh') pl.plot(s3.vg, s3.z, 'bo', mfc='none', label='s3')
residual3 = delta_tensor_norm(statistic_res, check_tensor) x_values.append(train_percent) y_values1.append(residual1) y_values2.append(residual2) y_values3.append(residual3) train_percent += 0.2 pylab.plot(x_values, y_values1, 'rs', linewidth=1, linestyle="-", label=u"MTT") pylab.plot(x_values, y_values2, 'ks', linewidth=1, linestyle="-", label=u"DTA") pylab.plot(x_values, y_values3, 'gs', linewidth=1, linestyle="-", label=u"Baseline") pylab.xlabel(u"Training set proportion") pylab.ylabel(u"Average error") pylab.title(u"Average error vs. training set proportion") pylab.legend(loc='center right') pylab.show()
xx, yy = np.meshgrid(np.linspace(4, 8.5, 200), np.linspace(1.5, 4.5, 200)) print(xx) print(yy) X_grid = np.c_[xx.ravel(), yy.ravel()] zz_lda = lda.predict_proba(X_grid)[:,1].reshape(xx.shape) # zz_qda = qda.predict_proba(X_grid)[:,1].reshape(xx.shape) pl.figure() splot = pl.subplot(1, 2, 1) pl.contourf(xx, yy, zz_lda > 0.5, alpha=0.5) # pl.scatter(X[y==0,0], X[y==0,1], c='b', label=target_names[0]) # pl.scatter(X[y==1,0], X[y==1,1], c='r', label=target_names[1]) # pl.contour(xx, yy, zz_lda, [0.5], linewidths=2., colors='k') # plot_ellipse(splot, lda.means_[0], lda.covariance_, 'b') # plot_ellipse(splot, lda.means_[1], lda.covariance_, 'r') pl.legend() pl.axis('tight') pl.title('Linear Discriminant Analysis') # splot = pl.subplot(1, 2, 2) # pl.contourf(xx, yy, zz_qda > 0.5, alpha=0.5) # pl.scatter(X[y==0,0], X[y==0,1], c='b', label=target_names[0]) # pl.scatter(X[y==1,0], X[y==1,1], c='r', label=target_names[1]) # pl.contour(xx, yy, zz_qda, [0.5], linewidths=2., colors='k') # plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'b') # plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'r') # pl.legend() # pl.axis('tight') # pl.title('Quadratic Discriminant Analysis') pl.show()
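For reference, a self-contained version of the LDA probability-contour plot above, trained on synthetic two-feature data (make_blobs and the class labels are illustrative, not the original X and y):

# Sketch: LDA class-probability contour on synthetic 2-D data.
import numpy as np
import pylab as pl
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = make_blobs(n_samples=300, centers=2, n_features=2, random_state=0)
lda = LinearDiscriminantAnalysis().fit(X, y)

xx, yy = np.meshgrid(np.linspace(X[:, 0].min(), X[:, 0].max(), 200),
                     np.linspace(X[:, 1].min(), X[:, 1].max(), 200))
zz = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1].reshape(xx.shape)

pl.contourf(xx, yy, zz > 0.5, alpha=0.5)                  # decision regions
pl.contour(xx, yy, zz, [0.5], linewidths=2., colors='k')  # decision boundary
pl.scatter(X[y == 0, 0], X[y == 0, 1], c='b', label='class 0')
pl.scatter(X[y == 1, 0], X[y == 1, 1], c='r', label='class 1')
pl.legend()
pl.axis('tight')
pl.title('Linear Discriminant Analysis')
pl.show()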
for i in range(noIters): trX, trY = shuffle_data(trX, trY) for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX), batch_size)): cost = train3(trX[start:end], trY[start:end]) a3.append(np.mean(np.argmax(teY, axis=1) == predict(teX))) trainCost3.append(cost / (len(trX) // batch_size)) print(a3[i]) pylab.figure() pylab.plot(range(noIters), a, label='SGD') pylab.plot(range(noIters), a2, label='SGD with momentum') pylab.plot(range(noIters), a3, label='RMSprop') pylab.xlabel('epochs') pylab.ylabel('test accuracy') pylab.legend(loc='lower right') pylab.title('test accuracy') pylab.savefig('testAccuracy') pylab.figure() pylab.plot(range(noIters), trainCost, label='SGD') pylab.plot(range(noIters), trainCost2, label='SGD with momentum') pylab.plot(range(noIters), trainCost3, label='RMSprop') pylab.xlabel('epochs') pylab.ylabel('training cost') pylab.legend(loc='upper right') pylab.title('training cost') pylab.savefig('trainingCost') w = w1.get_value() # learned filters pylab.figure()
## Compute event rate print '' ee = pow(10,eu) print 'Energy:',ee,'eV' print 'Aperture (aggressive):',seffta,'km2.sr' print 'Aperture (conservative):',sefftc,'km2.sr' print 'Optimal aperture:',sefftopt[0],'km2.sr' pl.figure(2) pl.plot(eu,seffta,label='Aggressive') pl.plot(eu,sefftc,label='Conservative') pl.plot(eu,sefftopt,label='Optimal') pl.xlabel('Energy (eV)') pl.ylabel('Aperture (km$^2$.sr)') pl.grid(True) pl.legend(loc='best') pl.title("GRAND CR aperture") #pl.show() seffta = seffta*1e6 #m2 sefftc = sefftc*1e6 #m2 sefftopt = sefftopt*1e6 #m2 dt = 3600*24*ndays # exposure time in seconds (ndays days) evt1da = np.trapz(seffta*J1*pow(ee,-gamma1)*dt,ee) evt1dc = np.trapz(sefftc*J1*pow(ee,-gamma1)*dt,ee) evt1dopt = np.trapz(sefftopt*J1*pow(ee,-gamma1)*dt,ee) print 'Expected daily event rate in 10^17-10^18eV (aggressive):',evt1da print 'Expected daily event rate in 10^17-10^18eV (conservative):',evt1dc print 'Optimal daily event rate in 10^17-10^18eV:',evt1dopt # HE events
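The event rate above is the energy integral of aperture times differential flux, evaluated with np.trapz. A minimal sketch of that integral with placeholder spectrum constants (J1, gamma1 and the flat aperture are made-up values here, not the ones used above):

# Sketch of the rate integral N = dt * integral( A(E) * J(E) dE ) with np.trapz.
import numpy as np

eu = np.linspace(17.0, 18.0, 50)          # log10(E/eV) grid
ee = 10.0 ** eu                           # energy in eV
aperture_m2sr = 1.0e9 * np.ones_like(ee)  # hypothetical flat aperture, m^2.sr
J1, gamma1 = 1.0e-18, 3.0                 # placeholder flux normalisation and spectral index
dt = 3600 * 24                            # one day, in seconds

flux = J1 * ee ** (-gamma1)               # differential flux (illustrative units)
events_per_day = np.trapz(aperture_m2sr * flux * dt, ee)
print(events_per_day)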
bin_start, bin_stop = bin_range[ps_name] std_sim = np.sqrt(cov.diagonal())[bin_start:bin_stop] mean_sim = mean_vec[bin_start:bin_stop] plt.figure(figsize=(12, 8)) plt.errorbar(lb, cb * fac, color="grey", label="input theory") plt.errorbar(lb, mean_sim * fac, std_sim * fac, color="red", fmt=".", label="mean %s %s GHz x %s GHz" % (spec, f0, f1), alpha=0.4) plt.xlabel(r"$\ell$", fontsize=20) plt.ylabel(r"$D_\ell$", fontsize=20) plt.legend() plt.savefig("%s/mean_spectra_%s_%sx%s.png" % (plot_dir, spec, f0, f1)) plt.clf() plt.close() plt.figure(figsize=(12, 8)) plt.semilogy() plt.errorbar(l_planck, std_planck, color="grey", label="std planck") plt.errorbar(l_planck, std_analytic, color="blue", label="std analytic") plt.errorbar(lb, std_sim, color="red", fmt=".",
Z = sum(rho[j, j] for j in range(nx + 1)) * dx pi_of_x = [rho[j, j] / Z for j in range(nx + 1)] # graphics ''' y = [j * dtau for j in range(N)] pylab.plot(x, y, 'b-') pylab.xlabel('$x$', fontsize=18) pylab.ylabel('$\\tau$', fontsize=18) pylab.title('Levi harmonic path, N= %i, $\\beta$ = %.0f'%(N, beta)) pylab.xlim(-3.0, 3.0) pylab.savefig('plot_B2_levi_path_beta%s.png' % beta) pylab.show() ''' pylab.hist(data, density=True, bins=200, label='Levi-path') pylab.plot(x, pi_of_x, label='matrix-square') list_x = [0.1 * a for a in range(-30, 31)] list_y = [math.sqrt(math.tanh(beta / 2.0)) / math.sqrt(math.pi) * \ math.exp(-x ** 2 * math.tanh(beta / 2.0)) for x in list_x] #pylab.plot(list_x, list_y, label='analytic') pylab.legend() pylab.xlabel('$x$') pylab.ylabel('$\\pi(x)$ (normalized)') pylab.title( 'metro_levi_free_anharmonic\n(beta=%s, N=%i, n_steps=%i, Ncut=%i)' % (beta, N, n_steps, Ncut)) pylab.xlim(-2, 2) pylab.savefig('plot_C1_metro_levi_free.png') pylab.show()
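The block above overlays a density-normalised histogram on the matrix-squaring curve. The same hist/plot pattern in isolation, comparing Gaussian samples against their analytic pdf (purely illustrative data, not the path-integral output):

# Sketch: density-normalised histogram vs. an analytic curve (standard normal).
import math
import random
import pylab

data = [random.gauss(0.0, 1.0) for _ in range(100000)]
xs = [0.01 * a for a in range(-400, 401)]
pdf = [math.exp(-x ** 2 / 2.0) / math.sqrt(2.0 * math.pi) for x in xs]

pylab.hist(data, density=True, bins=200, label='samples')
pylab.plot(xs, pdf, 'r-', lw=2, label='analytic')
pylab.xlabel('$x$')
pylab.ylabel('$\\pi(x)$ (normalized)')
pylab.legend()
pylab.show()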
def exp_fit(crvfile, outname, dump_frequency, Er, Ed, Ebi, recoil_relaxation_time=30000, start_timeoffset=500): ## CRV File - renamed to .crv and manually changed header (crv format) from extracted data file ## PNG output ## timestep used in recoil.in during recoil insertion --> datapoint after each recoil crv = CRV.CRV(crvfile)[0] #extract mech. stress tensor in x or y pyy = crv['pyy'] x = crv['step'] n_atoms = crv['atoms'][0] #calc timeaxis out of pxx or pyy size #x = np.arange(0,len(pyy)*timestep,timestep) #number of recoils nrecoils = [] #pressure tensor component pxx = [] dump_factor = int(recoil_relaxation_time / dump_frequency) print dump_factor #reduce timeaxis to recoil axis for i in range(len(x) / dump_factor): nrecoils.append((x[i * dump_factor] - start_timeoffset) / float(recoil_relaxation_time)) pxx.append(abs(pyy[i * dump_factor] / pyy[0])) #number of displacements per target atom ndpa = np.multiply(nrecoils, (Er / (2.5 * Ed * n_atoms))) #fit popt, pcov = opt.curve_fit(lambda ndpa, eta: eta_exp(ndpa, Ebi, eta), ndpa, pxx, p0=2e8) #anotate to add value to plot print 'RIV (exp)= {:.4e}'.format(popt[0]) # npa interpolated ndpa_interp = np.linspace(0, ndpa[-1] * 5, 1000) fig = plt.figure(1, figsize=fsize) plt.xlim(0, ndpa_interp[-1]) #plt.ylim(0.95*pxx[-1] , 1) plt.grid() plt.plot(ndpa, pxx, 'bs', markeredgecolor='blue', markerfacecolor='None', markeredgewidth=mew, markersize=ms, label='MD Simulation') plt.plot(ndpa_interp, eta_exp(ndpa_interp, Ebi, *popt), 'r-', linewidth=lw, label='Fit') plt.xlabel('Number of displacements per atom') plt.ylabel(r'$ \frac{\sigma}{\vert \sigma_0 \vert} $') legtitle = r'$ \eta_{ri} = $' + '{:.4e}'.format( popt[0] ) + ' $ Pa \cdot dpa $' + '\n' + r'$ E_{bi} = $' + '{:.3e}'.format( Ebi) + ' $ Pa $' + '\n' + r'$ E_D = $' + '{:.1e}'.format( Ed) + ' $ \mathrm{eV} $' + '\n' + r'$ E_R = $' + '{:.1e}'.format( Er) + ' $ \mathrm{eV} $' plt.legend(loc='best', shadow=False, title=legtitle, prop={'size': legpropsize}, numpoints=1) #every other tick label for label in plt.gca().xaxis.get_ticklabels()[::2]: label.set_visible(False) #plt.show() fig.savefig(outname) print "Png file written to " + outname plt.close("all") return popt[0]
text_file = open("training_time.txt", "w") text_file.write("--- Run Time =" + str(((time.time() - start_time))) + " seconds ---" + "\n" + "--- Run Time = " + str(((time.time() - start_time) / 60.0)) + " minutes ---" + "\n") text_file.close() print(history.history.keys()) plt.plot(history.history['loss']) plt.xlabel('epoch', fontsize=16) plt.ylabel('loss', fontsize=16) plt.savefig("./resulted_plotes/train_loss.jpg") plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.legend(['train', 'validation'], loc='upper right', fontsize='large') plt.ylabel('loss', fontsize=16) plt.xlabel('epoch', fontsize=16) plt.savefig("./resulted_plotes/all_loss.jpg") plt.show() plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.xlabel('epoch', fontsize=16) plt.ylabel('accuracy', fontsize=16) plt.legend(['train', 'validation'], loc='lower right', fontsize='large') plt.savefig("./resulted_plotes/_accuracy.jpg") plt.show() plt.plot(history.history['lr'])
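One caveat in the block above: successive plt.plot calls draw into the current figure, so curves from one saved image can bleed into the next. A hedged variant that gives each saved plot its own figure (assuming the same Keras-style history keys and output paths) might be:

# Sketch: isolate each saved plot in its own figure (assumes a Keras-style history dict).
import matplotlib.pyplot as plt

def save_curves(history, keys, labels, ylab, outfile):
    # Plot the requested history entries on a fresh figure and close it after saving.
    plt.figure()
    for key, label in zip(keys, labels):
        plt.plot(history.history[key], label=label)
    plt.xlabel('epoch', fontsize=16)
    plt.ylabel(ylab, fontsize=16)
    plt.legend(loc='best', fontsize='large')
    plt.savefig(outfile)
    plt.close()

# Usage with the keys assumed above (paths are the same hypothetical directory):
# save_curves(history, ['loss', 'val_loss'], ['train', 'validation'], 'loss', './resulted_plotes/all_loss.jpg')
# save_curves(history, ['acc', 'val_acc'], ['train', 'validation'], 'accuracy', './resulted_plotes/_accuracy.jpg')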
gin = nest.Create('spike_generator', params = {'spike_times': np.array([15.0, 25.0, 55.0])}) nest.Connect(gex, n, params={'weight': 40.0}) # excitatory nest.Connect(gin, n, params={'weight': -20.0}) # inhibitory nest.Connect(m, n) # simulate nest.Simulate(100) # obtain and display data events = nest.GetStatus(m)[0]['events'] t = events['times']; pl.clf() pl.subplot(211) pl.plot(t, events['V_m']) pl.axis([0, 100, -75, -53]) pl.ylabel('Membrane potential [mV]') pl.subplot(212) pl.plot(t, events['g_ex'], t, events['g_in']) pl.axis([0, 100, 0, 45]) pl.xlabel('Time [ms]') pl.ylabel('Synaptic conductance [nS]') pl.legend(('g_exc', 'g_inh')) pl.show()
def lin_fit(crvfile, refcrvfile, outname, dump_frequency, Er, Ed, recoil_relaxation_time=10000, start_timeoffset=500): crv = CRV.CRV(crvfile)[0] eps = crv['lz'] x = crv['step'] pyy = crv['pyy'] n_atoms = crv['atoms'][0] crv1 = CRV.CRV(refcrvfile)[0] eps_ref = crv1['lz'] # eps = np.subtract(eps,eps_ref) print eps nrecoils = [] #pressure tensor component pxx = [] ezz = [] ezz_ref = [] dump_factor = int(recoil_relaxation_time / dump_frequency) pressure_conversion = 1e9 #GPa -> Pa #reduce timeaxis to recoil axis for i in range(len(x) / dump_factor): nrecoils.append((x[i * dump_factor] - start_timeoffset) / float(recoil_relaxation_time)) pxx.append(pyy[i * dump_factor] * pressure_conversion) # ezz.append(((eps[i*dump_factor] - eps[0])/eps[0] - (eps_ref[i*dump_factor] - eps_ref[0])/eps_ref[0])) # ezz.append(abs( (eps[i*dump_factor] -eps_ref[i*dump_factor])/ (eps[0] - eps_ref[0]))) ezz.append(((-eps[dump_factor] + eps[i * dump_factor]))) ezz_ref.append(((-eps_ref[dump_factor] + eps_ref[i * dump_factor]))) del nrecoils[0] del pxx[0] del ezz[0] del ezz_ref[0] #number of displacements per target atom ndpa = np.multiply(nrecoils, (Er / (2.5 * Ed * n_atoms))) fit_start = 0 fit_end = 20 dezz = np.multiply(np.subtract(ezz, ezz_ref), 1. / eps[dump_factor]) #fit1 popt1, pcov1 = opt.curve_fit( lambda ndpa, eta, offset: eta_lin(ndpa, pxx[0], eta, offset), ndpa[fit_start:fit_end], dezz[fit_start:fit_end]) #popt1 = [1,1] #fit2 # popt2, pcov2 = opt.curve_fit( lambda ndpa,eta,offset: eta_lin(ndpa,pxx[0],eta,offset), ndpa[0:fit_start], ezz[0:fit_start]) #popt2 = [1,1] #anotate to add value to plot # print 'RIV (lin)= {:.4e}'.format(popt1[0]) # npa interpolated ndpa_interp = np.linspace(0, ndpa[-1] * 1.5, 1000) fig = plt.figure(1, figsize=fsize) plt.xlim(0, ndpa_interp[-1]) # plt.ylim(ezz[-1]*0.8, ezz[-1]*1.1) plt.grid() plt.plot(ndpa[fit_start:fit_end], dezz[fit_start:fit_end], 'bs', markeredgecolor='blue', markerfacecolor='None', markeredgewidth=mew, markersize=ms, label='MD Simulation') # plt.plot(ndpa, ezz_ref, 'bs', markeredgecolor = 'magenta', markerfacecolor= 'None', markeredgewidth=mew, markersize = ms, label = 'MD Simulation Reference') plt.plot(ndpa_interp, eta_lin(ndpa_interp, pxx[0], *popt1), 'r-', linewidth=lw, label='Fit') # plt.plot(ndpa_interp, eta_lin(ndpa_interp,pxx[0],*popt2), '-', color = 'black', linewidth = lw, label='Fit2') plt.xlabel('Number of displacements per atom') plt.ylabel(r'$ \Delta \varepsilon_{zz} $') # legtitle = r'$ \eta_{ri,1} = $' + '{:.4e}'.format(popt1[0]) + ' $ \mathrm{Pa \cdot dpa} $' + '\n' + r'$ \eta_{ri,2} = $' + '{:.4e}'.format(popt2[0]) + ' $ \mathrm{Pa \cdot dpa} $' + '\n'r'$\sigma_0 = $' + '{:.2e}'.format(abs(pxx[0])) + r'$\,\mathrm{Pa}$' + '\n' + r'$E_D = ' + '{:.1e}'.format(Ed) + 'eV $'+ '\n' + r'$ E_R = $' + '{:.1e}'.format(Er) + ' $ eV $' legtitle = r'$ \eta^\prime = $' + '{:.4e}'.format( popt1[0] ) + ' $ \mathrm{Pa \cdot dpa} $' + '\n' r'$\sigma_0 = $' + '{:.2e}'.format( abs(pxx[0]) ) + r'$\,\mathrm{Pa}$' + '\n' + r'$E_D = ' + '{:.1e}'.format( Ed) + '\mathrm{eV} $' + '\n' + r'$ E_R = $' + '{:.1e}'.format( Er) + ' $ \mathrm{eV} $' plt.legend(loc='best', shadow=False, title=legtitle, prop={'size': legpropsize}, numpoints=1) #every other tick label for label in plt.gca().xaxis.get_ticklabels()[::2]: label.set_visible(False) #plt.show() fig.tight_layout() fig.savefig(outname) print "Png file written to " + outname plt.close("all")
def plotReport( self, summ={} ,fignum=1 ): if not ( summ.has_key('summaryminor') and summ.has_key('summarymajor') and summ.has_key('threshold') and summ['summaryminor'].shape[0]==6 ): print 'Cannot make summary plot. Please check contents of the output dictionary from tclean.' return summ import pylab as pl from numpy import max as amax # 0 : iteration number (within deconvolver, per cycle) # 1 : peak residual # 2 : model flux # 3 : cyclethreshold # 4 : deconvolver id # 5 : subimage id (channel, stokes..) pl.ioff() pl.figure(fignum) pl.clf(); minarr = summ['summaryminor'] if minarr.size==0: casalog.post("Zero iteration: no summary plot is generated.", "WARN") else: iterlist = minarr[0,:] eps=0.0 peakresstart=[] peakresend=[] modfluxstart=[] modfluxend=[] itercountstart=[] itercountend=[] peakresstart.append( minarr[1,:][0] ) modfluxstart.append( minarr[2,:][0] ) itercountstart.append( minarr[0,:][0] + eps ) peakresend.append( minarr[1,:][0] ) modfluxend.append( minarr[2,:][0] ) itercountend.append( minarr[0,:][0] + eps ) for ii in range(0,len(iterlist)-1): if iterlist[ii]==iterlist[ii+1]: peakresend.append( minarr[1,:][ii] ) peakresstart.append( minarr[1,:][ii+1] ) modfluxend.append( minarr[2,:][ii] ) modfluxstart.append( minarr[2,:][ii+1] ) itercountend.append( iterlist[ii]-eps ) itercountstart.append( iterlist[ii]+eps ) peakresend.append( minarr[1,:][len(iterlist)-1] ) modfluxend.append( minarr[2,:][len(iterlist)-1] ) itercountend.append( minarr[0,:][len(iterlist)-1] + eps ) # pl.plot( iterlist , minarr[1,:] , 'r.-' , label='peak residual' , linewidth=1.5, markersize=8.0) # pl.plot( iterlist , minarr[2,:] , 'b.-' , label='model flux' ) # pl.plot( iterlist , minarr[3,:] , 'g--' , label='cycle threshold' ) pl.plot( itercountstart , peakresstart , 'r.--' , label='peak residual (start)') pl.plot( itercountend , peakresend , 'r.-' , label='peak residual (end)',linewidth=2.5) pl.plot( itercountstart , modfluxstart , 'b.--' , label='model flux (start)' ) pl.plot( itercountend , modfluxend , 'b.-' , label='model flux (end)',linewidth=2.5 ) pl.plot( iterlist , minarr[3,:] , 'g--' , label='cycle threshold', linewidth=2.5 ) maxval = amax( minarr[1,:] ) maxval = max( maxval, amax( minarr[2,:] ) ) bcols = ['b','g','r','y','c'] minv=1 niterdone = len(minarr[4,:]) if len(summ['summarymajor'].shape)==1 and summ['summarymajor'].shape[0]>0 : pl.vlines(summ['summarymajor'],0,maxval, label='major cycles', linewidth=2.0) pl.hlines( summ['threshold'], 0, summ['iterdone'] , linestyle='dashed' ,label='threshold') pl.xlabel( 'Iteration Count' ) pl.ylabel( 'Peak Residual (red), Model Flux (blue)' ) ax = pl.axes() box = ax.get_position() ax.set_position([box.x0, box.y0, box.width, box.height*0.8]) pl.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), ncol=3, fancybox=True, shadow=True) pl.savefig('summaryplot_'+str(fignum)+'.png') pl.ion() return summ;
markersize=10, alpha=0.7, label=rf"l{l}_m{m}") increment += 1 pylab.semilogy((levels[0], levels[-1]), theoretical_velocity_convergence[1, -1, :], "k--", label=r"$\mathcal{O} (\Delta x^{4.0}$)") pylab.xlabel("Level of Refinement") pylab.xlim(levels[0] - 0.5, levels[-1] + 0.5) pylab.xticks(levels) pylab.ylabel( r"Error, $|| \mathbf{u} - \mathbf{u}^{*} ||_{2} \; / \; || \mathbf{u}^{*} ||_{2}$" ) pylab.grid() pylab.legend(numpoints=1) pylab.savefig("ZS_Velocity_Convergence.png", bbox_inches="tight") pylab.savefig("ZS_Velocity_Convergence.pdf", bbox_inches="tight") pylab.close() # Now pressure: increment = 0 for lindex, l in enumerate(ls): ms = numpy.array([l / 2, l], dtype=int) for mindex, m in enumerate(ms): pylab.semilogy(levels, errors_p[lindex, mindex, :], ls="none", marker=symbols[increment], color=colours[increment], markersize=10,
def test_prox_budget_heuristic(self, w=None, verbose=0): # Approx prox update we should have |active(groups)| <= budget. In the # case of overlapping groups, we penalize some groups too many times so # the bound will be looser. # What can we say about the prox operator? We can test that the prox # operator shrinks parameters to be under the group budget (even though # the constraint is tight if there are nonoverlapping groups and loose # otherwise). #groups = [[i] for i in range(self.H)] # reduces to ordinary L1 penalty groups = self.group_structure() coverage = {k for g in groups for k in g} assert len(coverage) == len(self.C) # all features must appear in some group. if verbose: print('[prox]', groups) if w is None: w_orig = np.random.uniform(-1, 1, size=self.H) else: print('H =', self.H, '|raw contexts| =', len(w)) w_orig = np.zeros(self.H) for k in w: w_orig[self.context_feature_id(k)] = w[k] del w print((w_orig != 0).sum(), 'active features') print(sum(np.abs(w_orig[list(G)]).sum() > 0 for G in groups), 'active groups') print(len(groups), 'total groups') dense = OnlineProx(groups, self.H, C=0, L=2, eta=1.0, fudge=1) dense.w[:] = w_orig.copy() def L0(threshold): dense.w[:] = w_orig.copy() dense.prox_threshold(threshold) return self.L0_group_norm_proxy(dense) # Check that the find_threshold gives a conservative estimate for L0 budget f = {} est = {} M = len(groups) #sum(len(g) for g in groups) for budget in range(M+1): dense.w[:] = w_orig.copy() est[budget] = dense.find_threshold(budget) f[budget] = L0(est[budget]) assert f[budget] <= budget, [budget, f[budget], est[budget]] # check that L0 on this group structure is what we wanted assert self.ideal_runtime(dense.w) == f[budget] # Check end points # # The maximum number of active groups. Is upper bounded by the number of # groups with a nonzero norm, which might <<= tne number of groups. max_active = sum(np.abs(w_orig[list(G)]).sum() > 0 for G in groups) #print('max_active:', max_active, 'number of groups:', len(groups)) assert f[0] == 0 assert f[max_active] == max_active assert f[M] == max_active # Check coverage against a numerical sweep. numerical_x = np.linspace(0, M+1, 10000) numerical_y = np.array([L0(threshold) for threshold in numerical_x]) heuristic_x = np.array(sorted(est.values())) heuristic_y = np.array([L0(threshold) for threshold in heuristic_x]) if 0: pl.title('threshold vs L0 coverage') keep = numerical_y > 0 pl.plot(numerical_x[keep], numerical_y[keep], c='b', alpha=0.5, lw=2, label='numerical') pl.plot(heuristic_x, heuristic_y, alpha=0.5, c='r', lw=2, label='heuristic') pl.scatter(heuristic_x, heuristic_y, lw=0) pl.legend(loc='best') pl.show() numerical_points = list(sorted(set(numerical_y))) heuristic_points = list(sorted(set(heuristic_y))) # How many operating points (budgets) do we miss that the numerical # method achieves? # # Note that we don't expect perfect coverage because the heuristic # pretends that groups don't overlap. # # ^^ We appear to be getting great coverage. Should we revise this # statement? print(numerical_points) print(heuristic_points) recall = len(set(numerical_points) & set(heuristic_points)) / len(set(numerical_points)) print('recall: %.2f' % recall) assert recall >= 0.99, recall if 0: pl.title('Ability to conservatively meet the budget') xs, ys = list(zip(*sorted(f.items()))) pl.plot(xs, xs, c='k', alpha=0.5, linestyle=':') pl.plot(xs, ys, alpha=0.5, c='r', lw=2) pl.scatter(xs, ys, lw=0) pl.show() print('[test budget]', colors.light.green % 'pass')
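The heuristic being tested pretends the groups do not overlap and picks a threshold so that at most `budget` groups keep a nonzero L1 norm. A toy sketch of that idea (not the OnlineProx implementation, just an illustration on non-overlapping groups):

# Sketch: choose a cutoff so that at most `budget` groups survive, then zero the rest.
import numpy as np

def find_group_threshold(w, groups, budget):
    # L1 norm of each group of coordinates.
    norms = np.array([np.abs(w[list(g)]).sum() for g in groups])
    if budget >= (norms > 0).sum():
        return 0.0                            # every active group already fits the budget
    # Keep the `budget` largest norms; threshold at the next one down.
    return np.sort(norms)[::-1][budget]

def apply_group_threshold(w, groups, threshold):
    w = w.copy()
    for g in groups:
        if np.abs(w[list(g)]).sum() <= threshold:
            w[list(g)] = 0.0                  # zero out groups at or under the cutoff
    return w

w = np.random.uniform(-1, 1, size=10)
groups = [[0, 1, 2], [3, 4], [5, 6, 7], [8, 9]]   # non-overlapping toy groups
t = find_group_threshold(w, groups, budget=2)
w_new = apply_group_threshold(w, groups, t)
print(sum(np.abs(w_new[list(g)]).sum() > 0 for g in groups))  # <= 2 active groups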
# Plots the drag coefficient against the particle Reynolds number. from math import sqrt, pi from numpy import * import pylab from fluidity_tools import stat_parser C_wen_yu = zeros(200000) C_stokes = zeros(200000) particle_Re = arange(0.001, 1000, 0.005) for i in range(0, len(particle_Re)): # Drag coefficients for the Wen & Yu and Stokes drag correlations respectively. C_wen_yu[i] = (24.0 / particle_Re[i]) * (1.0 + 0.15 * particle_Re[i]**0.687) C_stokes[i] = (24.0 / particle_Re[i]) s = stat_parser("./mphase_wen_yu_drag_correlation.stat") numerical_particle_Re_wen_yu = s["Tephra"]["ParticleReynoldsNumber"]["max"][-1] numerical_C_wen_yu = s["Tephra"]["DragCoefficient"]["max"][-1] pylab.loglog(particle_Re, C_stokes, "-r", label="Stokes drag correlation") pylab.loglog(particle_Re, C_wen_yu, "-g", label="Wen & Yu drag correlation") pylab.loglog(numerical_particle_Re_wen_yu, numerical_C_wen_yu, "*b", label="Numerical result") pylab.legend(loc=1) pylab.xlabel("ParticleReynoldsNumber") pylab.ylabel("DragCoefficient") pylab.show()
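Design note: since both correlations are simple closed-form expressions, the element-by-element loop above can be replaced by vectorised numpy operations that produce the same curves without preallocating the arrays; a short sketch using the formulas already given:

# Vectorised sketch of the two drag correlations used above.
from numpy import arange

particle_Re = arange(0.001, 1000, 0.005)
C_stokes = 24.0 / particle_Re
C_wen_yu = (24.0 / particle_Re) * (1.0 + 0.15 * particle_Re**0.687)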