def view_simple(self, stats, thetas):
    # plotting params
    nbins = 20
    alpha = 0.5
    label_size = 8
    linewidth = 3
    linecolor = "r"

    # extract from states
    # thetas = states_object.get_thetas()[burnin:,:]
    # stats = states_object.get_statistics()[burnin:,:]
    # nsims = states_object.get_sim_calls()[burnin:]

    # plot sample distribution of thetas, add vertical line for true theta, theta_star
    f = pp.figure()
    sp = f.add_subplot(111)
    pp.plot(self.fine_theta_range, self.posterior, linecolor + "-", lw=1)
    ax = pp.axis()
    pp.hist(thetas, self.nbins_coarse, range=self.range, normed=True, alpha=alpha)
    pp.fill_between(self.fine_theta_range, self.posterior, color="m", alpha=0.5)
    pp.plot(self.posterior_bars_range, self.posterior_bars, 'ro')
    pp.vlines(thetas.mean(), ax[2], ax[3], color="b", linewidths=linewidth)
    # pp.vlines(self.theta_star, ax[2], ax[3], color=linecolor, linewidths=linewidth)
    pp.vlines(self.posterior_mode, ax[2], ax[3], color=linecolor, linewidths=linewidth)
    pp.xlabel("theta")
    pp.ylabel("P(theta)")
    pp.axis([self.range[0], self.range[1], ax[2], ax[3]])
    set_label_fonsize(sp, label_size)
    pp.show()
def shade_bands(edges, y_range=[-1e5, 1e5], cmap='prism', **kwargs):
    '''
    Shades frequency bands.

    When plotting data over a set of frequency bands it is nice to have
    each band visually separated from the other. The kwarg `alpha` is useful.

    Parameters
    --------------
    edges : array-like
        x-values separating regions of a given shade
    y_range : tuple
        y-values to shade in
    cmap : str
        see matplotlib.cm or matplotlib.colormaps for acceptable values
    \*\* : keyword arguments
        passed to `matplotlib.fill_between`

    Examples
    -----------
    >>> rf.shade_bands([325, 500, 750, 1100], alpha=.2)
    '''
    cmap = plb.cm.get_cmap(cmap)
    for k in range(len(edges) - 1):
        plb.fill_between(
            [edges[k], edges[k + 1]],
            y_range[0], y_range[1],
            color=cmap(1.0 * k / len(edges)),
            **kwargs)
def plot(self):
    if not self.plot_state:
        return
    pop, best = self.plot_state
    with self.pylab_interface:
        import pylab
        pylab.clf()
        n, p = pop.shape
        iternum = numpy.arange(1, n + 1)
        tail = int(0.25 * n)
        pylab.hold(True)
        c = coordinated_colors(base=(0.4, 0.8, 0.2))
        if p == 5:
            pylab.fill_between(iternum[tail:], pop[tail:, 1], pop[tail:, 3],
                               color=c['light'], label='_nolegend_')
            pylab.plot(iternum[tail:], pop[tail:, 2],
                       label="80% range", color=c['base'])
            pylab.plot(iternum[tail:], pop[tail:, 0],
                       label="_nolegend_", color=c['base'])
        else:
            pylab.plot(iternum, pop, label="population", color=c['base'])
        pylab.plot(iternum[tail:], best[tail:], label="best", color=c['dark'])
        pylab.xlabel('iteration number')
        pylab.ylabel('chisq')
        pylab.legend()
        #pylab.gca().set_yscale('log')
        pylab.hold(False)
        pylab.draw()
def plotWithVariance(x, y, variance, *args, **kwargs):
    """
    Plot data with variance indicated by shading within one sigma.
    """
    line = pylab.plot(x, y.flatten(), *args, **kwargs)[0]
    sigma = np.sqrt(variance)
    pylab.fill_between(x, y - sigma, y + sigma,
                       color=line.get_color(), alpha=0.5)
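# A minimal usage sketch for plotWithVariance above; assumes `pylab` and
# `numpy as np` are imported at module level (as the function body itself
# expects) and that x, y, variance are 1-D arrays of equal length.
# The data here are made up for illustration only.
def _demo_plotWithVariance():
    x = np.linspace(0.0, 10.0, 200)
    y = np.sin(x)
    variance = 0.01 + 0.02 * x          # heteroscedastic toy variance
    plotWithVariance(x, y, variance, label='toy signal')
    pylab.legend()
    pylab.show()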
def demoplot(theta, args):
    colour = np.array([0, 0, 1.0])
    faded = 1 - (1 - colour) / 2.0
    (X, y) = args
    (n, D) = np.shape(X)
    xrange = X.max() - X.min()
    Xtest = np.arange(X.min() - xrange / 2, X.max() + xrange / 2, (X.max() - X.min()) / 100)
    Xtest.shape = (len(Xtest), 1)
    k = kernel2(X, X, theta, wantderiv=False)
    kstar = [kernel2(X, xs * np.ones((1, 1)), theta, wantderiv=False, measnoise=False) for xs in Xtest]
    kstar = np.squeeze(kstar)
    kstarstar = [kernel2(xs * np.ones((1, 1)), xs * np.ones((1, 1)), theta, wantderiv=False, measnoise=False) for xs in Xtest]
    kstarstar = np.squeeze(kstarstar)
    L = np.linalg.cholesky(k)
    invk = np.linalg.solve(L.transpose(), np.linalg.solve(L, np.eye(np.shape(X)[0])))
    mean = np.dot(kstar, np.dot(invk, y))
    var = kstarstar - np.diag(np.dot(kstar, np.dot(invk, kstar.T)))
    #var = np.reshape(var,(100,1))
    pl.ion()
    fig = pl.figure()
    #ax1 = fig.add_subplot(211)
    #ax2 = fig.add_subplot(212,sharex=ax1,sharey=ax1)
    pl.plot(Xtest, mean, '-k')
    #pl.plot(xstar,mean+2*np.sqrt(var),'x-')
    #pl.plot(xstar,mean-2*np.sqrt(var),'x-')
    #print np.shape(xstar), np.shape(mean), np.shape(var)
    pl.fill_between(np.squeeze(Xtest),
                    np.squeeze(mean - 2 * np.sqrt(var)),
                    np.squeeze(mean + 2 * np.sqrt(var)),
                    color='0.75')
    pl.plot(X, y, 'ko')
def plotuttdpc():
    pylab.figure(1)
    auttdpc = uttdperc(always)
    modauttdpc = uttdperc(modalways)
    for name, pf, c in variables:
        ivals = [uttdperc(x) for x in pf]
        imean, istd, imeanpstd, imeanmstd = multimeanstd(ivals)
        pylab.fill_between(pallthedays, imeanpstd, imeanmstd, facecolor=c, alpha=0.3)
    for name, pf, c in variables:
        ivals = [uttdperc(x) for x in pf]
        imean, istd, imeanpstd, imeanmstd = multimeanstd(ivals)
        mdiff = numpy.mean(imean)
        pylab.plot(pallthedays, imean, color=c,
                   label=("Mean (+-1std) UTTDpC of 30 \"%s\" users" % name))
    pylab.plot(pallthedays, auttdpc, color='black', label="UTTDpC of \"Always Upgrade\" user")
    pylab.plot(pallthedays, modauttdpc, color='red', label="UTTDpC of \"Progressive Always Upgrade\" user")
    print("Last uttd always", auttdpc[-1])
    print("Last uttd mod always", modauttdpc[-1])
    pylab.legend(loc="upper left")
    pylab.xlabel("Date")
    pylab.ylabel("Uptodate Distance per Component")
    pylab.title("Uptodate Distance per Component of Users")
    pylab.ylim([0, 1])
    saveFigure("q4auttdperc")
def plotchange():
    fig = pylab.figure(10)
    chtalw = chtt(always)
    chtmodalw = chtt(modalways)
    for name, pf, c in variables:
        ivals = [chtt(x) for x in pf]
        imean, istd, imeanpstd, imeanmstd = multimeanstd(ivals)
        pylab.fill_between(pallthedays, imeanpstd, imeanmstd, facecolor=c, alpha=0.3)
    for name, pf, c in variables:
        ivals = [chtt(x) for x in pf]
        imean, istd, imeanpstd, imeanmstd = multimeanstd(ivals)
        mdiff = numpy.mean(imean)
        pylab.plot(pallthedays, imean, color=c,
                   label=("Mean (+-1std) Total Change of 30 \"%s\" users" % name))
    pylab.plot(pallthedays, chtalw, color="black", label="Total Change of \"Always Upgrade\" user")
    pylab.plot(pallthedays, chtmodalw, color="red", label="Total Change of \"Progressive Always Upgrade\" user")
    print("Last change always", chtalw[-1])
    print("Last change mod always", chtmodalw[-1])
    pylab.legend(loc="upper left")
    pylab.xlabel("Date")
    pylab.ylabel("Total Change")
    pylab.title("Total Change of Users")
    saveFigure("q4achange")
def plotnew():
    fig = pylab.figure(20)
    for name, pf, c in variables:
        ivals = [nntt(x) for x in pf]
        imean, istd, imeanpstd, imeanmstd = multimeanstd(ivals)
        pylab.fill_between(pallthedays, imeanpstd, imeanmstd, facecolor=c, alpha=0.3)
    for name, pf, c in variables:
        ivals = [nntt(x) for x in pf]
        imean, istd, imeanpstd, imeanmstd = multimeanstd(ivals)
        mdiff = numpy.mean(imean)
        pylab.plot(pallthedays, imean, color=c, label=(name + "+-1std "))
    nntal = nntt(always)
    nntmod = nntt(modalways)
    pylab.plot(pallthedays, nntal, color="black", label="Always Upgrade Mean change")
    pylab.plot(pallthedays, nntmod, color="red", label="Progressive Always Upgrade Mean change")
    print("Last new always", nntal[-1])
    print("Last new mod always", nntmod[-1])
    pylab.legend(loc="upper left")
    saveFigure("q4anew")
def plot_trend(date_map, filename):
    x_data = []
    y_data = []
    for date in sorted(date_map):
        data = date_map[date]
        x_data.append(date)
        y_data.append(data['total'])
    fig = pylab.figure(figsize=(9, 6), dpi=80, facecolor='#ffffff', edgecolor='#333333')
    fig.autofmt_xdate()
    pylab.grid(True)
    x_vals = pylab.np.array(list(range(len(x_data))))
    y_vals = pylab.np.array(y_data)
    pylab.plot(x_vals, y_vals, color='#555555', alpha=1.00)
    pylab.fill_between(x_vals, y_vals, 0, where=y_vals > 0, color='#5555ff', alpha=.25, interpolate=True)
    pylab.fill_between(x_vals, y_vals, 0, where=y_vals < 0, color='#ff5555', alpha=.25, interpolate=True)
    pylab.xlim(0, len(x_data) - 1)
    sep = int(len(x_vals) / 10)
    ran = list(range(0, len(x_vals), sep))
    pylab.xticks([x_vals[i] for i in ran], [x_data[i] for i in ran], rotation=45, ha='right')
    pylab.ylim(-100, 100)
    pylab.yticks(range(-100, 101, 25))
    pylab.text(len(x_vals) - 2, 87, '/r/Christianity alignment',
               ha='right', va='center', color="#333333", alpha=.55, fontsize=16)
    pylab.savefig(filename)
def drawROC(points, zeTitle, zeFilename, visible, show_fig, save_fig=True,
            special_point=None, special_value=None, special_label=None):
    AUC = computeAUC(points)
    import pylab
    pylab.clf()
    pylab.grid(color='#aaaaaa', linestyle='-', linewidth=1, alpha=0.5)
    pylab.plot([x[0] for x in points], [y[1] for y in points], '-',
               linewidth=3, color="#000088", zorder=3)
    pylab.fill_between([x[0] for x in points], [y[1] for y in points], 0, color='0.9')
    pylab.plot([0.0, 1.0], [0.0, 1.0], '-', color="#AAAAAA")
    pylab.ylim((-0.01, 1.01))
    pylab.xlim((-0.01, 1.01))
    pylab.xticks(pylab.arange(0, 1.1, .1))
    pylab.yticks(pylab.arange(0, 1.1, .1))
    pylab.grid(True)
    ax = pylab.gca()
    r = pylab.Rectangle((0, 0), 1, 1, edgecolor='#444444', facecolor='none', zorder=1)
    ax.add_patch(r)
    [spine.set_visible(False) for spine in ax.spines.values()]
    if len(points) < 10:
        for i in range(1, len(points) - 1):
            pylab.plot(points[i][0], points[i][1], 'o', color="#000066", zorder=6)
    pylab.xlabel('False positive rate')
    pylab.ylabel('True positive rate')
    if special_point is not None:
        pylab.plot(special_point[0], special_point[1], 'o', color="#DD9999", zorder=6)
    if special_value is not None:
        pylab.text(special_point[0] + 0.01, special_point[1] - 0.01, special_value,
                   {'color': '#DD5555', 'fontsize': 10},
                   horizontalalignment='left',
                   verticalalignment='top',
                   rotation=0,
                   clip_on=False)
    if special_label is not None:
        if special_label != "":
            labels = [special_label]
            colors = ['#DD9999']
            circles = [pylab.Circle((0, 0), 1, fc=colors[0])]
            legend_location = 'lower right'
            pylab.legend(circles, labels, loc=legend_location)
    pylab.text(0.5, 0.3, 'AUC=%f' % AUC,
               horizontalalignment='center',
               verticalalignment='center',
               fontsize=18)
    pylab.title(zeTitle)
    if save_fig:
        pylab.savefig(zeFilename, dpi=300)
        print("\n result in " + zeFilename)
    if show_fig:
        pylab.show()
def sun():
    dur = 1000
    cad = 29.4 / 60.0 / 24.0
    X = numpy.genfromtxt('%ssun/sun_composite_tsi_20130930.txt' % root).T
    date = X[0]
    t = X[1]
    irr = X[2]
    l = (irr > 0) * (t > 5000)
    t = t[l] - t[l].min()
    date_ref = date[l][0]
    irr = irr[l] / irr[l].max()
    pylab.figure(1)
    pylab.clf()
    pylab.plot(t, irr, 'k-')
    col = ['r', 'g', 'b', 'y', 'm']
    tstart = [1000, 2100, 2600, 3800, 5000]
    for i in numpy.arange(5):
        time = numpy.r_[tstart[i]:tstart[i] + dur:cad]
        if i == 0:
            y1 = numpy.zeros(len(time)) + 1
            y2 = y1 - 0.0045
        g = scipy.interpolate.interp1d(t, irr, bounds_error=False)
        dF = filter.boxcare(g(time), 10, fill=True)
        pylab.plot(time, dF, c=col[i])
        pylab.fill_between(time, y1, y2, color=col[i], alpha=0.3)
        X = numpy.zeros((2, len(time)))
        X[0, :] = time - time[0]
        X[1, :] = dF
        numpy.savetxt('%ssun/sun_lightcurve_%02d.txt' % (root, i), X.T)
    pylab.xlim(t.min(), t.max())
    pylab.ylim(0.9955, 1)
    pylab.xlabel('Days since %d' % date_ref)
    pylab.ylabel('Flux decrement')
    pylab.savefig('%ssun/sun_lightcurves.png' % root)
    return
def sampleplot_K(r, ylim=None, HDI_y=None):
    from pylab import plot, fill_between, gca, text
    x, y = histogram(r, plot=False)
    plot(x, y, '-o')
    fill_between(x, y, facecolor='blue', alpha=0.2)
    if ylim:
        gca().set_ylim(ylim)
    dx = x[1] - x[0]
    cs = np.cumsum(y) * dx
    HDI = np.percentile(r, [2.5, 50, 97.5])
    yl = gca().get_ylim()
    dy = 0.05 * yl[1]
    if HDI_y is None:
        HDI_y = yl[1] * .1
    text((HDI[0] + HDI[2]) / 2, HDI_y + dy, '95% HDI',
         ha='center', va='center', fontsize=12)
    plot(HDI, [HDI_y, HDI_y, HDI_y], 'k.-', linewidth=1)
    for v in HDI:
        text(v, HDI_y - dy, '%.3f' % v, ha='center', va='center', fontsize=12)
    xl = gca().get_xlim()
    text(.05 * (xl[1] - xl[0]) + xl[0], 0.9 * yl[1],
         r'$\tilde{x}=%.3f$' % np.median(r), ha='left', va='center')
def GP_plotpred(xpred, x, y, cov_par, cov_func=None, cov_typ='SE',
                MF=None, MF_par=None, MF_args=None, MF_args_pred=None,
                WhiteNoise=False, plot_color=None):
    '''
    Wrapper for GP_predict that takes care of merging the covariance and
    mean function parameters, and (optionally) plots the predictive
    distribution (as well as returning it)
    '''
    if MF != None:
        merged_par = scipy.append(cov_par, MF_par)
        n_MF_par = len(MF_par)
    else:
        merged_par = cov_par[:]
        n_MF_par = 0
    fpred, fpred_err = GP_predict(merged_par, xpred, x, y,
                                  cov_func=cov_func, cov_typ=cov_typ,
                                  MF=MF, n_MF_par=n_MF_par,
                                  MF_args=MF_args, MF_args_pred=MF_args_pred,
                                  WhiteNoise=WhiteNoise)
    xpl = scipy.array(xpred[:, 0]).flatten()
    if plot_color != None:
        pylab.fill_between(xpl, fpred + 2 * fpred_err, fpred - 2 * fpred_err,
                           color=plot_color, alpha=0.1)
        pylab.fill_between(xpl, fpred + fpred_err, fpred - fpred_err,
                           color=plot_color, alpha=0.1)
        pylab.plot(xpl, fpred, '-', color=plot_color)
    return fpred, fpred_err
def Draw(func1, func2):
    # generate the plot points
    xlist = mlab.frange(a, b, 0.01)
    ylist = [func1(x) for x in xlist]
    ylist2 = [func2(x) for x in xlist]
    # generate the axis
    y0 = [0 for x in xlist]
    pylab.plot(xlist, ylist)
    #pylab.plot(xlist, y0, label='line1', color='blue')
    pylab.plot(xlist, ylist2, label='$sin(x)/x)$', color='red')
    pylab.legend()
    # turn on grid drawing
    pylab.grid(True)
    pylab.fill_between(xlist, ylist, ylist2, color='green', alpha=0.25)
    # if there are few subdivisions, redefine the ticks to match the step
    if ((round((b - a) / h)) < 25):
        pylab.xticks([a + i * h for i in range(round((b - a) / h) + 1)])
    # plot the roots, checking that each root is free of errors
    for i in range(1, len(table)):
        if (table[i][4] != ':-('):
            pylab.scatter(table[i][3], table[i][4])
    # show the figure with the plot
    pylab.show()
def visualize(generation_list):
    '''Generate pretty pictures using pylab and pygame'''
    best = []
    average = []
    stddev = []
    average_plus_stddev = []
    average_minus_stddev = []
    for pop in generation_list:
        best += [most_fit(pop).fitness]
        average += [avg_fitness(pop)]
        stddev += [fitness_stddev(pop)]
        average_plus_stddev += [average[-1] + stddev[-1]]
        average_minus_stddev += [average[-1] - stddev[-1]]
    pylab.figure(1)
    pylab.fill_between(range(len(generation_list)), average_plus_stddev,
                       average_minus_stddev, alpha=0.2, color='b',
                       label="Standard deviation")
    pylab.plot(range(len(generation_list)), best, color='r', label='Best')
    pylab.plot(range(len(generation_list)), average, color='b',
               label='Average with std.dev.')
    pylab.title("Fitness plot - Beer-cog")
    pylab.xlabel("Generation")
    pylab.ylabel("Fitness")
    pylab.legend(loc="upper left")
    pylab.savefig("mincog_fitness.png")
    best_index = best.index(max(best))
    best_individual = most_fit(generation_list[-1])
    with open('last.txt', 'w') as f:
        f.write(str(best_individual.gtype))
    print(best_individual.gtype)
    game = min_cog_game.Game()
    game.play(best_individual.ptype, True)
def addqqplotinfo(qnull, M, xl='-log10(P) observed', yl='-log10(P) expected',
                  xlim=None, ylim=None, alphalevel=0.05, legendlist=None,
                  fixaxes=False):
    distr = 'log10'
    pl.plot([0, qnull.max()], [0, qnull.max()], 'k')
    pl.ylabel(xl)
    pl.xlabel(yl)
    if xlim is not None:
        pl.xlim(xlim)
    if ylim is not None:
        pl.ylim(ylim)
    if alphalevel is not None:
        if distr == 'log10':
            betaUp, betaDown, theoreticalPvals = _qqplot_bar(M=M, alphalevel=alphalevel, distr=distr)
            lower = -sp.log10(theoreticalPvals - betaDown)
            upper = -sp.log10(theoreticalPvals + betaUp)
            pl.fill_between(-sp.log10(theoreticalPvals), lower, upper, color="grey", alpha=0.5)
            #pl.plot(-sp.log10(theoreticalPvals),lower,'g-.')
            #pl.plot(-sp.log10(theoreticalPvals),upper,'g-.')
    if legendlist is not None:
        leg = pl.legend(legendlist, loc=4, numpoints=1)
        # set the markersize for the legend
        for lo in leg.legendHandles:
            lo.set_markersize(10)
    if fixaxes:
        fix_axes()
def show_barlines(page):
    import pylab
    for i, barlines in enumerate(page.barlines):
        sd = page.staves.staff_dist[i]
        for j, barline_range in enumerate(barlines):
            barline_x = int(barline_range.mean())
            staff_y = page.staves.staff_y(i, barline_x)
            repeats = page.repeats[i][j]
            if repeats:
                # Draw thick bar
                pylab.fill_between([barline_x - sd/4, barline_x + sd/4],
                                   staff_y - sd*2, staff_y + sd*2, color='g')
                for letter, sign in (('L', -1), ('R', +1)):
                    if letter in repeats:
                        # Draw thin bar
                        bar_x = barline_x + sign * sd/2
                        pylab.plot([bar_x, bar_x],
                                   [staff_y - sd*2, staff_y + sd*2], color='g')
                        for y in (-1, +1):
                            circ = pylab.Circle((bar_x + sign*sd/2, staff_y + y*sd/2),
                                                sd/4, color='g')
                            pylab.gcf().gca().add_artist(circ)
            else:
                pylab.plot([barline_x, barline_x],
                           [staff_y - sd*2, staff_y + sd*2], color='g')
def bootstrap(self, nBoot, nbins=20):
    pops = np.zeros((nBoot, nbins))
    #medianpop = [[] for i in data.cat]
    pylab.figure(figsize=(20, 14))
    for i in range(3):
        pylab.subplot(1, 3, i + 1)
        #if i ==0:
        #pylab.title("Bootstrap on medians", fontsize = 20.)
        pop = self.angles[(self.categories == i)]  # & (self.GFP > 2000)]
        for index in range(nBoot):
            newpop = np.random.choice(pop, size=len(pop), replace=True)
            #medianpop[i].append(np.median(newpop))
            newhist, binedges = np.histogram(newpop, bins=nbins)
            pops[index, :] = newhist / 1. / len(pop)
        #pylab.hist(medianpop[i], bins = nbins, label = "{2} median {0:.1f}, std {1:.1f}".format(np.median(medianpop[i]), np.std(medianpop[i]), data.cat[i]), color = data.colors[i], alpha =.2, normed = True)
        meanpop = np.sum(pops, axis=0) / 1. / nBoot
        stdY = np.std(pops, axis=0)
        print("width", binedges[1] - binedges[0])
        pylab.bar(binedges[:-1], meanpop, width=binedges[1] - binedges[0],
                  label="mean distribution", color=data.colors[i], alpha=0.6)
        pylab.fill_between((binedges[:-1] + binedges[1:]) / 2.,
                           meanpop - stdY, meanpop + stdY, alpha=0.3)
        pylab.legend()
        pylab.title(data.cat[i])
        pylab.xlabel("Angle(degree)", fontsize=15)
        pylab.ylim([-.01, 0.23])
    pylab.savefig("/users/biocomp/frose/frose/Graphics/FINALRESULTS-diff-f3/distrib_nBootstrap{0}_bins{1}_GFPsup{2}_{3}.png".format(nBoot, nbins, 'all', randint(0, 999)))
def plotchange():
    fig = pylab.figure(10)
    for name, pf, c in variables:
        ivals = [chtt(x) for x in pf]
        imean, istd, imeanpstd, imeanmstd = multimeanstd(ivals)
        pylab.fill_between(pallthedays, imeanpstd, imeanmstd, facecolor=c, alpha=0.3)
    for name, pf, c in variables:
        ivals = [chtt(x) for x in pf]
        imean, istd, imeanpstd, imeanmstd = multimeanstd(ivals)
        mdiff = numpy.mean(imean)
        print("Final change", name, imean[-1])
        pylab.plot(pallthedays, imean, color=c,
                   label=("Mean (+-1std) Total Change of 30 \"%s\" users" % name))
    pylab.legend(loc="upper left")
    pylab.xlabel("Date")
    pylab.ylabel("Total Change")
    pylab.title("Total Change of Users")
    pylab.ylim([0, 1500])
    saveFigure("q1cchange")

# This graph shows the range over which change can occur when altering the probability to install.
# Given that the results support the idea that the probability to install correlates with the number of packages installed, this is straightforward.
# An effect that was hypothesised but did not occur was that over time the number of installed packages decreases as commonly depended-on packages are installed.
# This does not occur, but that may be because of the randomness with which packages are selected.
# It may still occur in reality, as, say, a graphics designer will likely install graphics components that require similar functionality.
# Another interesting point that can be seen in this graph is the variability created by the soft failures, as discussed previously.
# It can be seen that the std increased after a soft failure in the install-twice-a-week change curve.

uivals = zip(alll, map(lambda x: rempd(x), alll))
for name, ui in uivals:
    for date, remv in zip(allthedays, ui):
        if remv >= 100:
            print(date, datetime.date.fromtimestamp(date), name)
def plotBkMeasure(bk, ek, vk, figurePath):
    #print bk
    #print ek
    #print vk
    k = list(range(len(bk)))
    #for i,j in enumerate(bk):
    pylab.ioff()
    pylab.figure()
    pylab.plot(k, bk, '.', label='Bk')
    pylab.plot(k, ek, label='E(Bk)')
    #pylab.plot(k, ek+2*np.sqrt(vk), '-.r', label='limit range')
    #pylab.plot(k, ek-2*np.sqrt(vk), '-.r')
    #for i in range(len(ek)):
    pylab.fill_between(k, ek + 4 * np.sqrt(vk), ek - 4 * np.sqrt(vk),
                       facecolor='red', interpolate=True)

    # figure setting
    pylab.xlim(2, k[-1])
    pylab.ylim(0, 1.0)
    pylab.legend(loc='upper right')
    pylab.xlabel('Number of Clusters')
    pylab.ylabel('Bk')
    # pylab.title('Bk measure between two algorithm')

    # show result
    pylab.savefig(figurePath, format='svg')
def plot_shaded_lines(my_xticks, y1, y2, error1, error2, ylab, xlab, filename):
    plt.figure(figsize=(6, 6))
    from matplotlib import rcParams
    rcParams.update({'figure.autolayout': True})
    x = range(0, len(y1))
    plt.plot(x, y1, 'k-', color="blue", label='men')
    plt.fill_between(x, y1 - error1, y1 + error1, facecolor='blue', alpha=.2)
    plt.plot(x, y2, 'k-', color="red", label='women')
    plt.fill_between(x, y2 - error2, y2 + error2, facecolor='red', alpha=.2)
    #if isinstance(x, (int, long, float, complex)):
    #    plt.xlim(np.min(x), np.max(x))
    plt.gcf().subplots_adjust(bottom=0.3)
    plt.xticks(x, my_xticks)
    plt.xticks(rotation=70, fontsize=14)
    plt.yticks(fontsize=14)
    #plt.setp(ax.get_xticklabels(), rotation='vertical', fontsize=14)
    plt.ylabel(ylab, fontsize=14)
    plt.xlabel(xlab, fontsize=14)
    plt.legend()
    plt.savefig(filename)
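# A hedged usage sketch for plot_shaded_lines above. Assumes `numpy as np` and
# `matplotlib.pyplot as plt` are importable in this module and that the y/error
# arguments are numpy arrays (the function subtracts them elementwise).
# All values below are illustrative only.
def _demo_plot_shaded_lines():
    import numpy as np
    ticks = ['Q1', 'Q2', 'Q3', 'Q4']
    y_men = np.array([1.0, 1.4, 1.3, 1.7])
    y_women = np.array([0.9, 1.1, 1.5, 1.6])
    err_men = np.array([0.1, 0.15, 0.1, 0.2])
    err_women = np.array([0.1, 0.1, 0.15, 0.1])
    plot_shaded_lines(ticks, y_men, y_women, err_men, err_women,
                      'score', 'quarter', 'shaded_lines_demo.png')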
def plotPhaseTransitionMd(dic):
    M = dic['M']
    S = dic['S']
    C = dic['C']
    pl.close("all")
    colors = ['b', 'g', 'r', 'c', 'm', 'y']
    pl.figure(1, (11, 9))
    for i, m in enumerate(np.unique(M)):
        print(m)
        if m == 4:  # or m == 3:
            continue
        index = np.argwhere(M == m)
        B = binning(S[index], C[index], 50, confinter=0.01)
        pl.fill_between(B['bins'], B['percUp'], B['percDown'], alpha=0.05, color=colors[i])
        pl.plot(B['bins'], B['mean'], 'x-', label="M = %s" % m, color=colors[i], lw=1)
        pl.plot(B['bins'], B['percUp'], '-.', color=colors[i])
        pl.plot(B['bins'], B['percDown'], '-.', color=colors[i])
        pl.plot(S[index], C[index], "o", color=colors[i])
    pl.xlabel("Property violation s")
    pl.ylabel("Cooperation level c")
    pl.legend(loc=0)
    return B
def plotTorques(self):
    print("Plotting torques...")
    pylab.fill_between(range(len(self.torque_est)), self.torque_sup,
                       self.torque_inf, facecolor='blue', alpha=0.3)
    pylab.plot(self.torque_est, 'b')
    pylab.plot(self.torque_mes, 'r', alpha=0.1)
    pylab.plot(self.torque_filt, 'r')
    pylab.show()
def plot(fldname):
    self.histmoeller(fldname)
    pl.fill_between(self.jdvec, self.jdvec * 0, self.posmean,
                    facecolor='powderblue')
    pl.fill_between(self.jdvec, self.negmean, self.jdvec * 0,
                    facecolor='pink')
    pl.plot(self.jdvec, self.negmean + self.posmean, lw=1, c='k')
    pl.gca().xaxis.axis_date()
def plot_gp_pred(sigma, **fillargs):
    # pdb.set_trace()
    nugget = (sigma ** 2 / (0.1 + d.astype('float') ** 2))
    gp = GaussianProcess(corr='squared_exponential', nugget=nugget)
    gp.fit(np.atleast_2d(range(n)).T, np.atleast_2d(d).T)
    x = np.atleast_2d(np.linspace(0, n - 1)).T
    y_pred, MSE = gp.predict(x, eval_MSE=True)
    pylab.plot(x, y_pred)
    pylab.fill_between(x.T[0], y_pred + MSE, y_pred - MSE, **fillargs)
def masterPlot(timeSteps, concentration):
    ### First make a List which contains all logged values of c
    logC = []
    for c in concentration:
        logC.append(np.log(c))

    ### compute k
    k, e = np.polyfit(x, logC, 1)

    ### get the estimates
    startC = concentration[0]
    endTime = timeSteps[-1]
    estTime = [i for i in range(endTime)]
    estConc = myExp(startC, endTime, k)

    ###
    exp = lambda x: startC * np.exp(k * x)
    AUC, err = integrate.quad(exp, 0, np.inf)
    text = 'AUC = ' + str(round(AUC / 60))
    text2 = '$y= %d \cdot e^{-%rt}$' % (startC, round((k * 60), 1))

    fig = pl.figure()
    fig.subplots_adjust(bottom=0.025, left=0.025, top=0.975, right=0.975)

    pl.subplot(2, 1, 1)
    pl.scatter(timeSteps, concentration)
    pl.plot(estTime, estConc, color="green", label=text2)
    pl.title('Medikament X: AUC und approx. Formel beziehen sich auf Stunden')
    pl.ylabel('Konzentration c [mg/L]')
    pl.xlim(0, endTime)
    pl.grid()
    pl.legend()
    #pl.xticks(())
    #pl.yticks(())

    pl.subplot(2, 2, 3)
    pl.scatter(timeSteps, concentration)
    pl.ylabel('Linearisiert (log[c])')
    pl.grid()
    pl.xlim(0, endTime)
    pl.semilogy()
    pl.xlim(0)
    #pl.xticks(())
    #pl.yticks(())

    pl.subplot(2, 2, 4)
    pl.plot(estTime, estConc, label=text)
    pl.xlim(0, endTime)
    pl.ylabel('Regressionskurve')
    pl.fill_between(estTime, estConc, color='green', alpha=.25)
    pl.legend()
    #pl.xticks(())
    pl.yticks(())
    pl.grid()

    #pl.subplot(2, 3, 6)
    #pl.xticks(())
    #pl.yticks(())
    pl.show()
def plotWithPercentiles(ltraces, color, name=None, whichp=25, plotall=False):
    m = median(ltraces, axis=1)
    lp = percentile(ltraces, whichp, axis=1)
    up = percentile(ltraces, 100 - whichp, axis=1)
    if plotall:
        for l in ltraces.T:
            pylab.plot(l, color + '-', alpha=0.3)
    pylab.plot(m, color + '-', label=name)
    pylab.fill_between(range(len(m)), lp, up, facecolor=color, alpha=0.1)
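# Usage sketch for plotWithPercentiles above; assumes the unqualified `median`
# and `percentile` names resolve to the numpy functions at module level (as the
# function body expects) and that `pylab` is imported. `ltraces` is laid out as
# (time, n_traces): one column per trace. Synthetic data, for illustration only.
def _demo_plotWithPercentiles():
    import numpy as np
    t = np.arange(100)
    ltraces = np.sin(t / 10.0)[:, None] + 0.2 * np.random.randn(100, 30)
    plotWithPercentiles(ltraces, 'b', name='toy traces', whichp=25)
    pylab.legend()
    pylab.show()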
def fill_windows(x, y0, y1, fill_xs, fill_c, fill_lab):
    for i, xx in enumerate(fill_xs):
        y0_clipped = y0
        if type(y0) is np.ndarray:  # if y_bot is a curve
            x_clipped, y0_clipped, lims = wf.clip_xy(xx, x, y0)
        x_clipped, y1_clipped, lims = wf.clip_xy(xx, x, y1)
        pl.fill_between(x_clipped, y0_clipped, y1_clipped,
                        facecolor=fill_c[0], edgecolor=fill_c[1],
                        label=fill_lab)
def Draw(func1, func2):
    # generate the plot points
    xlist = mlab.frange(a, b, 0.01)
    ylist = [func1(x) for x in xlist]
    ylist2 = [func2(x) for x in xlist]
    # generate the axis
    y0 = [0 for x in xlist]

    #############################################################
    max1Y = max(ylist)
    min1Y = min(ylist)
    max2Y = max(ylist2)
    min2Y = min(ylist2)
    minmaxarrayY = []
    minmaxarrayX = []
    for i in range(len(ylist)):
        if ((max1Y == ylist[i]) or (min1Y == ylist[i])):
            minmaxarrayY.append(ylist[i])
            minmaxarrayX.append(xlist[i])
    for i in range(len(ylist2)):
        if ((max2Y == ylist2[i]) or (min2Y == ylist2[i])):
            minmaxarrayY.append(ylist2[i])
            minmaxarrayX.append(xlist[i])
    ################################################################

    extremumX, extremumY = converter(korn, 0, 3, 4, func1)
    inflectionX, inflectionY = converter(korn1, 0, 3, 4, func1)
    kornsX, kornsY = converter(table, 1, 3, 4, func1)
    pylab.plot(extremumX, extremumY, 'go', label='extremum', color='red')
    pylab.plot(inflectionX, inflectionY, 'go', label='inflection point', color='yellow')
    pylab.plot(minmaxarrayX, minmaxarrayY, 'go', label='min/max', color='green')
    pylab.plot(kornsX, kornsY, 'go', label='Korn', color='black')
    pylab.plot(xlist, ylist, label='$sin(x)/x$')
    pylab.plot(xlist, y0, color='pink')
    pylab.plot(xlist, ylist2, label='$0.02*x* x - 4$', color='pink')
    pylab.legend()
    # turn on grid drawing
    pylab.grid(True)
    xlist1 = mlab.frange(float(table2[0][3]), float(table2[len(table2) - 1][3]), 0.01)
    pylab.fill_between(xlist1, [func1(x) for x in xlist1], [func2(x) for x in xlist1],
                       color='green', alpha=0.25)
    # if there are few subdivisions, redefine the ticks to match the step
    if ((round((b - a) / h)) < 25):
        pylab.xticks([a + i * h for i in range(round((b - a) / h) + 1)])
    print()
    print()
    print("Минимумы и максимумы:")
    print("X", "Y", sep="\t")
    for i in range(len(minmaxarrayY)):
        print('{:3.5g}'.format(minmaxarrayX[i]), '{:3.5g}'.format(minmaxarrayY[i]), sep='\t\t')
    # show the figure with the plot
    pylab.show()
def parameter_autocor(sessions, population_fit, param='side'):
    '''
    Evaluate within and cross subject variability in specified parameter
    and autocorrelation across sessions.
    '''
    assert len(population_fit['MAP_fits']) == len(sessions), \
        'Population fit does not match number of sessions.'
    param_index = population_fit['param_names'].index(param)
    for i, MAP_fit in enumerate(population_fit['MAP_fits']):
        sessions[i].side_loading = MAP_fit['params_U'][param_index]
    sIDs = list(set([s.subject_ID for s in sessions]))
    p.figure(1)
    p.clf()
    p.subplot2grid((2, 2), (0, 0), colspan=2)
    subject_means = []
    subject_SDs = []
    cor_len = 20
    subject_autocorrelations = np.zeros([len(sIDs), 2 * cor_len + 1])
    subject_shuffled_autocor = np.zeros([len(sIDs), 2 * cor_len + 1, 1000])
    for i, sID in enumerate(sIDs):
        a_sessions = sorted([s for s in sessions if s.subject_ID == sID],
                            key=lambda s: s.day)
        sl = [s.side_loading for s in a_sessions]
        p.plot(sl)
        subject_means.append(np.mean(sl))
        subject_SDs.append(np.std(sl))
        sl = (np.array(sl) - np.mean(sl)) / np.std(sl)
        autocor = np.correlate(sl, sl, 'full') / len(sl)
        subject_autocorrelations[i, :] = \
            autocor[autocor.size/2 - cor_len: autocor.size/2 + cor_len + 1]
        for j in range(1000):
            shuffle(sl)
            autocor = np.correlate(sl, sl, 'full') / len(sl)
            subject_shuffled_autocor[i, :, j] = \
                autocor[autocor.size/2 - cor_len: autocor.size/2 + cor_len + 1]
    mean_shuffled_autocors = np.mean(subject_shuffled_autocor, 0)
    mean_shuffled_autocors.sort(1)
    p.xlabel('Day')
    p.ylabel('Subject rotational bias')
    p.subplot2grid((2, 2), (1, 0))
    p.fill_between(range(-cor_len, cor_len + 1), mean_shuffled_autocors[:, 10],
                   mean_shuffled_autocors[:, -10], color='k', alpha=0.2)
    p.plot(range(-cor_len, cor_len + 1), np.mean(subject_autocorrelations, 0),
           'b.-', markersize=5)
    p.xlabel('Lag (days)')
    p.ylabel('Correlation')
    p.subplot2grid((2, 2), (1, 1))
    p.bar([0.5, 1.5], [np.mean(subject_SDs), np.sqrt(np.var(subject_means))])
    p.xticks([1, 2], ['Within subject', 'Cross subject'])
    p.xlim(0.25, 2.5)
    p.ylabel('Standard deviation')
def plot_one(data):
    kwargs = dict(**data['style'])
    setdefaults(kwargs, DEFAULTS)
    mass, limit = get_mass_limit(data)
    plt.fill_between(mass, limit, y2=1.0,
                     edgecolor=COLORS['blue'],
                     facecolor=COLORS['blue'],
                     alpha=kwargs['alpha'])
    plot_text(data)
def make_plot(x, y, x_tst, y_tst, y_tst_pred, y_tst_samples, lc, uc, textstr):
    pylab.figure()
    pylab.scatter(x, y, color='r')
    pylab.plot(x_tst, y_tst)
    pylab.plot(x_tst, y_tst_pred)
    # pylab.plot(x_tst, y_tst_samples)
    pylab.fill_between(x_tst.squeeze(), lc.squeeze(), uc.squeeze(),
                       color='gray', alpha=0.5)
    pylab.xlim([-7, 7])
    pylab.ylim([-250, 250])
    pylab.text(-6.8, 150., textstr, fontsize=12)
def shadedplot(x, y, fill=True, label='', color='b'):
    # y[0,:] mean, median etc; in the middle
    # y[1,:] lower
    # y[2,:] upper
    p = plt.plot(x, y[0, :], label=label, color=color)
    c = p[-1].get_color()
    #plt.plot(x, y[1,:], color=c, alpha=0.25)
    #plt.plot(x, y[2,:], color=c, alpha=0.25)
    if fill:
        plt.fill_between(x, y[1, :], y[2, :], color=c, alpha=0.25)
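# Usage sketch for shadedplot above (assumes `numpy as np` and
# `matplotlib.pyplot as plt` are available). y is a 3 x N array: row 0 is the
# central curve, rows 1 and 2 are the lower and upper envelopes, matching the
# comments in the function. Synthetic data, for illustration only.
def _demo_shadedplot():
    import numpy as np
    x = np.linspace(0, 1, 50)
    mid = np.sin(2 * np.pi * x)
    y = np.vstack([mid, mid - 0.2, mid + 0.2])
    shadedplot(x, y, label='sine with band', color='g')
    plt.legend()
    plt.show()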
def plot_eb_mode_test_residuals(self, YLIM=[-5, 30], add_title='toto'):
    print("eb mode plot test residuals")
    plt.figure(figsize=(12, 8))
    plt.subplots_adjust(bottom=0.12, top=0.88, right=0.99)
    mean_e, std_e = mean_check_finite(self.e_mode_residuals)
    mean_b, std_b = mean_check_finite(self.b_mode_residuals)
    Filtre0 = ((np.exp(self.logr[0]) > 6e-3) & (np.exp(self.logr[0]) < 1e-2))
    Title = '$\\xi_E(0) = (%.0f \pm %.0f)$ mas$^2$ | $\\xi_B(0) = (%.0f \pm %.0f)$ mas$^2$' % (
        (np.mean(mean_e[Filtre0]), np.mean(std_e[Filtre0]),
         np.mean(mean_b[Filtre0]), np.mean(std_b[Filtre0])))
    plt.plot(np.exp(self.logr[0]) * 60., mean_e, 'b', lw=3,
             label='mean E-mode (test, after GP)')
    plt.fill_between(np.exp(self.logr[0]) * 60., mean_e - std_e, mean_e + std_e,
                     color='b', alpha=0.4, label='$\pm 1 \sigma$ E-mode')
    plt.plot(np.exp(self.logr[0]) * 60., mean_b, 'r', lw=3,
             label='mean B-mode (test, after GP)')
    plt.fill_between(np.exp(self.logr[0]) * 60., mean_b - std_b, mean_b + std_b,
                     color='r', alpha=0.4, label='$\pm 1 \sigma$ B-mode')
    plt.plot(np.exp(self.logr[0]) * 60., np.zeros_like(self.logr[0]), 'k--', lw=3)
    plt.ylim(YLIM[0], YLIM[1])
    plt.xlim(0.005 * 60., 1.5 * 60.)
    plt.xscale('log')
    plt.xticks(size=20)
    plt.yticks(size=20)
    plt.xlabel('$\Delta \\theta$ (arcmin)', fontsize=24)
    plt.ylabel('$\\xi_{E/B}$ (mas$^2$)', fontsize=24)
    plt.legend(fontsize=20)
    if add_title is not None:
        plt.title(add_title + '\n' + Title, fontsize=24)
    else:
        plt.title(Title, fontsize=16)
def phase__1():
    """Constant phase lag, no binning, no noise, RED NOISE LEAK

    TEST: phases from segments without noise, i.e. with leak
    SUMMARY:
        1. taper helps a lot! both in the scatter and bias
    """
    n = 2**12
    dt = 1.0
    mu = 100.0
    lag = 0.5
    nsim = 200

    sim = az.SimLC(seed=463284)
    sim.add_model('powerlaw', [1e-2, -2.])
    sim.add_model('constant', lag, lag=True)

    lag = []
    for isim in range(nsim):
        az.misc.print_progress(isim, nsim, isim == nsim - 1)
        sim.simulate(n * 4, dt, mu, norm='var')
        sim.apply_lag(phase=True)
        l1 = az.LCurve.calculate_lag(sim.y[:n], sim.x[:n], dt, phase=True)
        l2 = az.LCurve.calculate_lag(sim.y[:n], sim.x[:n], dt, phase=True, taper=True)
        lag.append([l1, l2])
    lag = np.array(lag)

    fq = lag[0, 0, 0]
    l = lag[:, :, 1].mean(0)
    lp = np.percentile(lag[:, :, 1], [50, 16, 100 - 16], 0)

    plt.rcParams['figure.figsize'] = [8, 4]
    plt.rcParams['font.size'] = 7

    ax = plt.subplot(121)
    plt.plot(fq, lp[0, 0], color='C0')
    plt.title('no-taper')
    plt.fill_between(fq, lp[1, 0], lp[2, 0], alpha=0.5, color='C1')
    plt.plot(sim.normalized_lag[0, 1:], sim.normalized_lag[1, 1:], color='C2')
    ax.set_xscale('log')

    ax = plt.subplot(122)
    plt.plot(fq, lp[0, 1], color='C0')
    plt.title('taper')
    plt.fill_between(fq, lp[1, 1], lp[2, 1], alpha=0.5, color='C1')
    plt.plot(sim.normalized_lag[0, 1:], sim.normalized_lag[1, 1:], color='C2')
    ax.set_xscale('log')

    plt.savefig('png/phase__1.png')
def phase__2():
    """Constant phase lag, no binning, NOISE, RED NOISE LEAK

    TEST: phases from segments (i.e. leak) with noise
    SUMMARY:
        1. again, taper helps a lot! The scatter is very small in
           intermediate frequencies not affected by noise, unlike in
           the no-taper case.
    """
    n = 2**12
    dt = 1.0
    mu = 100.0
    lag = 0.5
    nsim = 200

    sim = az.SimLC(seed=4634)
    sim.add_model('powerlaw', [1e-2, -2.])
    sim.add_model('constant', lag, lag=True)

    lag = []
    for isim in range(nsim):
        az.misc.print_progress(isim, nsim, isim == nsim - 1)
        sim.simulate(n * 4, dt, mu, norm='var')
        sim.apply_lag(phase=True)
        scale_fac = 100
        x = np.random.poisson(sim.x[:n] * scale_fac) / scale_fac
        y = np.random.poisson(sim.y[:n] * scale_fac) / scale_fac
        l1 = az.LCurve.calculate_lag(y, x, dt, phase=True)
        l2 = az.LCurve.calculate_lag(y, x, dt, phase=True, taper=True)
        lag.append([l1, l2])
    lag = np.array(lag)

    fq = lag[0, 0, 0]
    l = lag[:, :, 1].mean(0)
    lp = np.percentile(lag[:, :, 1], [50, 16, 100 - 16], 0)

    plt.rcParams['figure.figsize'] = [8, 4]
    plt.rcParams['font.size'] = 7

    ax = plt.subplot(121)
    plt.plot(fq, lp[0, 0], color='C0')
    plt.title('no-taper')
    plt.fill_between(fq, lp[1, 0], lp[2, 0], alpha=0.5, color='C1')
    plt.plot(sim.normalized_lag[0, 1:], sim.normalized_lag[1, 1:], color='C2')
    ax.set_xscale('log')

    ax = plt.subplot(122)
    plt.plot(fq, lp[0, 1], color='C0')
    plt.title('taper')
    plt.fill_between(fq, lp[1, 1], lp[2, 1], alpha=0.5, color='C1')
    plt.plot(sim.normalized_lag[0, 1:], sim.normalized_lag[1, 1:], color='C2')
    ax.set_xscale('log')

    plt.savefig('png/phase__2.png')
def plot_SandI(susceptible, infected, Title):
    pl.figure(figsize=(5, 3))
    pl.plot(time, susceptible, label="Susceptible")
    pl.fill_between(time, susceptible, 0, alpha=0.30)
    pl.plot(time, infected, label="Infected")
    pl.fill_between(time, infected, 0, alpha=0.30)
    pl.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
    pl.xlabel('Time in days', fontsize=14)
    pl.ylabel('Individuals', fontsize=14)
    pl.title(Title)
    pl.legend()
def graph_maxmin_fitness(pop, identify, minimize=False, filesave=None):
    x = []
    max_y = []
    min_y = []
    avg_y = []
    for it in pop:
        x.append(it["generation"])
        max_y.append(it["fitMax"])
        min_y.append(it["fitMin"])
        avg_y.append(it["fitAve"])
    pylab.figure()
    pylab.plot(x, max_y, "g", label="Max fitness")
    pylab.plot(x, min_y, "r", label="Min fitness")
    pylab.plot(x, avg_y, "b", label="Avg fitness")
    pylab.fill_between(x, min_y, max_y, color="g", alpha=0.1, label="Diff max/min")
    if minimize:
        raw_max = min(min_y)
    else:
        raw_max = max(max_y)
    if minimize:
        gen_max = x[min_y.index(raw_max)]
    else:
        gen_max = x[max_y.index(raw_max)]
    if minimize:
        annot_label = "Minimum (%.2f)" % (raw_max, )
    else:
        annot_label = "Maximum (%.2f)" % (raw_max, )
    pylab.annotate(
        annot_label,
        xy=(gen_max, raw_max),
        xycoords='data',
        xytext=(8, 15),
        textcoords='offset points',
        arrowprops=dict(arrowstyle="->", connectionstyle="arc"),
    )
    pylab.xlabel("Generation (#)")
    pylab.ylabel("Fitness score")
    pylab.title("Plot of evolution identified by '%s' (fitness scores)" % (identify))
    pylab.grid(True)
    pylab.legend(prop=FontProperties(size="smaller"), loc=0)
    if filesave:
        pylab.savefig(filesave)
        print("Graph saved to %s file !" % (filesave, ))
    else:
        pylab.show()
def plot_sausage(X, mean, std, alpha=None,
                 format_fill={'alpha': 0.3, 'facecolor': 'k'},
                 format_line=dict(alpha=1, color='g', lw=3, ls='dashed')):
    """
    Plot sausage plot of GP. I.e:

    .. image:: ../images/sausage.png
       :height: 8cm

    **returns:** : [fill_plot, line_plot]
        The fill and the line of the sausage plot. (i.e. green line and
        gray fill of the example above)

    **Parameters:**

    X : [double]
        Interval X for which the sausage shall be plotted.

    mean : [double]
        The mean to be plotted.

    std : [double]
        Pointwise standard deviation.

    format_fill : {format}
        The format of the fill. See http://matplotlib.sourceforge.net/ for details.

    format_line : {format}
        The format of the mean line. See http://matplotlib.sourceforge.net/ for details.
    """
    X = X.squeeze()
    Y1 = (mean + 2 * std)
    Y2 = (mean - 2 * std)
    if (alpha is not None):
        old_alpha_fill = min(1, format_fill['alpha'] * 2)
        for i, a in enumerate(alpha[:-2]):
            format_fill['alpha'] = a * old_alpha_fill
            hf = PL.fill_between(X[i:i + 2], Y1[i:i + 2], Y2[i:i + 2],
                                 lw=0, **format_fill)
        i += 1
        hf = PL.fill_between(X[i:], Y1[i:], Y2[i:], lw=0, **format_fill)
    else:
        hf = PL.fill_between(X, Y1, Y2, **format_fill)
    hp = PL.plot(X, mean, **format_line)
    return [hf, hp]
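# Minimal usage sketch for plot_sausage above; assumes `numpy as np` is
# available and that `PL` is the pylab module imported by the surrounding code.
# The GP-like mean and standard deviation below are synthetic stand-ins.
def _demo_plot_sausage():
    import numpy as np
    X = np.linspace(-3, 3, 100)
    mean = np.sin(X)
    std = 0.1 + 0.05 * np.abs(X)
    hf, hp = plot_sausage(X, mean, std)
    PL.show()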
def plotfill(time, csfr, csfrhi, csfrlo, color, alpha=1.0):
    plt.fill_between(10**np.append(time, 10.15) / 10**9,
                     np.append(csfr, 0),
                     np.append(csfr + csfrhi, 0),
                     alpha=alpha, color=color)
    plt.fill_between(10**np.append(time, 10.15) / 10**9,
                     np.append(csfr, 0),
                     np.append(csfr - csfrlo, 0),
                     alpha=alpha, color=color)
def mixing_stats():
    pylab.figure(num=2, figsize=(16.5, 11.5))
    pylab.suptitle('Mixing')

    times = []
    mixing_stats_lower = []
    mixing_stats_mixed = []
    mixing_stats_upper = []

    stat_files, time_index_end = le_tools.GetstatFiles('./')
    for sf in stat_files:
        stat = stat_parser(sf)
        for i in range(time_index_end[pylab.find(numpy.array(stat_files) == sf)]):
            times.append(stat['ElapsedTime']['value'][i])
            mixing_stats_lower.append(
                stat['fluid']['Temperature']['mixing_bins%cv_normalised'][0][i])
            mixing_stats_mixed.append(
                stat['fluid']['Temperature']['mixing_bins%cv_normalised'][1][i])
            mixing_stats_upper.append(
                stat['fluid']['Temperature']['mixing_bins%cv_normalised'][2][i])

    pylab.plot(times, mixing_stats_lower, label='T < -0.4')
    pylab.plot(times, mixing_stats_mixed, label='-0.4 < T < 0.4')
    pylab.plot(times, mixing_stats_upper, label='0.4 < T')

    time = le_tools.ReadLog('diagnostics/logs/time.log')
    X_ns = [x - 0.4 for x in le_tools.ReadLog('diagnostics/logs/X_ns.log')]
    X_fs = [0.4 - x for x in le_tools.ReadLog('diagnostics/logs/X_fs.log')]
    try:
        index = pylab.find(numpy.array(X_ns) < 0.4 - 1E-3)[-1]
        pylab.fill_between([time[index], time[index + 1]], [0, 0], [0.5, 0.5], color='0.3')
        index = pylab.find(numpy.array(X_fs) < 0.4 - 1E-3)[-1]
        pylab.fill_between([time[index], time[index + 1]], [0, 0], [0.5, 0.5], color='0.6')
    except IndexError:
        print('not plotting shaded regions on mixing plot as front has not reached end wall')

    pylab.axis([0, times[-1], 0, 0.5])
    pylab.grid("True")
    pylab.legend(loc=0)
    pylab.text(times[-1] - (times[-1] / 5), 0.005,
               'shaded regions show when \nfronts near the end wall \ndark: free-slip, light: no-slip',
               bbox=dict(facecolor='white', edgecolor='black'))
    pylab.xlabel('time (s)')
    pylab.ylabel('domain fraction')
    pylab.savefig('diagnostics/plots/mixing.png')
    return
def show_plot(t_array, th_array):
    """ Display theta vs t plot. """
    th_mean = gv.mean(th_array)
    th_sdev = gv.sdev(th_array)
    thp = th_mean + th_sdev
    thm = th_mean - th_sdev
    plt.fill_between(t_array, thp, thm, color='0.8')
    plt.plot(t_array, th_mean, linewidth=0.5)
    plt.xlabel('$t$')
    plt.ylabel(r'$\theta(t)$')
    plt.savefig('pendulum.pdf', bbox_inches='tight')
    plt.show()
def make_unit_plots(file_dir, fs):
    '''Makes waveform plots for sorted unit in unit_waveforms_plots

    Parameters
    ----------
    file_dir : str, full path to recording directory
    fs : float, sampling rate in Hz
    '''
    h5_name = h5io.get_h5_filename(file_dir)
    h5_file = os.path.join(file_dir, h5_name)

    plot_dir = os.path.join(file_dir, 'unit_waveforms_plots')
    if os.path.exists(plot_dir):
        shutil.rmtree(plot_dir, ignore_errors=True)
    os.mkdir(plot_dir)

    fs_str = '(%g samples per ms)' % (fs / 1000.0)
    unit_numbers = get_unit_numbers(h5_file)
    with tables.open_file(h5_file, 'r+') as hf5:
        units = hf5.list_nodes('/sorted_units')
        for i, unit in zip(unit_numbers, units):
            # plot all waveforms
            waveforms = unit.waveforms[:]
            descriptor = hf5.root.unit_descriptor[i]
            fig, ax = blech_waveforms_datashader.waveforms_datashader(waveforms)
            ax.set_xlabel('Samples (%s)' % fs_str)
            ax.set_ylabel('Voltage (microvolts)')
            unit_title = (('Unit %i, total waveforms = %i\nElectrode: %i, '
                           'Single Unit: %i, RSU: %i, FSU: %i')
                          % (i, waveforms.shape[0],
                             descriptor['electrode_number'],
                             descriptor['single_unit'],
                             descriptor['regular_spiking'],
                             descriptor['fast_spiking']))
            ax.set_title(unit_title)
            fig.savefig(os.path.join(plot_dir, 'Unit%i.png' % i))
            plt.close('all')

            # Plot mean and SEM of waveforms
            # Downsample by 10 to remove upsampling from de-jittering
            fig = plt.figure()
            mean_wave = np.mean(waveforms[:, ::10], axis=0)
            std_wave = np.std(waveforms[:, ::10], axis=0)
            mean_x = np.arange(mean_wave.shape[0]) + 1
            plt.plot(mean_x, mean_wave, linewidth=4.0)
            plt.fill_between(mean_x, mean_wave - std_wave,
                             mean_wave + std_wave, alpha=0.4)
            plt.xlabel('Samples (%s)' % fs_str)
            plt.ylabel('Voltage (microvolts)')
            plt.title(unit_title)
            fig.savefig(os.path.join(plot_dir, 'Unit%i_mean_sd.png' % i))
            plt.close('all')
def plot_mean_std(mn, std, title, plt_num=[1, 1, 1], legend=True):
    """plot the mean +/- std as timeline """
    ax = plt.subplot(plt_num[0], plt_num[1], plt_num[2])
    plt.hold(True)
    plt.fill_between(range(len(std)), mn - std, mn + std, facecolor=clr2)
    plt.plot(range(len(std)), mn, color=clr1, label='Trial Mean', linewidth=2)
    if legend:
        plt.legend(bbox_to_anchor=(0.55, 0.95, 0.4, 0.1), frameon=False, ncol=2)
    adjust_spines(ax, ['bottom', 'left'])
    plt.title(title)
def plot_count_spikes(filepath, signal_map):  # FIXME integrate this with count_spikes properly
    """ useful for manual review """
    raw, block, segments, header = load_abf(filepath)
    if list(raw.read_header()['nADCSamplingSeq'][:2]) != [1, 2]:  # FIXME
        print('Not a ledstim file')
        return None, None
    nseg = len(segments)
    gains = header['fTelegraphAdditGain']  # TODO
    zero = transform_maker(0, 20 * gains[0])  # cell
    one = transform_maker(0, 5)  # led
    plt.figure()
    counts = []
    for seg, n in zip(segments, range(nseg)):
        if len(seg.analogsignals) != 2:
            print('No led channel found.')
            continue
        plt.subplot(nseg, 1, n + 1)
        nas = seg.size()['analogsignals']
        signal = zero(seg.analogsignals[0])
        led = one(seg.analogsignals[1])
        led_on_index, base, maxV, minV = detect_led(led)  # FIXME move this to count_spikes
        if not len(led_on_index):
            print('No led detected maxV: %s minV: %s' % (maxV, minV))
            continue
        sig_on = signal.base[led_on_index]
        sig_mean = np.mean(sig_on)
        sig_std = np.std(sig_on)
        #plt.plot(sig_std+sig_mean)
        sm_arr = base * sig_mean
        std_arr = base * sig_std
        thresh = 3.3
        s_list = detect_spikes(sig_on, thresh, 5)
        counts.append(len(s_list))
        [plt.plot(s, sig_on[s], 'ro') for s in s_list]  # plot all the spikes
        plt.plot(sig_on, 'k-')
        plt.plot(sm_arr, 'b-')
        plt.fill_between(np.arange(len(sm_arr)),
                         sm_arr - std_arr * thresh,
                         sm_arr + std_arr * thresh,
                         color=(.8, .8, 1))
        plt.xlim((0, len(sig_on)))
        plt.title('%s spikes %s' % (len(s_list), block.file_origin))
        #plt.title(block.file_origin)
        #plt.title('%s Segment %s'%(block.file_origin,n))
    return np.mean(counts), np.var(counts), counts
def plot_limit_fill(data, low=False):
    mass, limit = get_mass_limit(data)
    kwargs = dict(**data['style'])
    setdefaults(kwargs, DEFAULTS)
    plt.fill_between(mass, limit,
                     y2=1 if not low else 0,
                     edgecolor=kwargs['color'],
                     facecolor=kwargs['color'],
                     alpha=kwargs['alpha'],
                     )
    plot_text(data)
def get_filters(plotit=True):
    filters = prep_filters()
    if plotit:
        for i, band in enumerate(['r']):
            pl.fill_between(filters[band]['ls'], 5. * filters[band]['Ts'],
                            alpha=0.3, label=filters[band]['ppkey'], color='g')
    return filters
def plot_summary(x, s, interval=95, num_samples=100, sample_color='k',
                 sample_alpha=0.4, interval_alpha=0.25, color='r',
                 legend=True, title="", plot_mean=True, plot_median=False,
                 label=""):
    b = 0.5 * (100 - interval)
    lower = np.percentile(s, b, axis=0).T
    upper = np.percentile(s, 100 - b, axis=0).T
    if plot_median:
        median = np.percentile(s, [50], axis=0).T
        lab = 'Median'
        if len(label) > 0:
            lab += " %s" % label
        plt.plot(x.ravel(), median, label=lab, color=color, linewidth=4)
    if plot_mean:
        mean = np.mean(s, axis=0).T
        lab = 'Mean'
        if len(label) > 0:
            lab += " %s" % label
        plt.plot(x.ravel(), mean, '--', label=lab, color=color, linewidth=4)
    plt.fill_between(x.ravel(), lower.ravel(), upper.ravel(),
                     color=color, alpha=interval_alpha,
                     label='%d%% Interval' % interval)
    if num_samples > 0:
        idx_samples = np.random.choice(range(len(s)), size=num_samples, replace=False)
        plt.plot(x, s[idx_samples, :].T, color=sample_color, alpha=sample_alpha)
    if legend:
        plt.legend(loc='best')
    if len(title) > 0:
        plt.title(title, fontweight='bold')
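# Usage sketch for plot_summary above (assumes `numpy as np` and
# `matplotlib.pyplot as plt` are imported, as the function body expects).
# `s` holds one sampled curve per row, so the percentiles are taken across
# rows at each x position. Synthetic data, for illustration only.
def _demo_plot_summary():
    import numpy as np
    x = np.linspace(0, 1, 60)
    s = np.sin(2 * np.pi * x) + 0.3 * np.random.randn(500, x.size)
    plot_summary(x, s, interval=90, num_samples=20, title='toy posterior band')
    plt.show()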
def plotTiming(dataPath, figurePath):
    rawdata = np.loadtxt(dataPath)  # item_count adding_time twisting_time
    sorteddata = sorted(zip(rawdata[:, 0], rawdata[:, 1], rawdata[:, 2]))
    adding = {}
    twisting = {}
    for i in sorteddata:
        if i[0] in adding:
            adding[i[0]].append(i[1])
            twisting[i[0]].append(i[2])
        else:
            adding[i[0]] = [i[1]]
            twisting[i[0]] = [i[2]]
    d = []
    for i in adding.keys():
        averageAdding = np.average(adding[i]) / 1000.0
        averageTwisting = np.average(twisting[i]) / 1000.0
        d.append((i, averageAdding, averageTwisting))
    data = np.array(sorted(d))
    #print data

    # plot
    pylab.figure()
    pylab.plot(data[:, 0], data[:, 1], 'ob')
    pylab.plot(data[:, 0], data[:, 1] + data[:, 2], 'ob')
    pylab.fill_between(data[:, 0], data[:, 1], data[:, 1] + data[:, 2],
                       facecolor='red', interpolate=True)
    pylab.fill_between(data[:, 0], [0 for i in range(len(adding))], data[:, 1],
                       facecolor='green', interpolate=True)
    p1 = pylab.Rectangle((0, 0), 1, 1, fc="green")
    p2 = pylab.Rectangle((0, 0), 1, 1, fc="red")
    pylab.legend([p1, p2], ['adding time', 'twisting time'], loc='upper left')
    pylab.xlabel("size")
    pylab.ylabel('time used (seconds)')
    pylab.xlim([data[0, 0], data[-1, 0]])
    pylab.savefig(figurePath, format='svg')
def doTheWork(algorithm, r, generations, gene_length, population):
    print('Running {}'.format(algorithm.__name__))
    runs = []
    parameters = [
        (r, generations, 0.15, 0.6),
        # (r, generations, 0.06, 0.3),
        # (r, generations, 0.4, 0.0),
        # (r, generations, 0.0, 0.5)
    ]
    with tqdm(total=sum(map(lambda item: item[0], parameters))) as bar:
        for (r, generations, p_mutate, p_crossover) in parameters:
            algs = []
            for _ in range(r):
                # Prepare genes
                chromosomes = ga.chromosomes.Chromosome.create_random(
                    gene_length=gene_length, n=population)
                # Init alg
                alg = algorithm(chromosomes)
                # Simulate generations
                alg.run(generations, p_mutate, p_crossover)
                algs.append((alg, generations, p_mutate, p_crossover))
                bar.update(1)
            runs.append((r, generations, p_mutate, p_crossover, algs))
    for (r, generations, p_mutate, p_crossover, algs) in runs:
        data = []
        for (alg, _, _, _) in algs:
            d = np.array(
                [v for k, v in sorted(alg.overall_fittest_fit.items())],
                dtype=np.float64)
            if not any(data):
                data = d
            else:
                data += d
        data /= len(algs)
        py.plot(data, label='{} ; {} ; {}'.format(generations, p_mutate, p_crossover))
        py.fill_between(data,
                        np.percentile(data, q=0.25, axis=0),
                        np.percentile(data, q=0.75, axis=0),
                        alpha=0.3)
    py.rcParams['figure.figsize'] = [10, 8]
    py.xlabel('generation')
    py.ylabel('fitness')
    py.legend(loc='best')
    py.show()
def plot_regression(x, y, smoothing=.3):
    fit = sm.nonparametric.lowess(y, x, frac=smoothing)
    df = (fit[:, 1] - y)**2
    fit_var = sm.nonparametric.lowess(df, x, frac=smoothing)
    isheld = pl.ishold()
    pl.hold(1)
    pl.plot(fit[:, 0], fit[:, 1])
    pl.fill_between(fit_var[:, 0],
                    fit[:, 1] - 1 * pl.sqrt(fit_var[:, 1]),
                    fit[:, 1] + 1 * pl.sqrt(fit_var[:, 1]),
                    color=((0, 0, .99, .2), ))
    pl.plot(x, y, '.')
    pl.hold(isheld)
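# Usage sketch for plot_regression above; assumes `numpy as np`, `pylab as pl`
# and `statsmodels.api as sm` are imported as the function itself requires, and
# an older matplotlib where pl.hold/pl.ishold still exist. Passing x already
# sorted keeps the lowess output aligned with y. Synthetic data, illustration only.
def _demo_plot_regression():
    import numpy as np
    x = np.linspace(0, 10, 200)
    y = np.sin(x) + 0.3 * np.random.randn(x.size)
    plot_regression(x, y, smoothing=0.3)
    pl.show()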
def plotting(input_file, savefile):
    with open(input_file, 'r') as myfile:
        results = json.load(myfile)
    res_IE_1D = results['IE_1D']
    thetas = np.linspace(res_IE_1D['theta_start'], res_IE_1D['theta_stop'], 1000)
    p = Problem_FSI_1D(res_IE_1D['n'], **res_IE_1D['parameters'])
    dt = res_IE_1D['tf'] / res_IE_1D['N_steps']
    conv_rates_lims = [p.theoretical_conv_rate(dt, th) for th in thetas]

    pl.figure()
    for res, label, marker, ls in [(results['IE_1D'], 'IE 1D', None, '-'),
                                   (results['IE_2D'], 'IE 2D', None, '-'),
                                   (results['S2_1D'], 'SD2 1D', 'o', ''),
                                   (results['S2_2D'], 'SD2 2D', '*', '')]:
        conv_rates = []
        for updates in res['updates']:
            x = np.array(updates[:-1])
            conv_rates.append(np.mean(x[1:] / x[:-1]))
        pl.semilogy(res['theta'], conv_rates, label=label, marker=marker, linestyle=ls)
    pl.semilogy(thetas, conv_rates_lims, label=r'$\Sigma(\Theta)$', ls='--', linewidth=3)
    pl.axvline(res_IE_1D['theta_opt'], ls='-', color='k', label=r'$\Theta_{opt}$')
    a, b = res_IE_1D['theta_CFL_inf'], res_IE_1D['theta_CFL_zero']
    pl.xlim(res_IE_1D['theta_start'], res_IE_1D['theta_stop'])
    pl.ylim(1e-7, 2)
    pl.fill_between([min(a, b), max(a, b)],
                    [min(pl.ylim()) / 100] * 2,
                    [max(pl.ylim()) * 100] * 2,
                    alpha=0.2)
    pl.xlabel(r'$\Theta$', labelpad=-20, position=(1.08, -1), fontsize=20)
    lp = -50 if label == 'Air-Steel' else -70
    pl.ylabel('Conv. rate', rotation=0, labelpad=lp, position=(2., 1.05), fontsize=20)
    pl.legend(loc=3)
    pl.savefig(savefile, dpi=100)
def main():
    Gamma0, Omega, Sigma, T, Y_obs, amp, mu0, tec, freqs = generate_data()
    hmm = NonLinearDynamicsSmoother(TecLinearPhaseNestedSampling(freqs))
    hmm = jit(
        partial(hmm, tol=1., maxiter=2, omega_window=None, sigma_window=None,
                momentum=0., omega_diag_range=(0, jnp.inf),
                sigma_diag_range=(0, jnp.inf)))
    #
    # with disable_jit():
    keys = random.split(random.PRNGKey(0), T)
    # with disable_jit():
    res = hmm(Y_obs, Sigma, mu0, Gamma0, Omega, amp, keys)
    print(res.converged, res.niter)

    plt.plot(tec, label='true tec')
    plt.plot(res.post_mu[:, 0], label='infer tec')
    plt.fill_between(jnp.arange(T),
                     res.post_mu[:, 0] - jnp.sqrt(res.post_Gamma[:, 0, 0]),
                     res.post_mu[:, 0] + jnp.sqrt(res.post_Gamma[:, 0, 0]),
                     alpha=0.5)
    plt.legend()
    plt.show()

    plt.plot(jnp.sqrt(res.post_Gamma[:, 0, 0]))
    plt.title("Uncertainty tec")
    plt.show()

    plt.plot(tec - res.post_mu[:, 0], label='infer')
    plt.fill_between(
        jnp.arange(T),
        (tec - res.post_mu[:, 0]) - jnp.sqrt(res.post_Gamma[:, 0, 0]),
        (tec - res.post_mu[:, 0]) + jnp.sqrt(res.post_Gamma[:, 0, 0]),
        alpha=0.5)
    plt.title("Residual tec")
    plt.legend()
    plt.show()

    plt.plot(jnp.sqrt(res.Omega[:, 0, 0]))
    plt.title("omega")
    plt.show()

    plt.plot(
        jnp.mean(jnp.sqrt(jnp.diagonal(res.Sigma, axis2=-2, axis1=-1)), axis=-1))
    plt.title("mean sigma")
    plt.show()
def histmoellerplot(self, fldname="Dchl"):
    self.histmoeller(fldname)
    figpref.current()
    pl.close(1)
    fig = pl.figure(1)
    pl.subplots_adjust(hspace=0, right=0.85, left=0.15)

    def subplot(sp, mat, vec, xvec):
        ax = pl.subplot(2, 1, sp, axisbg='0.8')
        im = pl.pcolormesh(self.jdvec, xvec, miv(mat.T), rasterized=True,
                           norm=LogNorm(vmin=1, vmax=10000), cmap=WRY())
        pl.plot(self.jdvec, vec, lw=2)
        pl.gca().xaxis.axis_date()
        pl.setp(ax, xticklabels=[])
        return im

    im = subplot(1, self.posmat, self.posmean, self.hpos)
    im = subplot(2, self.negmat, self.negmean, -self.hpos)
    pl.figtext(0.08, 0.5, r'%s d$^{-1}$)' % fldname, rotation='vertical',
               size=16, va='center')
    cbar_ax = fig.add_axes([0.87, 0.15, 0.025, 0.7])
    cb = fig.colorbar(im, cax=cbar_ax)
    cb.set_label('Number of grid cells')
    pl.savefig('figs/liege/histmoeller_%s.pdf' % fldname)

    pl.close(2)
    fig = pl.figure(2)
    pl.fill_between(self.jdvec, self.jdvec * 0, self.posmean,
                    facecolor='powderblue')
    pl.fill_between(self.jdvec, self.negmean, self.jdvec * 0,
                    facecolor='pink')
    pl.plot(self.jdvec, self.negmean + self.posmean, lw=1, c='k')
    #pl.title(fldname)
    pl.gca().xaxis.axis_date()
    pl.savefig('figs/liege/meantime_%s.pdf' % fldname)
def _plot(self, kde, X, data_type, filename, bandwidth):
    if self.plot:
        if data_type == MZ_INTENSITY_RT:
            self.logger.debug('3D plotting for %s not implemented' % MZ_INTENSITY_RT)
        else:
            fname = 'All' if filename is None else filename
            title = '%s density estimation for %s - bandwidth %.3f' % (data_type, fname, bandwidth)
            X_plot = np.linspace(np.min(X), np.max(X), 1000)[:, np.newaxis]
            log_dens = kde.score_samples(X_plot)
            plt.figure()
            plt.fill_between(X_plot[:, 0], np.exp(log_dens), alpha=0.5)
            plt.plot(X[:, 0], np.full(X.shape[0], -0.01), '|k')
            plt.title(title)
            plt.show()
def fightSizeDistributionPlot(samples, color='k', plotConfInt=True,
                              plotErrorBars=False, log=False, alpha=0.4,
                              makePlot=True, confIntP=0.95, removeZeros=False,
                              removeOnes=False, multiple=1, verbose=True,
                              maxSize=None, **kwargs):
    """
    multiple (1)        : Multiply probabilities by this factor.
                          Useful for plotting expected number rather
                          than probability.
    """
    #ell = len(samples[0])
    dist, confIntList = fightSizeDistribution(samples,
        confIntP=confIntP, removeZeros=removeZeros, removeOnes=removeOnes,
        maxSize=maxSize)
    ell = len(dist)
    dist, confIntList = multiple * dist, multiple * confIntList
    if makePlot:
        if plotConfInt:
            #for confInt in confIntList:
            #    if confInt[0] == 0.: confInt[0] = zeroEquiv
            #firstZero = pylab.find(dist==0)[2]
            firstZero = len(dist)
            pylab.fill_between(range(1, firstZero),
                               confIntList[:, 0][1:firstZero],
                               confIntList[:, 1][1:firstZero],
                               color=color, alpha=alpha)
        if plotErrorBars:
            yerr = scipy.empty_like(confIntList.T)
            yerr[0] = dist - confIntList[:, 1]
            yerr[1] = confIntList[:, 0] - dist
            pylab.errorbar(range(ell), dist, yerr=yerr, color=color)
        pylab.plot(range(ell), dist, color=color, **kwargs)
        if log:
            plotFn = pylab.yscale('log')
    if verbose:
        print("sum(dist) =", sum(dist))
    return dist
def animate(i):
    line1.set_data(GenerData[i, 0], GenerData[i, 1])
    ax2.cla()
    ax2.axis([-1.0, 1.0, 0, 1.0])
    ax2.set_xlabel('environmental conditions ($ x $)', fontsize=FontSize)
    ax2.set_ylabel('average uptake efficiency ($ U(x) $)', fontsize=FontSize)
    p.fill_between(x, genMeans[i, :] + genSTD[i, :], genMeans[i, :] - genSTD[i, :],
                   color=(0.75, 0.75, 0.75, 0.75))
    p.plot(x, genMeans[i, :], linewidth=2, color='k')
    p.grid(True)
    p.tight_layout(h_pad=Hpad)
    return line1
def plot(self, color_line='r', bgcolor='grey', color='yellow', lw=4,
         hold=False, ax=None):
    xmax = self.xmax + 1
    if ax:
        pylab.sca(ax)
    pylab.fill_between([0, xmax], [0, 0], [20, 20], color='red', alpha=0.3)
    pylab.fill_between([0, xmax], [20, 20], [30, 30], color='orange', alpha=0.3)
    pylab.fill_between([0, xmax], [30, 30], [41, 41], color='green', alpha=0.3)
    if self.X is None:
        X = range(1, self.xmax + 1)
    pylab.fill_between(X, self.df.mean() + self.df.std(),
                       self.df.mean() - self.df.std(),
                       color=color, interpolate=False)
    pylab.plot(X, self.df.mean(), color=color_line, lw=lw)
    pylab.ylim([0, 41])
    pylab.xlim([0, self.xmax + 1])
    pylab.title("Quality scores across all bases")
    pylab.xlabel("Position in read (bp)")
    pylab.ylabel("Quality")
    pylab.grid(axis='x')