def plot_p_leading_order(frame):
    mat = scipy.io.loadmat('sound-speed_2D-wave.mat')
    T = 5
    nt = int(T / 0.5)  # frame index; cast to int so it can index the array
    pp = mat['U'][nt, :, :]
    xx = mat['xx']
    yy = mat['yy']
    fig = pl.figure(figsize=(8, 3.5))
    #pl.title("t= "+str(sol.state.t),fontsize=20)
    pl.xticks(size=20); pl.yticks(size=20)
    pl.xlabel('x', fontsize=20); pl.ylabel('y', fontsize=20)
    #pl.pcolormesh(xx,yy,p_subxy,cmap=cm.OrRd)
    pl.pcolormesh(xx, yy, pp, cmap='RdBu_r')
    pl.autoscale(tight=True)
    cb = pl.colorbar(ticks=[0.5, 1, 1.5, 2])
    #pl.clim(ticks=[0.5,1,1.5,2])
    imaxes = pl.gca()
    pl.axes(cb.ax)
    pl.yticks(fontsize=20)
    pl.axes(imaxes)
    #pl.xticks(fontsize=20); pl.axes(imaxes)
    #pl.axis('equal')
    pl.axis('tight')
    fig.tight_layout()
    pl.savefig('./_plots_to_paper/sound-speed_LO_t' + str(frame) + '_pcolor.png')
    pl.close()
def plot_field(self, x, y, u=None, v=None, F=None, contour=False, outdir=None,
               plot='quiver', figname='_field', format='eps'):
    outdir = self.set_dir(outdir)
    p = 64
    if F is None:
        F = self.calc_F(u, v)
    plt.close('all')
    plt.figure()
    #fig, axes = plt.subplots(nrows=1)
    if contour:
        # plt.hold() was removed from matplotlib; overplotting is the default
        plt.contourf(x, y, F)
    if plot == 'quiver':
        plt.quiver(x[::p], y[::p], u[::p], v[::p], scale=0.1)
    if plot == 'pcolor':
        plt.pcolormesh(x[::4], y[::4], F[::4], cmap=plt.cm.Pastel1)
        plt.colorbar()
    if plot == 'stream':
        speed = F[::16]
        plt.streamplot(x[::16], y[::16], u[::16], v[::16], density=(1, 1),
                       color='k')
    plt.xlabel('$x$ (a.u.)')
    plt.ylabel('$y$ (a.u.)')
    plt.savefig(os.path.join(outdir, figname + '.' + format), format=format,
                dpi=320, bbox_inches='tight')
    plt.close()
def showKernel(dataOrMatrix, fileName=None, useLabels=True, **args):
    labels = None
    if hasattr(dataOrMatrix, 'type') and dataOrMatrix.type == 'dataset':
        data = dataOrMatrix
        k = data.getKernelMatrix()
        labels = data.labels
    else:
        k = dataOrMatrix
        if 'labels' in args:
            labels = args['labels']
    import matplotlib
    if fileName is not None and fileName.find('.eps') > 0:
        matplotlib.use('PS')
    from matplotlib import pylab
    pylab.matshow(k)
    #pylab.show()
    # guard against labels never having been set above
    if useLabels and labels is not None and labels.L is not None:
        numPatterns = 0
        for i in range(labels.numClasses):
            numPatterns += labels.classSize[i]
            #pylab.figtext(0.05, float(numPatterns) / len(labels), labels.classLabels[i])
            #pylab.figtext(float(numPatterns) / len(labels), 0.05, labels.classLabels[i])
            pylab.axhline(numPatterns, color='black', linewidth=1)
            pylab.axvline(numPatterns, color='black', linewidth=1)
        pylab.axis([0, len(labels), 0, len(labels)])
    if fileName is not None:
        pylab.savefig(fileName)
    pylab.close()
def save(self, out_path):
    '''Saves a figure for the monitor

    Args:
        out_path: str
    '''
    plt.clf()
    np.set_printoptions(precision=4)
    font = {'size': 7}
    matplotlib.rc('font', **font)
    y = 2
    x = ((len(self.d) - 1) // y) + 1
    fig, axes = plt.subplots(y, x)
    fig.set_size_inches(20, 8)
    for j, (k, v) in enumerate(self.d.items()):  # iteritems() is Python 2 only
        ax = axes[j // x, j % x]
        ax.plot(v, label=k)
        if k in self.d_valid.keys():
            ax.plot(self.d_valid[k], label=k + '(valid)')
        ax.set_title(k)
        ax.legend()
    plt.tight_layout()
    plt.savefig(out_path, facecolor=(1, 1, 1))
    plt.close()
def ploter(X, n, name, path, real_ranges=None):
    '''
    Graph plotting module. Very basic, so feel free to modify it.
    '''
    try:
        import matplotlib.pylab as plt
    except ImportError:
        print('\nMatplotlib is not installed! Either install it, or deselect graph option.\n')
        return 1
    lll = 1 + len(X) // n
    fig = plt.figure(figsize=(16, 14), dpi=100)
    for x in range(0, len(X), n):
        iii = 1 + x // n
        ax = fig.add_subplot(lll, 1, iii)
        if real_ranges is not None:
            for p in real_ranges:
                if p[0] in range(x, x + n) and p[1] in range(x, x + n):
                    ax.axvspan(p[0] % n, p[1] % n, facecolor='g', alpha=0.5)
                elif p[0] in range(x, x + n) and p[1] > x + n:
                    ax.axvspan(p[0] % n, n, facecolor='g', alpha=0.5)
                elif p[0] < x and p[1] in range(x, x + n):
                    ax.axvspan(0, p[1] % n, facecolor='g', alpha=0.5)
                elif p[0] < x and p[1] > x + n:
                    ax.axvspan(0, n, facecolor='g', alpha=0.5)
        ax.plot(X[x:x + n], 'r-')
        ax.set_xlim(0., n)
        ax.set_ylim(0., 1.)
        ax.set_xticklabels(range(x, x + 750, 100))  # (x, x+n/5, x+2*n/5, x+3*n/5, x+4*n/5, x+5*n/5)
    plt.savefig(path + 'HMM_' + name + '.png')
    plt.close()
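# Usage sketch for ploter (illustrative, not from the source): plot a
# 1000-sample score trace in windows of 200 samples, with one hypothetical
# known range highlighted; writes ./HMM_demo.png.
scores = list(np.random.rand(1000))
ploter(scores, 200, 'demo', './', real_ranges=[(150, 420)])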
def plot_multi_format(plot_funcs, plot_kwargs=None, usetex=False,
                      outdir='plots',
                      setting_funcs=['single', 'span', 'slides', 'thumbnails']):
    """
    Outputs plots formatted 4 ways: Publication ready (narrow and wide),
    PowerPoint ready, and png thumbnails.

    input
    -----
    plot_funcs : List of functions that return a mpl figure and a filename
        (or list of figures and filenames)
    """
    setting_dict = {'single': mpl_single_column,
                    'span': mpl_span_columns,
                    'slides': mpl_slides,
                    'thumbnails': mpl_thumbnails}
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # For python 3.4
    # os.makedirs(outdir, exist_ok=True)
    if plot_kwargs is None:
        plot_kwargs = [{}] * len(plot_funcs)
    for key in setting_funcs:
        setting_dict[key](usetex=usetex)
        for plot_func, pkwargs in zip(plot_funcs, plot_kwargs):
            figs, names = plot_func(**pkwargs)
            for fig, name in zip(figs, names):
                fig.savefig(os.path.join(outdir, key + '_' + name))
        plt.close('all')
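# A minimal usage sketch for plot_multi_format (an assumption, not from the
# source): `make_sine` is a hypothetical plot_func returning lists of figures
# and filenames; only the 'thumbnails' style is requested, so mpl_thumbnails
# is assumed to be defined in this module.
def make_sine():
    fig, ax = plt.subplots()
    xs = np.linspace(0, 2 * np.pi, 200)
    ax.plot(xs, np.sin(xs))
    return [fig], ['sine.png']

plot_multi_format([make_sine], outdir='plots', setting_funcs=['thumbnails'])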
def test_simple_gen(self):
    self_con = .8
    other_con = 0.05
    g = self.gen.gen_stoch_blockmodel(min_degree=1, blocks=5,
                                      self_con=self_con, other_con=other_con,
                                      powerlaw_exp=2.1, degree_seq='powerlaw',
                                      num_nodes=1000, num_links=3000)
    deg_hist = vertex_hist(g, 'total')
    res = fit_powerlaw.Fit(g.degree_property_map('total').a, discrete=True)
    print('powerlaw alpha:', res.power_law.alpha)
    print('powerlaw xmin:', res.power_law.xmin)
    if len(deg_hist[0]) != len(deg_hist[1]):
        deg_hist[1] = deg_hist[1][:len(deg_hist[0])]
    print('plot degree dist')
    plt.plot(deg_hist[1], deg_hist[0])
    plt.xscale('log')
    plt.xlabel('degree')
    plt.ylabel('#nodes')
    plt.yscale('log')
    plt.savefig('deg_dist_test.png')
    plt.close('all')
    print('plot graph')
    pos = sfdp_layout(g, groups=g.vp['com'], mu=3)
    graph_draw(g, pos=pos, output='graph.png', output_size=(800, 800),
               vertex_size=prop_to_size(g.degree_property_map('total'),
                                        mi=2, ma=30),
               vertex_color=[0., 0., 0., 1.],
               vertex_fill_color=g.vp['com'],
               bg_color=[1., 1., 1., 1.])
    plt.close('all')
    print('init:', self_con / (self_con + other_con),
          other_con / (self_con + other_con))
    print('real:', gt_tools.get_graph_com_connectivity(g, 'com'))
def plot_corner_posteriors(self, savefile=None,
                           labels=["T1", "R1", "Av", "T2", "R2"]):
    '''
    Plots the corner plot of the MCMC results.
    '''
    ndim = len(self.sampler.flatchain[0, :])
    chain = self.sampler
    samples = chain.flatchain
    samples = samples[:, 0:ndim]
    plt.figure(figsize=(8, 8))
    fig = corner.corner(samples, labels=labels[0:ndim])
    plt.title("MJD: %.2f" % self.mjd)
    name = self._get_save_path(savefile, "mcmc_posteriors")
    plt.savefig(name)
    plt.close("all")

    plt.figure(figsize=(8, ndim * 3))
    for n in range(ndim):
        plt.subplot(ndim, 1, n + 1)
        chain = self.sampler.chain[:, :, n]
        nwalk, nit = chain.shape
        for i in np.arange(nwalk):
            plt.plot(chain[i], lw=0.1)
        plt.ylabel(labels[n])
        plt.xlabel("Iteration")
    name_walkers = self._get_save_path(savefile, "mcmc_walkers")
    plt.tight_layout()
    plt.savefig(name_walkers)
    plt.close("all")
def viz_birth_proposal_2D(curModel, newModel, ktarget, freshCompIDs,
                          title1='Before Birth', title2='After Birth'):
    ''' Create before/after visualization of a birth move (in 2D) '''
    from ..viz import GaussViz, BarsViz
    from matplotlib import pylab
    fig = pylab.figure()
    h1 = pylab.subplot(1, 2, 1)
    if curModel.obsModel.__class__.__name__.count('Gauss'):
        GaussViz.plotGauss2DFromHModel(curModel, compsToHighlight=ktarget)
    else:
        BarsViz.plotBarsFromHModel(curModel, compsToHighlight=ktarget, figH=h1)
    pylab.title(title1)
    h2 = pylab.subplot(1, 2, 2)
    if curModel.obsModel.__class__.__name__.count('Gauss'):
        GaussViz.plotGauss2DFromHModel(newModel, compsToHighlight=freshCompIDs)
    else:
        BarsViz.plotBarsFromHModel(newModel, compsToHighlight=freshCompIDs,
                                   figH=h2)
    pylab.title(title2)
    pylab.show(block=False)
    try:
        x = input('Press any key to continue >>')  # raw_input() in Python 2
    except KeyboardInterrupt:
        import sys
        sys.exit(-1)
    pylab.close()
# The four MTV helpers below assume `from matplotlib.pylab import *`
# and `import os` at module level.
def plot_size_of_c(size_of_c, path):
    xlabel('|C|')
    ylabel('Max model size |Ci|')
    grid(True)
    plot([x + 1 for x in range(len(size_of_c))], size_of_c)
    savefig(os.path.join(path, 'size_of_c.png'))
    close()
def plot_q(frame, file_prefix='claw', file_format='petsc', path='./_output/',
           plot_pcolor=True, plot_slices=True, slices_xlimits=None):
    import sys
    sys.path.append('.')
    import gaussian_1d
    sol = Solution(frame, file_format=file_format, read_aux=False, path=path,
                   file_prefix=file_prefix)
    x = sol.state.grid.x.centers
    mx = len(x)
    bathymetry = 0.5
    eta = sol.state.q[0, :] + bathymetry
    str_frame = str(frame).zfill(3)  # zero-pad the frame number to 3 digits
    fig = pl.figure(figsize=(40, 10))
    ax = fig.add_subplot(111)
    ax.set_aspect(aspect=1)
    ax.plot(x, eta)
    #pl.title("t= "+str(sol.state.t),fontsize=20)
    #pl.xticks(size=20); pl.yticks(size=20)
    #pl.xlim([0, gaussian_1d.Lx])
    pl.ylim([0.5, 1.0])
    pl.xlim([0., 4.0])
    #pl.axis('equal')
    pl.savefig('./_plots/eta_' + str_frame + '_slices.png')
    pl.close()
def plot_heuristic(heuristic, path):
    xlabel('|C|')
    ylabel('h')
    grid(True)
    plot(heuristic)
    savefig(os.path.join(path, 'heuristic.png'))
    close()
def plot_running_time(running_time, path):
    xlabel('|C|')
    ylabel('MTV iteration in secs.')
    grid(True)
    plot([x for x in range(len(running_time))], running_time)
    savefig(os.path.join(path, 'running_time.png'))
    close()
def triple_plot(cccsum, cccsum_hist, trace, threshold, save=False,
                savefile=''):
    r"""Main function to make a triple plot with a day-long seismogram, \
    day-long correlation sum trace and histogram of the correlation sum to \
    show normality.

    :type cccsum: numpy.ndarray
    :param cccsum: Array of the cross-channel cross-correlation sum
    :type cccsum_hist: numpy.ndarray
    :param cccsum_hist: cccsum for histogram plotting, can be the same as \
        cccsum but included if cccsum is just an envelope.
    :type trace: obspy.Trace
    :param trace: A sample trace from the same time as cccsum
    :type threshold: float
    :param threshold: Detection threshold within cccsum
    :type save: bool, optional
    :param save: If True will save and not plot to screen, vice-versa if False
    :type savefile: str, optional
    :param savefile: Path to save figure to, only required if save=True
    """
    if len(cccsum) != len(trace.data):
        print('cccsum is: ' + str(len(cccsum)) +
              ' trace is: ' + str(len(trace.data)))
        msg = ' '.join(['cccsum and trace must have the',
                        'same number of data points'])
        raise ValueError(msg)
    df = trace.stats.sampling_rate
    npts = trace.stats.npts
    t = np.arange(npts, dtype=np.float32) / (df * 3600)
    # Generate the subplot for the seismic data
    ax1 = plt.subplot2grid((2, 5), (0, 0), colspan=4)
    ax1.plot(t, trace.data, 'k')
    ax1.axis('tight')
    ax1.set_ylim([-15 * np.mean(np.abs(trace.data)),
                  15 * np.mean(np.abs(trace.data))])
    # Generate the subplot for the correlation sum data
    ax2 = plt.subplot2grid((2, 5), (1, 0), colspan=4, sharex=ax1)
    # Plot the threshold values
    ax2.plot([min(t), max(t)], [threshold, threshold], color='r', lw=1,
             label="Threshold")
    ax2.plot([min(t), max(t)], [-threshold, -threshold], color='r', lw=1)
    ax2.plot(t, cccsum, 'k')
    ax2.axis('tight')
    ax2.set_ylim([-1.7 * threshold, 1.7 * threshold])
    ax2.set_xlabel("Time after %s [hr]" % trace.stats.starttime.isoformat())
    # ax2.legend()
    # Generate a small subplot for the histogram of the cccsum data
    ax3 = plt.subplot2grid((2, 5), (1, 4), sharey=ax2)
    # normed=1 was removed from matplotlib; density=True is the replacement
    ax3.hist(cccsum_hist, 200, density=True, histtype='stepfilled',
             orientation='horizontal', color='black')
    ax3.set_ylim([-5, 5])
    fig = plt.gcf()
    fig.suptitle(trace.id)
    fig.canvas.draw()
    if not save:
        plt.show()
        plt.close()
    else:
        plt.savefig(savefile)
    return
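# Usage sketch for triple_plot with synthetic data (illustrative, not from
# the source): assumes obspy and numpy are installed; a day of random noise
# at 1 Hz stands in for the seismogram and the correlation sum.
from obspy import Trace

tr_demo = Trace(data=np.random.randn(86400).astype(np.float32))
tr_demo.stats.sampling_rate = 1.0
cccsum_demo = np.random.randn(86400).astype(np.float32)
triple_plot(cccsum_demo, cccsum_demo, tr_demo, threshold=8.0, save=True,
            savefile='triple_plot_demo.png')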
def plot_BIC_score(BIC_SCORE, path):
    xlabel('|C|')
    ylabel('BIC score')
    grid(True)
    plot(BIC_SCORE)
    savefig(os.path.join(path, 'BIC.png'))
    close()
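# Usage sketch (illustrative, not from the source) for the four small MTV
# plotting helpers above; each writes one PNG into the given directory.
plot_size_of_c([2, 3, 5, 8, 12], '/tmp')
plot_heuristic([0.91, 0.72, 0.55, 0.41], '/tmp')
plot_running_time([1.2, 3.4, 7.9, 15.0], '/tmp')
plot_BIC_score([-120.5, -98.2, -91.7, -90.9], '/tmp')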
def compareTwoUsers(data1, data2, outdir):
    """Compares data for two users. Currently plots difference in peaks for
    the users on presslengths for different keycodes."""

    def computePeakDifference(d1, d2):
        edges = findCommonEdges(d1, d2)
        # normed= was removed from numpy; density=True is the replacement
        h1, e = np.histogram(d1, bins=edges, density=True)
        h2, e = np.histogram(d2, bins=edges, density=True)
        a1, a2 = np.argmax(h1), np.argmax(h2)
        diff = (edges[a1] + edges[a1 + 1] - edges[a2] - edges[a2 + 1]) / 2.0
        return diff

    commonKeys = set(data1.keystrokePLs_key.keys()) & \
        set(data2.keystrokePLs_key.keys())
    peakDiffs = []
    for key in commonKeys:
        dat1 = data1.keystrokePLs_key[key]
        dat2 = data2.keystrokePLs_key[key]
        peakDiffs.append(computePeakDifference(dat1, dat2))
    peakDiffs.append(computePeakDifference(data1.keystrokePLs,
                                           data2.keystrokePLs))

    edges = findCommonEdges(peakDiffs)
    plt.figure()
    plt.hist(peakDiffs, bins=edges)
    plt.title("Peak Differences for Keystroke PL for %s and %s" %
              (data1.user, data2.user))
    plt.xlabel("Time (seconds)")
    plt.savefig("%s/%s_%s_kPLpeakDiff.pdf" % (outdir, data1.user, data2.user))
    plt.close()
def threeD_gridplot(nodes, save=False, savefile=''):
    r"""Function to plot in 3D a series of grid points.

    :type nodes: list of tuples
    :param nodes: List of tuples of the form (lat, long, depth)
    :type save: bool
    :param save: if True will save without plotting to screen, if False \
        (default) will plot to screen but not save
    :type savefile: str
    :param savefile: required if save=True, path to save figure to.
    """
    lats = []
    longs = []
    depths = []
    for node in nodes:
        lats.append(float(node[0]))
        longs.append(float(node[1]))
        depths.append(float(node[2]))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(lats, longs, depths)
    ax.set_ylabel("Latitude (deg)")
    ax.set_xlabel("Longitude (deg)")
    ax.set_zlabel("Depth (km)")
    ax.get_xaxis().get_major_formatter().set_scientific(False)
    ax.get_yaxis().get_major_formatter().set_scientific(False)
    if not save:
        plt.show()
        plt.close()
    else:
        plt.savefig(savefile)
    return
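# Usage sketch (not from the source): a small synthetic grid of
# (lat, long, depth) nodes rendered with threeD_gridplot.
nodes_demo = [(lat, lon, depth)
              for lat in (45.0, 45.1, 45.2)
              for lon in (170.0, 170.1)
              for depth in (5.0, 10.0)]
threeD_gridplot(nodes_demo, save=True, savefile='grid_demo.png')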
def plot_selfish_cooperative(num_runs):
    import seaborn as sns
    for exp in range(num_runs):
        fig = plt.figure()
        cooperators = [3, 4, 5]
        selfish = [0, 1, 2]
        fname = Parameters.dirname + '/%i_assembly_%i.dat' % (exp, 0)
        t, pop = np.loadtxt(fname, unpack=True)
        cooperative_pop = [0.0] * len(pop[:-5])
        selfish_pop = [0.0] * len(pop[:-5])
        for c in cooperators:
            fname = Parameters.dirname + '/%i_assembly_%i.dat' % (exp, c)
            t, pop = np.loadtxt(fname, unpack=True)
            cooperative_pop = np.add(cooperative_pop, pop[:-5])
        for s in selfish:
            fname = Parameters.dirname + '/%i_assembly_%i.dat' % (exp, s)
            t, pop = np.loadtxt(fname, unpack=True)
            selfish_pop = np.add(selfish_pop, pop[:-5])
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(t[:-5], selfish_pop, label='selfish', color='r')
        ax.plot(t[:-5], cooperative_pop, label='cooperative', color='g')
        ax.legend(loc='upper left')
        plt.xlabel('System Time')
        plt.ylabel('Total Abundance')
        #plt.show()
        plt.savefig(Parameters.dirname + '/%i_cooperative_vs_selfish.png' % exp)
        plt.close()
def phase_space(self, resolution=200, initial_conds=[], fname_app=None):
    """ Plot phase space of system.

    Arguments
        resolution
            Resolution of plot
        initial_conds
            List of initial conditions for trajectories
    """
    fig = plt.figure()

    # integer resolution for the coarser vector-field grid
    self._plot_vector_field(resolution // 10)
    self._plot_nullclines(resolution)
    self._plot_trajectories(initial_conds)

    plt.xlabel(r'$\varphi_0$')
    plt.ylabel(r'$\varphi_1$')
    plt.title('Phase plane{}'.format(
        '' if fname_app is None else ' ({})'.format(fname_app)))

    plt.savefig('images/phase_space{}.pdf'.format(
        '' if fname_app is None else '_{:04}'.format(fname_app)))
    plt.close()
def plot(frame, dirname, clim=None, axis_limits=None):
    if not os.path.exists('./figures'):
        os.makedirs('./figures')
    try:
        sol = Solution(frame, file_format='petsc', read_aux=False,
                       path='./saved_data/' + dirname + '/_p/',
                       file_prefix='claw_p')
    except IOError:
        # the original bare string was a no-op; actually print the message
        print('Data file not found; please unzip the files in saved_data/.')
        return
    x = sol.state.grid.x.centers
    y = sol.state.grid.y.centers
    mx = len(x)
    my = len(y)
    mp = sol.state.num_eqn
    yy, xx = np.meshgrid(y, x)
    p = sol.state.q[0, :, :]
    if clim is not None:
        pl.pcolormesh(xx, yy, p, cmap=cm.RdBu_r)
    else:
        pl.pcolormesh(xx, yy, p, cmap=cm.Reds)
    pl.title("t= " + str(sol.state.t), fontsize=20)
    pl.xticks(size=20)
    pl.yticks(size=20)
    cb = pl.colorbar()
    if clim is not None:
        pl.clim(clim[0], clim[1])
    imaxes = pl.gca()
    pl.axes(cb.ax)
    pl.yticks(fontsize=20)
    pl.axes(imaxes)
    pl.axis('equal')
    if axis_limits is None:
        pl.axis([np.min(x), np.max(x), np.min(y), np.max(y)])
    else:
        pl.axis([axis_limits[0], axis_limits[1],
                 axis_limits[2], axis_limits[3]])
    pl.savefig('./figures/' + dirname + '.png')
    pl.close()
def _on_button_press(event):
    if event.button != 1 or not event.inaxes:
        return
    lon, lat = m(event.xdata, event.ydata, inverse=True)
    # Convert to colat to ease indexing.
    colat = rotations.lat2colat(lat)

    x_range = (self.setup["physical_boundaries_x"][1] -
               self.setup["physical_boundaries_x"][0])
    x_frac = (colat - self.setup["physical_boundaries_x"][0]) / x_range
    x_index = int(((self.setup["boundaries_x"][1] -
                    self.setup["boundaries_x"][0]) * x_frac) +
                  self.setup["boundaries_x"][0])
    y_range = (self.setup["physical_boundaries_y"][1] -
               self.setup["physical_boundaries_y"][0])
    y_frac = (lon - self.setup["physical_boundaries_y"][0]) / y_range
    y_index = int(((self.setup["boundaries_y"][1] -
                    self.setup["boundaries_y"][0]) * y_frac) +
                  self.setup["boundaries_y"][0])

    plt.figure(1, figsize=(3, 8))
    depths = available_depths
    values = data[x_index, y_index, :]
    plt.plot(values, depths)
    plt.grid()
    plt.ylim(depths[-1], depths[0])
    plt.show()
    plt.close()
    plt.figure(0)
def nova_plot():
    erg2mev = 624151.

    fig = plot.figure()
    yrange = [1e-6, 2e-4]
    xrange = [1e-1, 1e5]
    plot.fill_between([0.2, 10e3], [yrange[1], yrange[1]],
                      [yrange[0], yrange[0]], facecolor='yellow',
                      interpolate=True, color='yellow', alpha=0.5)
    plot.annotate('AMEGO', xy=(3, 9e-5), xycoords='data', fontsize=26,
                  color='black')

    lat = ascii.read("data/NMon2012.LAT.dat",
                     names=['energy', 'en_low', 'en_high', 'flux',
                            'flux_err', 'tmp'])
    plot.scatter(lat['energy'], lat['flux'] * erg2mev, color='red')
    plot.errorbar(lat['energy'], lat['flux'] * erg2mev,
                  xerr=[lat['en_low'], lat['en_high']],
                  yerr=lat['flux_err'] * erg2mev, ecolor='red', capsize=0,
                  fmt='none')
    latul = ascii.read("data/NMon2012.LAT.limits.dat",
                       names=['energy', 'en_low', 'en_high', 'flux', 'tmp1',
                              'tmp2', 'tmp3', 'tmp4'])
    plot.errorbar(latul['energy'], latul['flux'] * erg2mev,
                  xerr=[latul['en_low'], latul['en_high']],
                  yerr=0.5 * latul['flux'] * erg2mev, uplims=True,
                  ecolor='red', capsize=0, fmt='none')
    plot.scatter(latul['energy'], latul['flux'] * erg2mev, color='red')

    leptonic = ascii.read("data/sp-NMon12-IC-best-fit-1MeV-30GeV.txt",
                          names=['energy', 'flux'], data_start=1)
    hadronic = ascii.read("data/sp-NMon12-pi0-and-secondaries.txt",
                          names=['energy', 'flux1', 'flux2'], data_start=1)
    plot.plot(leptonic['energy'], leptonic['flux'] * erg2mev, 'r--',
              color='black', lw=2, label='Leptonic')
    plot.plot(hadronic['energy'], hadronic['flux2'] * erg2mev,
              color='black', lw=2, label='Hadronic+Secondary Leptons')

    plot.legend(loc='upper right', fontsize='small', frameon=False,
                framealpha=0.5)
    plot.xscale('log')
    plot.yscale('log')
    plot.ylim(yrange)
    plot.xlim(xrange)
    plot.xlabel(r'Energy (MeV)')
    plot.ylabel(r'Energy$^2 \times $ Flux (Energy) (erg cm$^{-2}$ s$^{-1}$)')
    plot.title('Nova V339 Del 2013')
    plot.savefig('Nova_SED.png', bbox_inches='tight')
    plot.savefig('Nova_SED.eps', bbox_inches='tight')
    plot.show()
    plot.close()
def plot_predlinks_roc(infile, outfile):
    # binary mode is required for pickles under Python 3
    preddf = pickle.load(open(infile, 'rb'))['df']

    preddf['tp'] = preddf['t_t'] / preddf['t_tot']
    preddf['fp'] = preddf['f_t'] / preddf['f_tot']
    preddf['frac_wrong'] = 1.0 - (preddf['t_t'] + preddf['f_f']) / \
        (preddf['t_tot'] + preddf['f_tot'])

    f = pylab.figure(figsize=(2, 2))
    ax = f.add_subplot(1, 1, 1)

    # group by cv set
    for row_name, cv_df in preddf.groupby('cv_idx'):
        # DataFrame.sort() was removed from pandas; use sort_values()
        cv_df_m = cv_df.groupby('pred_thold').mean().sort_values('fp')
        ax.plot(cv_df_m['fp'], cv_df_m['tp'], c='k', alpha=0.3)

    fname = infile[0].split('-')[0]
    ax.set_title(fname)
    ax.set_xticks([0.0, 1.0])
    ax.set_yticks([0.0, 1.0])
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)

    f.savefig(outfile)
    pylab.close(f)
def analyze(title, x, y, func, func_title):
    print('-' * 80)
    print(title)
    print('x: %s:%s %s' % (list(x.shape), x.dtype, [x.min(), x.max()]))
    print('y: %s:%s %s' % (list(y.shape), y.dtype, [y.min(), y.max()]))

    popt, pcov = curve_fit(func, x, y)
    print('popt=%s' % popt)
    print('pcov=\n%s' % pcov)
    a, b = popt
    print('a=%e' % a)
    print('b=%e' % b)
    print(func_title(a, b))

    xf = np.linspace(x.min(), x.max(), 100)
    yf = func(xf, a, b)
    print('xf: %s:%s %s' % (list(xf.shape), xf.dtype, [xf.min(), xf.max()]))
    print('yf: %s:%s %s' % (list(yf.shape), yf.dtype, [yf.min(), yf.max()]))

    plt.title(func_title(a, b))
    # plt.xlim(0, x.max())
    # plt.ylim(0, y.max())
    plt.semilogx(x, y, label='data')
    plt.semilogx(xf, yf, label='fit')
    plt.legend(loc='best')
    plt.savefig('%s.png' % title)
    plt.close()
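# Usage sketch for analyze() (an assumption, not from the source): a
# two-parameter power law stands in for `func`; data, noise level, and all
# names here are hypothetical.
def power_law(x, a, b):
    return a * np.power(x, b)

x_demo = np.logspace(0, 3, 50)
y_demo = power_law(x_demo, 2.0, 0.5) * (1 + 0.05 * np.random.randn(x_demo.size))
analyze('power_fit_demo', x_demo, y_demo, power_law,
        lambda a, b: 'y = %.3f * x^%.3f' % (a, b))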
def check_HDF5(size=64):
    """
    Plot images with landmarks to check the processing
    """
    # Get hdf5 file
    hdf5_file = os.path.join(data_dir, "CelebA_%s_data.h5" % size)

    with h5py.File(hdf5_file, "r") as hf:
        data_color = hf["training_color_data"]
        data_lab = hf["training_lab_data"]
        data_black = hf["training_black_data"]
        for i in range(data_color.shape[0]):
            fig = plt.figure()
            gs = gridspec.GridSpec(3, 1)
            for k in range(3):
                ax = plt.subplot(gs[k])
                if k == 0:
                    img = data_color[i, :, :, :].transpose(1, 2, 0)
                    ax.imshow(img)
                elif k == 1:
                    img = data_lab[i, :, :, :].transpose(1, 2, 0)
                    img = color.lab2rgb(img)
                    ax.imshow(img)
                elif k == 2:
                    img = data_black[i, 0, :, :] / 255.
                    ax.imshow(img, cmap="gray")
            gs.tight_layout(fig)
            plt.show()
            plt.clf()
            plt.close()
def test_PhysicalNodeLayout(self):
    # Graph: Physical Layout of Nodes
    required_files = ["s1_layout." + img_extn]

    for f in required_files:
        try:
            os.remove(f)
        except OSError:  # a bare except would also swallow real errors
            pass

    figsize = cb.latexify(columns=_texcol, factor=_texfac)

    base_config = aietes.Simulation.populate_config(
        aietes.Tools.get_config('bella_static.conf'),
        retain_default=True
    )
    texify = lambda t: "${0}_{1}$".format(t[0], t[1])
    node_positions = {texify(k): np.asarray(v['initial_position'], dtype=float)
                      for k, v in base_config['Node']['Nodes'].items()
                      if 'initial_position' in v}
    node_links = {0: [1, 2, 3], 1: [0, 1, 2, 3, 4, 5], 2: [0, 1, 5],
                  3: [0, 1, 4], 4: [1, 3, 5], 5: [1, 2, 4]}
    fig = cb.plot_nodes(node_positions, figsize=figsize,
                        node_links=node_links, radius=0, scalefree=True,
                        square=True)
    fig.tight_layout(pad=0.3)
    savefig(fig, "s1_layout", transparent=True)
    plt.close(fig)

    for f in required_files:
        self.assertTrue(os.path.isfile(f))
        self.generated_files.append(f)
def test_ThroughputLines(self):
    # Plot Throughput Lines
    required_files = [
        "throughput_sep_lines_static." + img_extn,
        "throughput_sep_lines_all_mobile." + img_extn
    ]

    for f in required_files:
        try:
            os.remove(f)
        except OSError:
            pass

    cb.latexify(columns=_texcol, factor=_texfac)

    for mobility in mobilities:
        df = get_mobility_stats(mobility)
        fig = plt.figure(facecolor='white')
        ax = fig.add_subplot(1, 1, 1)
        for (k, g), ls in zip(df.groupby('separation'),
                              itertools.cycle(["-", "--", "-.", ":"])):
            ax.plot(g.rate, g.throughput, label=k, linestyle=ls)
        ax.legend(loc="upper left")
        ax.set_xlabel("Packet Emission Rate (pps)")
        ax.set_ylabel("Avg. Throughput (bps)")
        fig.tight_layout()
        savefig(fig, "throughput_sep_lines_{0}".format(mobility),
                transparent=True, facecolor='white')
        plt.close(fig)

    for f in required_files:
        self.assertTrue(os.path.isfile(f))
        self.generated_files.append(f)
def plot_df(self, show=False):
    from matplotlib import pylab as plt
    if self.afp is None:
        print('afp not initialized. call update afp')
        return -1

    linecords, td, df, rtn, minmaxy = self.afp
    formatter = PlotDateFormatter(df.index)
    #fig = plt.figure()
    #ax = plt.addsubplot()
    fig, ax = plt.subplots()
    ax.xaxis.set_major_formatter(formatter)

    ax.plot(np.arange(len(df)), df['p'])

    for cord in linecords:
        plt.plot(cord[0], cord[1], color='red')

    fig.autofmt_xdate()
    plt.xlim(-10, len(df.index) + 10)
    plt.ylim(df.p.min() - 10, df.p.max() + 10)
    # plt.grid(ax) passed the axes object where a boolean is expected
    ax.grid(True)
    #if show:
    #    plt.show()
    #"{0}{1}.png".format("./data/", datetime.datetime.strftime(datetime.datetime.now(), '%Y%M%m%S'))

    if self.plot_file:
        save_path = self.plot_file.format(self.symbol)
        if os.path.exists(os.path.dirname(save_path)):
            plt.savefig(save_path)

    plt.clf()
    plt.close()
def plot_ea(frame1, filt_df, dst_path, uplift_rate, riv_case):
    f = plt.figure()
    ax = filt_df.plot(x='ApatiteHeAge', y='Elevation', style='o-', ax=f.gca())
    plt.title('Age-Elevation')
    plt.xlabel('ApatiteHeAge [Ma]')
    plt.ylabel('Elevation [Km]')

    # trend line
    sup_age, slope, r_square = find_max_treadline(
        filt_df, uplift_rate * np.sin(np.deg2rad(60)), riv_case)
    x = filt_df[filt_df['ApatiteHeAge'] < sup_age]['ApatiteHeAge']
    y = filt_df[filt_df['ApatiteHeAge'] < sup_age]['Points:2']
    z = np.polyfit(x, y, 1)
    p = np.poly1d(z)

    # plt.legend(point_lables, loc='best', fontsize=10)
    # n = np.linspace(min(frame1[frame1['Points:2'] > min(frame1['Points:2'])]['ApatiteHeAge']), max(frame1['ApatiteHeAge']), 21)
    n = np.linspace(
        min(filt_df[filt_df['Points:2'] >= min(filt_df['Points:2'])]['ApatiteHeAge']),
        max(filt_df['ApatiteHeAge']), 21)
    plt.plot(n, p(n) - min(frame1['Points:2']), '-r')
    ax.text(np.mean(n), np.mean(p(n) - min(frame1['Points:2'])),
            'y=%.6fx + b' % (z[0]), fontsize=20)

    txs = np.linspace(np.round(min(filt_df['Elevation'])),
                      np.ceil(max(filt_df['Elevation'])), 11)
    lebs = ['0'] + [str(i) for i in txs[1:]]
    plt.yticks(txs, list(reversed(lebs)))
    plt.savefig(dst_path)
    plt.close()
    return z[0]
def test_one_box(box, tree, graphics=False, callback=None):  # ,f):
    print('box', box[0], box[1], ':', end=' ')
    s = tree.search(box)
    print("")
    print("box search:", s)
    print("len(s):", len(s))
    boxes = tree.boxes()
    if graphics:
        plt.close()
        gfx.show_uboxes(boxes)
        gfx.show_uboxes(boxes, S=s, col='r')
    if len(s) < ((tree.dim ** tree.depth) / 2):  # dim^depth/2
        t = tree.insert(box)
        if graphics:
            boxes = tree.boxes()
            gfx.show_uboxes(boxes, S=t, col='c')
        print('ins:', t)
    else:
        t = tree.remove(s)
        print('rem:', t)
    if graphics:
        gfx.show_box(box, col='g', alpha=0.5)
        if callback:
            plt.gcf().canvas.mpl_connect('button_press_event', callback)
        plt.show()
def boxplot(pd_df, features, colsplitfeature, cols, celsplitfeature, cels,
            label={'x': '', 'y': '', 'title': '', 'ticklabelssprecision': ''},
            order=None, supress_plot=False, figure_name='figure.png',
            uselatex=False, scalefont=1.):
    """Plot a matrix of boxplots for several properties.

    Parameters:
    -----------
    features: dict.
        Mapping of the feature names (keys) to their labels (values). The
        order of the features in the figure follows the order of this dict.
    colsplitfeature: str.
        The boxplot matrix will contain data split per column according to
        this feature's values.
    cols: dict.
        Mapping of the colsplitfeature feature values (keys) to their labels
        (dict values and column labels). The columns in the figure follow
        the order of this dict.
    celsplitfeature: str or None.
        The boxplot matrix will contain data split per cell according to
        this feature's values. None disables multiple plots per cell.
    cels: dict or None.
        If dict, it maps the celsplitfeature feature values (keys) to their
        labels (dict values and plot labels). The order of the plots in the
        cells of the figure follows the order of this dict.
    label: a dict ({'x': '', 'y': '', 'title': '', 'ticklabelssprecision': ''})
        The x, y, and title labels.
    figure_name: str, (figure.png).
        The name of the figure.
    uselatex: boolean, (False).
        If True, the figure text will be compiled with latex.
    scalefont: float, (1.).
        Scale the font size by this factor.

    Return:
    -------
    None. The figure is written to figure_name.
    """
    print("Initializing boxplot matrix")

    # If requested, latex will be employed to build the figure.
    if uselatex:
        rc('text', usetex=True)
        rcParams['text.latex.preamble'] = (
            r'\usepackage[version=4]{mhchem} \usepackage{amsmath} '
            r'\usepackage{amsfonts} \usepackage{mathtools} '
            r'\usepackage[T1]{fontenc} \boldmath')
        rcParams['axes.titleweight'] = 'bold'

    # Features:
    if not isinstance(features, dict):
        print("Error: features should be a dictionary!")
        sys.exit(1)
    checkmissingkeys(list(features.keys()), pd_df.columns.to_list(),
                     "the pandas.DataFrame does not present the following "
                     "features")

    # Cells:
    # if there is only one plot per cell, a fake dimension will be created
    if celsplitfeature is None:
        celsplitfeature = 'fake_celsplitfeature'
        pd_df[celsplitfeature] = np.ones(len(pd_df))
        cels = {1: ''}
    if colsplitfeature is None:
        colsplitfeature = 'fake_colsplitfeature'
        pd_df[colsplitfeature] = np.ones(len(pd_df))
        cols = {1: ''}

    grouped = pd_df.groupby([colsplitfeature, celsplitfeature])
    depth = len(cels)
    height = len(features)
    width = len(cols)
    print('depth:', depth, 'height:', height, 'width:', width)

    # creating empty plot!
    plt.close('all')
    figwidth = int((width * 1.) * 2)
    figheight = int((height * 1.) * 2)
    fig, axis = plt.subplots(nrows=height, ncols=width, sharex='col',
                             sharey='row', figsize=(figwidth, figheight))

    # label sizes:
    axis_title_font_size = 30 * scalefont
    axis_label_font_size = 25 * scalefont
    tick_label_font_size = 25 * scalefont
    anotation_font_size = 20
    marker_size = 50

    # Symbols/markers, lines,
    slines = ['-', '--', ':', '-.']
    scolors = ['y', 'g', 'm', 'c', 'b', 'r']
    smarker = ['o', 's', 'D', '^', '*', 'o', 's', 'x', 'D', '+', '^', 'v', '>']

    # Adding the boxplots
    size = len(pd_df)
    nouts = np.zeros((height, width, depth))
    ndata = np.zeros((height, width, depth))
    for indf, feature in enumerate(list(features.keys())):
        for colindex, colvals in enumerate(list(cols.keys())):
            if len(cels) > 1:
                # plot
                boxplot = sns.boxplot(
                    x=celsplitfeature, y=feature,
                    data=pd_df[pd_df[colsplitfeature] == colvals],
                    orient='v', order=order, ax=axis[indf, colindex])
                # info
                for celindex, celvals in enumerate(list(cels.keys())):
                    aux = np.logical_and(
                        np.array(pd_df[colsplitfeature].to_numpy()) == colvals,
                        np.array(pd_df[celsplitfeature].to_numpy()) == celvals)
                    aux2 = pd_df[aux]
                    data = np.array(aux2[feature].to_numpy())
                    output = boxplot_info(data)
                    ndata[indf, colindex, celindex] = output[-2]
                    nouts[indf, colindex, celindex] = output[-1]
            else:
                # plot
                boxplot = sns.boxplot(
                    x=feature, data=pd_df[pd_df[colsplitfeature] == colvals],
                    orient='v', order=order, ax=axis[indf, colindex])
                # info
                data = np.array(
                    pd_df[pd_df[colsplitfeature] == colvals][feature].to_numpy())
                output = boxplot_info(data)
                ndata[indf, colindex, 0] = output[-2]
                nouts[indf, colindex, 0] = output[-1]
            # removing individual plot labels
            boxplot.set_xlabel('')
            boxplot.set_ylabel('')

    # print outlier fractions per feature
    for indf, feature in enumerate(list(features.keys())):
        print("{}: {:.3f}%".format(
            features[feature],
            100 * np.sum(nouts[indf, :, :]) / np.sum(ndata[indf, :, :])))
    # per column
    for colindex, colvals in enumerate(list(cols.keys())):
        print("{}: {:.3f}%".format(
            cols[colvals],
            100 * np.sum(nouts[:, colindex, :]) / np.sum(ndata[:, colindex, :])))
    # per cell
    if len(cels) > 1:
        for celindex, celvals in enumerate(list(cels.keys())):
            print("{}: {:.3f}%".format(
                cels[celvals],
                100 * np.sum(nouts[:, :, celindex]) / np.sum(ndata[:, :, celindex])))
    # total
    print('Total: {:.3f}%'.format(100 * np.sum(nouts) / np.sum(ndata)))

    # adding labels
    for indf, feature in enumerate(list(features.keys())):
        for colindex, colvals in enumerate(list(cols.keys())):
            axis[indf, colindex].xaxis.set_tick_params(direction='in',
                                                       length=5, width=0.9)
            axis[indf, colindex].yaxis.set_tick_params(direction='in',
                                                       length=5, width=0.9)
            # Label alignment and number of decimal places
            axis[0, colindex].xaxis.set_label_position("top")
            axis[0, colindex].set_xlabel(cols[colvals], va='center',
                                         ha='center', labelpad=40,
                                         size=axis_label_font_size)
            axis[indf, 0].set_ylabel(features[feature], va='center',
                                     ha='center', labelpad=40,
                                     size=axis_label_font_size, rotation=60)
            for tikslabel in axis[indf, 0].yaxis.get_ticklabels():
                tikslabel.set_fontsize(tick_label_font_size)
            for tikslabel in axis[-1, colindex].xaxis.get_ticklabels():
                tikslabel.set_fontsize(tick_label_font_size)

    plt.subplots_adjust(left=0.125, right=0.92, bottom=0.1, top=0.9,
                        wspace=0.0, hspace=0.0)

    # Adding the main figure captions.
    fig.text(0.01, 0.524, label['y'], ha='center', rotation='vertical',
             size=axis_title_font_size)
    fig.text(0.5, 0.95, label['title'], ha='center',
             size=axis_title_font_size)
    fig.text(0.5, 0.01, label['x'], ha='center', size=axis_title_font_size)

    # Saving the figure to a file
    plt.savefig(figure_name, dpi=300)
    return
plt.savefig(
    "diagrams/TKR4p149/pathways/pathways_zm_del_{0}.png".format(base),
    dpi=400, bbox_inches='tight')
dann.remove()

lann = plt.text(simX(0.345, 0.3), simY(0.3), r'L', fontsize=14)
plt.xlim([0.45, 0.55])
plt.ylim([0.25, 0.25 + rt3by2 * 0.1])
plt.savefig(
    "diagrams/TKR4p149/pathways/pathways_zm_lav_{0}.png".format(base),
    dpi=400, bbox_inches='tight')
plt.close()

# Plot phase diagram
plt.figure(1, figsize=(10, 7.5))  # inches
plt.plot(XS, YS, '-k')
plt.plot(X0, Y0, '-k', zorder=1)
plt.axis('off')
gann = plt.text(simX(0.010, 0.495), simY(0.495), r'$\gamma$', fontsize=14)
dann = plt.text(simX(0.230, 0.010), simY(0.010), r'$\delta$', fontsize=14)
lann = plt.text(simX(0.340, 0.275), simY(0.275), r'L', fontsize=14)
def NR_plot(stream, NR_stream, detections, false_detections=False,
            size=(18.5, 10), save=False, title=False):
    r"""Function to plot the Network response alongside the streams used -\
    highlights detection times in the network response.

    :type stream: :class: obspy.Stream
    :param stream: Stream to plot
    :type NR_stream: :class: obspy.Stream
    :param NR_stream: Stream for the network response
    :type detections: list of datetime objects
    :param detections: List of the detections
    :type false_detections: list of datetime
    :param false_detections: Either False (default) or list of false detection\
        times
    :type size: tuple
    :param size: Size of figure, default is (18.5, 10)
    :type save: bool
    :param save: Save figure or plot to screen, if not False, must be string\
        of save path.
    :type title: str
    :param title: String for the title of the plot, set to False
    """
    import datetime as dt
    import matplotlib.dates as mdates
    fig, axes = plt.subplots(len(stream) + 1, 1, sharex=True, figsize=size)
    if len(stream) > 1:
        axes = axes.ravel()
    else:
        return
    mintime = stream.sort(['starttime'])[0].stats.starttime
    stream.sort(['network', 'station', 'starttime'])
    for i, tr in enumerate(stream):
        delay = tr.stats.starttime - mintime
        delay *= tr.stats.sampling_rate
        y = tr.data
        x = [tr.stats.starttime +
             dt.timedelta(seconds=s / tr.stats.sampling_rate)
             for s in range(len(y))]  # xrange is Python 2 only
        x = mdates.date2num(x)
        axes[i].plot(x, y, 'k', linewidth=1.1)
        axes[i].set_ylabel(tr.stats.station + '.' + tr.stats.channel,
                           rotation=0)
        axes[i].yaxis.set_ticks([])
        axes[i].set_xlim(x[0], x[-1])
    # Plot the network response on the extra (last) axes; the original
    # reused the stale loop index i, leaving the last panel empty
    tr = NR_stream[0]
    delay = tr.stats.starttime - mintime
    delay *= tr.stats.sampling_rate
    y = tr.data
    x = [tr.stats.starttime +
         dt.timedelta(seconds=s / tr.stats.sampling_rate)
         for s in range(len(y))]
    x = mdates.date2num(x)
    axes[-1].plot(x, y, 'k', linewidth=1.1)
    axes[-1].set_ylabel(tr.stats.station + '.' + tr.stats.channel, rotation=0)
    axes[-1].yaxis.set_ticks([])
    axes[-1].set_xlabel('Time')
    axes[-1].set_xlim(x[0], x[-1])
    # Plot the detections!
    ymin, ymax = axes[-1].get_ylim()
    if false_detections:
        for detection in false_detections:
            xd = mdates.date2num(detection)
            axes[-1].plot((xd, xd), (ymin, ymax), 'k--', linewidth=0.5,
                          alpha=0.5)
    for detection in detections:
        xd = mdates.date2num(detection)
        axes[-1].plot((xd, xd), (ymin, ymax), 'r--', linewidth=0.75)
    # Set formatters for x-labels
    mins = mdates.MinuteLocator()
    timedif = tr.stats.endtime.datetime - tr.stats.starttime.datetime
    if 10800 <= timedif.total_seconds() <= 25200:
        hours = mdates.MinuteLocator(byminute=[0, 15, 30, 45])
    elif timedif.total_seconds() <= 1200:
        hours = mdates.MinuteLocator(byminute=range(0, 60, 2))
    # total_seconds was missing its call parentheses in the next branch
    elif 25200 < timedif.total_seconds() <= 172800:
        hours = mdates.HourLocator(byhour=range(0, 24, 3))
    elif timedif.total_seconds() > 172800:
        hours = mdates.DayLocator()
    else:
        hours = mdates.MinuteLocator(byminute=range(0, 60, 5))
    hrFMT = mdates.DateFormatter('%Y/%m/%d %H:%M:%S')
    axes[-1].xaxis.set_major_locator(hours)
    axes[-1].xaxis.set_major_formatter(hrFMT)
    axes[-1].xaxis.set_minor_locator(mins)
    plt.gcf().autofmt_xdate()
    axes[-1].fmt_xdata = mdates.DateFormatter('%Y/%m/%d %H:%M:%S')
    plt.subplots_adjust(hspace=0)
    if title:
        axes[0].set_title(title)
    if not save:
        plt.show()
        plt.close()
    else:
        plt.savefig(save)
    return
def pretty_template_plot(template, size=(18.5, 10.5), save=False, title=False,
                         background=False, picks=False):
    r"""Function to make a pretty plot of a single template, designed to work \
    better than the default obspy plotting routine for short data lengths.

    :type template: :class: obspy.Stream
    :param template: Template stream to plot
    :type size: tuple
    :param size: tuple of plot size
    :type save: bool
    :param save: if False will plot to screen, if True will save
    :type title: str
    :param title: String if set will be the plot title
    :type background: :class: obspy.stream
    :param background: Stream to plot the template within.
    :type picks: list of :class: eqcorrscan.utils.Sfile_util.PICK
    :param picks: List of eqcorrscan type picks.
    """
    fig, axes = plt.subplots(len(template), 1, sharex=True, figsize=size)
    if len(template) > 1:
        axes = axes.ravel()
    else:
        return
    if not background:
        mintime = template.sort(['starttime'])[0].stats.starttime
    else:
        mintime = background.sort(['starttime'])[0].stats.starttime
    template.sort(['network', 'station', 'starttime'])
    lengths = []
    for i, tr in enumerate(template):
        delay = tr.stats.starttime - mintime
        y = tr.data
        x = np.linspace(0, len(y) * tr.stats.delta, len(y))
        x += delay
        if background:
            btr = background.select(station=tr.stats.station,
                                    channel=tr.stats.channel)[0]
            bdelay = btr.stats.starttime - mintime
            by = btr.data
            bx = np.linspace(0, len(by) * btr.stats.delta, len(by))
            bx += bdelay
            axes[i].plot(bx, by, 'k', linewidth=1)
            axes[i].plot(x, y, 'r', linewidth=1.1)
            lengths.append(max(bx[-1], x[-1]))
        else:
            axes[i].plot(x, y, 'k', linewidth=1.1)
            lengths.append(x[-1])
        # print(' '.join([tr.stats.station, str(len(x)), str(len(y))]))
        axes[i].set_ylabel('.'.join([tr.stats.station, tr.stats.channel]),
                           rotation=0, horizontalalignment='right')
        axes[i].yaxis.set_ticks([])
        # Plot the picks if they are given
        if picks:
            tr_picks = [pick for pick in picks if
                        pick.station == tr.stats.station and
                        pick.channel[0] + pick.channel[-1] ==
                        tr.stats.channel[0] + tr.stats.channel[-1]]
            for pick in tr_picks:
                if pick.phase == 'P':
                    pcolor = 'red'
                elif pick.phase == 'S':
                    pcolor = 'blue'
                else:
                    pcolor = 'k'
                pdelay = pick.time - mintime
                # print(pdelay)
                axes[i].axvline(x=pdelay, color=pcolor, linewidth=2)
                # axes[i].plot([pdelay, pdelay], [])
    axes[i].set_xlim([0, max(lengths)])
    axes[len(template) - 1].set_xlabel('Time (s) from start of template')
    plt.subplots_adjust(hspace=0, left=0.175, right=0.95, bottom=0.07)
    if title:
        axes[0].set_title(title)
    else:
        plt.subplots_adjust(top=0.98)
    if not save:
        plt.show()
        plt.close()
    else:
        plt.savefig(save)
def quit(self):
    '''
    Have to explicitly call quit, in order to close all of the plot windows.
    '''
    close('all')
    exit()
""" import numpy as np from numpy import pi as pi import matplotlib.pylab as plt from time import clock from CREATE_IDEAL_MESH import * from Meshing_Tools import * from VISUAL_TRI import * from IC_conditions import * from HLLC_FLUX import * from HLLC_SOLVER_TRI import * plt.close('all') # ----------------------------------------------------------- # case 3: INITIAL CONDITIONS IN EACH QUADRANT # (p,d,u,v) # ----------------------------------------------------------- upper_left = [0.3, 0.5323, 1.206, 0.0] lower_left = [0.029, 0.138, 1.206, 1.206] upper_right = [1.5, 1.5, 0.0, 0.0] lower_right = [0.3, 0.5323, 0.0, 1.206] # simulation time and Courant number for stability sim_time = 0.3 # simulation time CFL = 0.1 # Courant number # -----------------------------------------------------------
def scatter_colorbar(pd_df, mainprop, features, colsplitfeature, cols, celsplitfeature, cels, show='', label={ 'x': '', 'y': '', 'title': '', 'ticklabelssprecision': '' }, cbcomp=comp_spearman, cb_info={ 'min': -1., 'max': 1., 'label': '' }, bootstrap_info={ 'n': 0, 'alpha': 0.25 }, supress_plot=False, figure_name='figure.png', uselatex=False, scalefont=1.): """This function plot a scatterplot of correlations between properties and a target property. Parameters: ----------- mainprop: str. This feature will be the horizontal cell axes, shered whichin each column. features: dict. Mapping the features names (keys) and its label (values). The order of the features in the figure follows the same order of the presented in this dict. colsplitfeature: str. The scatternplot matrix will contain data splited per column according to this feature values. cols: dict. Mapping the colsplitfeature feature values (keys) and its label (dict values and column labels). The columns in the figure follows the same order of this dict. celsplitfeature: str or None. The scatternplot matrix will contain data splited per cell according to this feature values. None wil desable multi plot per scattermatrix cell. cels: dict or None. If dict, it map the celsplitfeature feature value (keys) and its labels (dict values and plot labels). The order of the plots in the cells in the figure follows the same order of this dict. labels: a dict ({'x': '', 'y': '', 'title': '', ticklabelssprecision=''}) The x, y, and the title labels. cbcomp: function, function that compute the information (correlation/mutual info) with the paired data. cb_info: dict with three values ({'min'=-1.,'max'=1.,'label'=''}). The max and min values for the colorbar values and its label. bootstrap_info: a dict with three ({n=0,alpha=0.25,show:test,corrcut=0}). If 'n' value were larger then zero the bootstrap analysis will be performed with (under the null and alternative hypothesis) with number of bootstraped samples equal to 'n' value and a conconfidence level 'alpha'. Moreover, the it will make return pvalues and both hypothese test results. show: str ['corr','pval', 'test', 'testred', 'ang', 'confint']. Define what information will be shown in the scatterplot, if bootstrap were employed. If 'corr', the correlation value will be printed. If 'test', the information whether the correlation pass in the bootstrap hypothesis tests or not will be presented. If 'testred', the following information will be printed: * if the correlation passed in both tests, + if the correlaiton passed in one test, and nothing if the correlation fail for both tests. If 'pval', the p-value of the correlation bootstrap under null hypothesis test will be shown. If 'confint', the confidence interval for the correlations will be show. If 'ang', the angular value of the linear regression will be show. If '', nothing will be show. figure_name: str, (figure.png). The name of the figure. uselatex: bollean, (False) If True, the figure text will copiled with latex. scalefont: float, (1.) Scale the fontsize by this factor. Return: ------- A dictionary with the folloyings keys: 'corrs': np.array of floats with three dimentions. The correlations calculated with cbcomp function. If the plot were calculated: 'fig': pyplot figure. The figure. 'axis': pyplat axis. The axis. 'angular_parameter': np.array of floats with three dimentions. The angular parameters of the linear model fitting the data. If bootstrap were employed: 'alt_test', 'null_test': np.array of booleans with three dimentions. 
The result of the hypothesis test, H1 and H0, respectively. 'null_test_pval': np.array of floats with three dimentions. The p-values. 'alt_test_confimax', 'alt_test_confimin': np.array of booleans with three dimentions. Confidence maximum and minimun. """ print("Initializing scatter plot") # If were requested, latex will be employed to build the figure. if uselatex: rc('text', usetex=True) rcParams[ 'text.latex.preamble'] = r'\usepackage[version=4]{mhchem} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{mathtools} \usepackage[T1]{fontenc}' # \boldmath' #rcParams['axes.titleweight'] = 'bold' # Features: if not isinstance(features, dict): print("Error: features should be a dictionary!") sys.exit(1) checkmissingkeys( list(features.keys()), pd_df.columns.to_list(), "the " "pandas.DataFrame does not present the following " "features") # Cells: # if there only one plot per cell, a fake dimension will be crated if celsplitfeature is None: celsplitfeature = 'fake_celsplitfeature' pd_df[celsplitfeature] = np.ones(len(pd_df)) cels = {1: ''} if colsplitfeature is None: colsplitfeature = 'fake_colsplitfeature' pd_df[colsplitfeature] = np.ones(len(pd_df)) cols = {1: ''} grouped = pd_df.groupby([colsplitfeature, celsplitfeature]) depth = len(cels) height = len(features) width = len(cols) print('depth:', depth, 'height:', height, 'width:', width) # Calcalationg the property to be ploted in colors corrs_plot = np.zeros([depth, height, width]) test_apply = np.zeros([depth, height, width], dtype=bool) for findex, feature in enumerate(list(features.keys())): for colindex, colvals in enumerate(list(cols.keys())): for celindex, celvals in enumerate(list(cels.keys())): try: group = grouped.get_group((colvals, celvals)) except KeyError: continue datax, datay = tonparray(group[mainprop], group[feature]) if (len(datax) > 1) and (not np.all(datax == datax[0])) and ( not np.all(datay == datay[0])): test_apply[celindex, findex, colindex] = True corrs_plot[celindex, findex, colindex] = cbcomp(datax, datay) else: corrs_plot[celindex, findex, colindex] = 0 corrs_plot = np.nan_to_num(corrs_plot) # Correlation Bootstrap if bootstrap_info['n']: # bootstraping the correlations print('Bootstrap analysis') null_test = np.zeros([depth, height, width], dtype=bool) null_test_pval = np.zeros([depth, height, width], dtype=float) alt_test = np.zeros([depth, height, width], dtype=bool) alt_test_confimax = np.zeros([depth, height, width], dtype=float) alt_test_confimin = np.zeros([depth, height, width], dtype=float) for findex, feature in enumerate(list(features.keys())): for colindex, colvals in enumerate(list(cols.keys())): for celindex, celvals in enumerate(list(cels.keys())): group = grouped.get_group((colvals, celvals)) datax, datay = tonparray(group[mainprop], group[feature]) if test_apply[celindex, findex, colindex]: # null hypothesisi test, pval = bstnull(datay, datax, corr_method=cbcomp, nresamp=bootstrap_info['n'], alpha=bootstrap_info['alpha']) null_test[celindex, findex, colindex] = test null_test_pval[celindex, findex, colindex] = pval # alternative hypothesis test, confi = bstalt(datay, datax, corr_method=cbcomp, nresamp=bootstrap_info['n'], alpha=bootstrap_info['alpha']) alt_test[celindex, findex, colindex] = test alt_test_confimax[celindex, findex, colindex] = confi[1] alt_test_confimin[celindex, findex, colindex] = confi[0] print("completed: ", findex + 1, ' of ', len(features)) # printing latex table if False: #TODO correct this if statement np.set_printoptions(formatter={'float': '{: 0.2f}'.format}) s = ' & 
' sup = np.array(np.round(alt_test_confimax, 2), str) inf = np.array(np.round(alt_test_confimin, 2), str) r = np.vectorize(lambda t1, t2: t1 + ',' + t2)(sup, inf) if r.shape[0] > 1: firstrow = s + s for colindex, colvals in enumerate(list(cols.keys())): firstrow += cols[colvals] + s firstrow += ' \\\\' print(firstrow) for indf, feature in enumerate(list(features.keys())): for celindex, celvals in enumerate(list(cels.keys())): rowtotalsup = features[feature] + s rowtotalinf = s rowtotalsup += cels[celvals] + s + 'sup' + s rowtotalinf += s + 'inf' + s for colindex, colvals in enumerate(list(cols.keys())): rowtotalsup += sup[celindex, indf, colindex] + s rowtotalinf += inf[celindex, indf, colindex] + s print(rowtotalsup + ' \\\\') print(rowtotalinf + ' \\\\') else: firstrow = s for colindex, colvals in enumerate(list(cols.keys())): firstrow += cols[colvals] + s + s firstrow += ' \\\\' print(firstrow) for indf, feature in enumerate(list(features.keys())): rowsup = features[feature] + s + 'sup' + s rowinf = s + 'inf' + s for colindex, colvals in enumerate(list(cols.keys())): celtotalsup = sup[0, indf, colindex] celtotalinf = inf[0, indf, colindex] rowsup += celtotalsup + s rowinf += celtotalinf + s print(rowsup + ' \\\\') print(rowinf + ' \\\\') if True: #TODO correct this if statement... np.set_printoptions(formatter={'float': '{: 0.2f}'.format}) s = ' & ' r = np.array(np.round(corrs_plot, 2), str) corr = show if corrs_plot.shape[0] > 1: firstrow = "Features" + s + s for colindex, colvals in enumerate(list(cols.keys())): firstrow += cols[colvals] + s firstrow += ' \\\\' print(firstrow) for indf, feature in enumerate(list(features.keys())): for celindex, celvals in enumerate(list(cels.keys())): rowtotal = features[feature] + s + cels[ celvals] + s + corr + s for colindex, colvals in enumerate(list(cols.keys())): rowtotal += r[celindex, indf, colindex] + s print(rowtotal + ' \\\\') else: firstrow = "Features" + s for colindex, colvals in enumerate(list(cols.keys())): firstrow += cols[colvals] + s firstrow += ' \\\\' print(firstrow) for indf, feature in enumerate(list(features.keys())): row = features[feature] + s + corr + s for colindex, colvals in enumerate(list(cols.keys())): celtotal = r[0, indf, colindex] row += celtotal + s print(row + ' \\\\') # If requested, the plot is supressed now if supress_plot: if bootstrap_info['n']: return { 'corrs': corrs_plot, 'alt_test': alt_test, 'alt_test_confimax': alt_test_confimax, 'alt_test_confimin': alt_test_confimin, 'null_test': null_test, 'null_test_pval': null_test_pval, 'test_apply': test_apply } return {'corrs': corrs_plot, 'test_apply': test_apply} # creating empth plot! plt.close('all') figwidth = int((width * 1.) * 2) figheight = int((height * 1.) 
* 2) fig, axis = plt.subplots(nrows=height, ncols=width, sharex='col', sharey='row', figsize=(figwidth, figheight)) # if width or height == 1 (means 1 column, or 1 row) axis has only one # dimension, which make some problems, so it must be reshaped if height == 1 or width == 1: axis = axis.reshape(height, width) # Creatin the colormap and mapping the correlation values in the colors cmap = matplotlib.cm.get_cmap('coolwarm') normalize = matplotlib.colors.Normalize(vmin=cb_info['min'], vmax=cb_info['max']) colors = [cmap(normalize(value)) for value in corrs_plot] colors = np.array(colors) # label sizes: axis_title_font_size = 30 * scalefont axis_label_font_size = 25 * scalefont tick_label_font_size = 25 * scalefont anotation_font_size = 20 marker_size = 50 # Symbols/markers, lines, slines = ['-', '--', ':', '-.'] * 5 scolors = ['y', 'g', 'm', 'c', 'b', 'r'] * 5 smarker = ['o', 's', 'D', '^', '*', 'o', 's', 'x', 'D', '+', '^', 'v', '>'] # Adding the scatterplos and linear models angular_parameter = np.zeros([depth, height, width]) for indf, feature in enumerate(list(features.keys())): for colindex, colvals in enumerate(list(cols.keys())): for celindex, celvals in enumerate(list(cels.keys())): try: group = grouped.get_group((colvals, celvals)) except KeyError: continue # if ((not all(group[feature] == 0.0)) # and (not all(np.isnan(group[feature])))): datax, datay = tonparray(group[mainprop], group[feature]) if datax.tolist(): if test_apply[celindex, indf, colindex]: # Linear Regresion degreee = 1 parameters = np.polyfit(datax, datay, degreee) fit_fn = np.poly1d(parameters) angular_parameter[celindex, indf, colindex] = parameters[0] # variavel auxiliar pra nao plotar o linha obtida na # regressao alem dos dados do set (isso pode acontecer # para as variaveisb2 onde nem todos os samples # apresentam dados) yfited_values = np.array(fit_fn(datax)) argmin = np.argmin(datax) argmax = np.argmax(datax) trend_x = datax[[argmin, argmax]] trend_y = yfited_values[[argmin, argmax]] #print(colvals,celvals,len(datax),len(datay)) # plotando linha obtida com dados da regressao axis[indf, colindex].plot(trend_x, trend_y, marker=None, linestyle=slines[celindex], color='k') # plotando dados da celula axis[indf, colindex].scatter(datax, datay, marker=smarker[celindex], s=marker_size, linestyle='None', label=cels[celvals], color=colors[celindex, indf, colindex], alpha=0.8) if cels[celvals]: axis[indf, colindex].legend() axis[indf, colindex].xaxis.set_tick_params(direction='in', length=5, width=0.9) axis[indf, colindex].yaxis.set_tick_params(direction='in', length=5, width=0.9) # Ajuste do alinhamento dos labels, quantidade de casa deciamais, axis[0, colindex].xaxis.set_label_position("top") axis[0, colindex].set_xlabel(cols[colvals], va='center', ha='center', labelpad=15, size=axis_label_font_size) axis[indf, 0].set_ylabel(features[feature], va='center', ha='center', labelpad=40, size=axis_label_font_size, rotation=60) for tikslabel in axis[indf, 0].yaxis.get_ticklabels(): tikslabel.set_fontsize(tick_label_font_size) for tikslabel in axis[-1, colindex].xaxis.get_ticklabels(): tikslabel.set_fontsize(tick_label_font_size) tikslabel.set_rotation(60) #y = label['ticklabelssprecision'][1][indf] #x = label['ticklabelssprecision'][0] #print(list(axis[indf, colindex].xaxis.get_ticklabels())) #axis[indf, colindex].xaxis.set_major_formatter(FormatStrFormatter('%0.'+str(x)+'f')) #axis[indf, colindex].yaxis.set_major_formatter(FormatStrFormatter('%0.'+str(y)+'f')) #print(list(axis[indf, colindex].xaxis.get_ticklabels())) 
#axis[indf, colindex].xaxis.set_major_locator(plt.MaxNLocator(3)) #axis[indf, colindex].yaxis.set_major_locator(plt.MaxNLocator(3)) #print(list(axis[indf, colindex].xaxis.get_ticklabels())) #axis[indf, colindex].xaxis.set_ticklabels(axis[indf, colindex].xaxis.get_ticklabels(), {'fontweight':'bold'}) #axis[indf, colindex].yaxis.set_ticklabels(axis[indf, colindex].yaxis.get_ticklabels(), {'fontweight':'bold'}) #axis[indf, colindex].xaxis.set_major_formatter(width='bold') #axis[indf, colindex].yaxis.set_major_formatter(width='bold') # Colorbar, pra aprensetar as corres das correlacoes cax, _ = matplotlib.colorbar.make_axes(axis[0, 0], orientation='vertical', shrink=80., ancor=(2., 2.), pancor=False) cbar = matplotlib.colorbar.ColorbarBase(cax, cmap=cmap, norm=normalize) cax.set_position([0.93, 0.1, 0.04, 0.8]) cax.set_aspect(40) # boxY/boxX cbar.ax.tick_params(labelsize=tick_label_font_size, labelrotation=90) plt.subplots_adjust(left=0.125, right=0.92, bottom=0.1, top=0.9, wspace=0.0, hspace=0.0) # Defining what will be ploted if show == 'test': # The result of the test truefalse = {True: 'T', False: 'F'} binfo_plot = np.vectorize( lambda x, y: truefalse[x] + ',' + truefalse[y])(null_test, alt_test) if show == 'testred': def auxfunc(x, y): if not x and y: return 'x' if x and not y: return 'o' return '+' binfo_plot = np.vectorize(auxfunc)(null_test, alt_test) if show == 'confint': # The confidence intervals binfo_plot = np.vectorize( lambda x, y: str(round(x, 2)) + ',' + str(round(y, 2)))( alt_test_confimax, alt_test_confimin) if show == 'pval': # the p-value binfo_plot = np.array(np.round(null_test_pval, 5), dtype=str) if show == 'ang': # the angle of the linear model binfo_plot = np.array(np.round(angular_parameter, 2), dtype=str) if bootstrap_info['n'] and show in ['pval', 'test', 'ang', 'confint']: for findex, feature in enumerate(features): for colindex, colvals in enumerate(list(cols.keys())): for celindex, celvals in enumerate(list(cels.keys())): if not test_apply[celindex, findex, colindex]: binfo_plot[celindex, findex, colindex] = '' print(binfo_plot) for indf, feature in enumerate(features): for colindex in range(width): for celindex in range(depth): if test_apply[celindex, indf, colindex]: bbox = dict(facecolor=scolors[celindex], alpha=0.1) ypos = 0.155 + (depth - celindex - 1) * 0.2 axis[indf, colindex].text(0.06, ypos, binfo_plot[celindex, indf, colindex], fontsize=anotation_font_size, transform=axis[indf, colindex].transAxes, bbox=bbox) if bootstrap_info['n'] and show in ['testred']: for findex, feature in enumerate(features): for colindex, colvals in enumerate(list(cols.keys())): for celindex, celvals in enumerate(list(cels.keys())): if not test_apply[celindex, findex, colindex]: binfo_plot[celindex, findex, colindex] = ' ' print(binfo_plot) for indf, feature in enumerate(features): for colindex in range(width): text = '' for celindex in range(depth): text += binfo_plot[celindex, indf, colindex] if celindex < depth - 1: text += ',' if np.any(test_apply[:, indf, colindex]): bbox = dict(facecolor=scolors[0], alpha=0.01) ypos = 0.155 + (depth - celindex - 1) * 0.2 axis[indf, colindex].text(0.06, ypos, text, fontsize=anotation_font_size, transform=axis[indf, colindex].transAxes, bbox=bbox) # Adicionando os principais captions da figura. 
fig.text(0.01, 0.524, label['y'], ha='center', rotation='vertical', size=axis_title_font_size) fig.text(0.5, 0.95, label['title'], ha='center', size=axis_title_font_size) fig.text(0.5, 0.01, label['x'], ha='center', size=axis_title_font_size) cbar.set_label(cb_info['label'], size=axis_title_font_size) print(list(axis[0, 0].yaxis.get_ticklabels())) # Saving the figure to a file plt.savefig(figure_name, dpi=300) if bootstrap_info['n']: return { 'fig': fig, 'axis': axis, 'corrs': corrs_plot, 'alt_test': alt_test, 'alt_test_confimax': alt_test_confimax, 'alt_test_confimin': alt_test_confimin, 'null_test': null_test, 'null_test_pval': null_test_pval, 'angular_parameter': angular_parameter, 'test_apply': test_apply } return { 'fig': fig, 'axis': axis, 'corrs': corrs_plot, 'angular_parameter': angular_parameter, 'test_apply': test_apply }
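# NOTE: the grid-plotting routine above calls a tonparray helper that is not
# shown here. A minimal sketch of what it presumably does (align two pandas
# Series, drop incomplete pairs, return plain numpy arrays) follows; the name
# and exact behavior are assumptions inferred from the call site.
import numpy as np
import pandas as pd

def tonparray(series_x, series_y):
    """Hypothetical helper: paired numpy arrays with NaN pairs dropped."""
    x = pd.Series(series_x).reset_index(drop=True)
    y = pd.Series(series_y).reset_index(drop=True)
    mask = ~(x.isna() | y.isna())  # keep only complete (x, y) pairs
    return x[mask].to_numpy(), y[mask].to_numpy()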
def plot_3d_point_cloud(x, y, z, show=True, show_axis=False, in_u_sphere=False, marker='.', s=10, alpha=.8, figsize=(2.56, 2.56), elev=10, azim=240, axis=None, title=None, filename=None, colorize=None, *args, **kwargs): if axis is None: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111, projection='3d') else: ax = axis fig = axis if title is not None: plt.title(title) if colorize is not None: cm = plt.get_cmap(colorize) col = [cm(float(i) / (x.shape[0])) for i in range(x.shape[0])] sc = ax.scatter(x, y, z, marker=marker, s=s, alpha=alpha, c=col, *args, **kwargs) else: sc = ax.scatter(x, y, z, marker=marker, s=s, alpha=alpha, *args, **kwargs) # sc = ax.scatter(x, y, z, marker=marker, s=s, alpha=alpha, *args, **kwargs) ax.view_init(elev=elev, azim=azim) if in_u_sphere: ax.set_xlim3d(-0.5, 0.5) ax.set_ylim3d(-0.5, 0.5) ax.set_zlim3d(-0.5, 0.5) else: # Multiply with 0.7 to squeeze free-space. miv = 0.7 * np.min([np.min(x), np.min(y), np.min(z)]) mav = 0.7 * np.max([np.max(x), np.max(y), np.max(z)]) ax.set_xlim(miv, mav) ax.set_ylim(miv, mav) ax.set_zlim(miv, mav) #plt.tight_layout() if not show_axis: plt.axis('off') if 'c' in kwargs: plt.colorbar(sc) if filename is not None: plt.savefig(filename) if show: plt.show() plt.close('all') return fig
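# A quick usage sketch for plot_3d_point_cloud, assuming numpy and matplotlib
# are already imported as in the function above; the file name is illustrative.
import numpy as np
points = np.random.rand(1024, 3) - 0.5  # toy point cloud around the origin
fig = plot_3d_point_cloud(points[:, 0], points[:, 1], points[:, 2],
                          show=False, in_u_sphere=True,
                          filename='cloud_preview.png')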
def bstalt(data_x, data_y, corr_method=comp_spearman, alpha=0.05, nresamp=2000, hist=''): """This function bootstraps the Spearman rank correlation. Bootstrapped sample configurations whose elements are all equal are not considered, because the correlation can't be calculated for them. Parameters ---------- data_x, data_y : numpy arrays, (n,) shaped. Paired data to analyse. corr_method : a function (default = comp_spearman). A function that takes two sets of points (x, y in np arrays) and returns their correlation. alpha : float. (optional, default=0.05) The confidence limit. nresamp : integer. (optional, default=2000) The quantity of data resamples in the procedure. hist : string (optional, default=''). If hist == 'plot' a figure with the resulting histogram will be displayed on screen, otherwise the same figure will be saved to a file named hist + '.png'. Returns ------- reject_null : boolean. True if the null hypothesis could be rejected within the confidence level. confidence_interval : list. Lower and upper bounds of the bootstrap confidence interval. Example ------- >>> data_y = np.array([0.29210368, 0.09100691, 0.03445345, 0.1953896 , 0.09828076, 0.06194474, 0.07301951, 0.05899114, 0.05012644, 0.03095898, 0.10257979, 0.08892738, 0.05457695, 0.02178669, 0.0326735 ]) >>> data_x = np.array([-4.38 , -3.9418, -4.0413, -4.1549, -4.2052, -3.915 , -4.1796, -4.1815, -3.972 , -4.0494, -4.2255, -4.2772, -3.9947, -3.9589, -3.8393]) >>> bstalt(data_x, data_y)[0] True """ rs = corr_method(data_x, data_y) # original data correlation data = np.zeros(nresamp) # the data will be stored in this variable possible_data = np.array(range(0, len(data_x))) # auxiliary variable iteration = 0 # auxiliary variable while iteration < nresamp: # resampling pairs with replacement: resampled_pairs = resample(possible_data) resampled_data_x = data_x[resampled_pairs] resampled_data_y = data_y[resampled_pairs] # to guarantee that the elements are not all equal if np.all(resampled_data_x == resampled_data_x[0]): continue if np.all(resampled_data_y == resampled_data_y[0]): continue # calculating correlation for the resampled data bootstrapped_corr = corr_method(resampled_data_x, resampled_data_y) # storing the correlation data[iteration] = bootstrapped_corr iteration += 1 # Sorting data data.sort() index_lower_confidence = int(round(nresamp * alpha / 2.)) index_upper_confidence = int(round(nresamp - nresamp * alpha / 2.)) confidence_data = data[index_lower_confidence:index_upper_confidence] confidence_interval = [confidence_data[0], confidence_data[-1]] # H0 is rejected if 0 is not within the confidence interval: reject_null = np.all(np.sign(confidence_data) == np.sign(rs)) if hist != '': plt.close('all') plt.ylabel('Frequency') plt.xlabel('Correlation Coefficient') plt.xlim(-1.02, 1.02) bins = 201 ranges = (-1., 1.) plt.hist(confidence_data, bins=bins, range=ranges, label="Bootstrapped Data in the CI") plt.hist(data, bins=bins, histtype='step', range=ranges, label="Bootstrapped Data") plt.plot([rs, rs], [0, np.histogram(data, bins=bins, range=ranges)[0].max()], label="Sample Correlation") plt.plot([0, 0], [0, np.histogram(data, bins=bins, range=ranges)[0].max()], label="Correlation = 0") plt.legend() # plotting or saving to a file: if hist == 'plot': plt.show() else: plt.savefig(hist + ".png") return reject_null, confidence_interval
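# bstalt assumes two helpers, comp_spearman and resample, that are defined
# elsewhere. Minimal sketches consistent with how they are used above (both
# bodies are assumptions, not the original implementations):
import numpy as np
from scipy import stats

def comp_spearman(x, y):
    # Spearman rank correlation coefficient of two paired samples.
    return stats.spearmanr(x, y)[0]

def resample(indices):
    # One bootstrap resample: draw len(indices) items with replacement.
    return np.random.choice(indices, size=len(indices), replace=True)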
def run_one_step(self): """Plot channel profiles and slope-area diagnostics for the faulted landscape.""" # find the faulted node with the largest drainage area. largest_da = np.max(self._model.grid.at_node['drainage_area'][ self._model.boundary_handler['NormalFault'].faulted_nodes == True]) largest_da_ind = np.where( self._model.grid.at_node['drainage_area'] == largest_da)[0][0] start_node = self._model.grid.at_node['flow__receiver_node'][ largest_da_ind] (profile_IDs, dists_upstr) = analyze_channel_network_and_plot( self._model.grid, number_of_channels=1, starting_nodes=[start_node], create_plot=False) elevs = self._model.z[profile_IDs] self.relative_times.append(self._model.model_time / self._model.params['run_duration']) offset = np.min(elevs[0]) max_distance = np.max(dists_upstr[0][0]) self.channel_segments.append( np.array((dists_upstr[0][0], elevs[0] - offset)).T) self.xnormalized_segments.append( np.array((dists_upstr[0][0] / max_distance, elevs[0] - offset)).T) colors = cm.viridis_r(self.relative_times) xmin = [xy.min(axis=0)[0] for xy in self.channel_segments] ymin = [xy.min(axis=0)[1] for xy in self.channel_segments] xmax = [xy.max(axis=0)[0] for xy in self.channel_segments] ymax = [xy.max(axis=0)[1] for xy in self.channel_segments] fs = (8, 6) fig, ax = plt.subplots(figsize=fs, dpi=300) ax.set_xlim(0, max(xmax)) ax.set_ylim(0, max(ymax)) line_segments = LineCollection(self.channel_segments, colors=colors, linewidth=0.5) ax.add_collection(line_segments) yr = str(self._model.model_time / (1e6)).zfill(4) plt.savefig('profile_' + yr + '.png') plt.close() fig, ax = plt.subplots(figsize=fs, dpi=300) ax.set_xlim(0, 1) ax.set_ylim(0, max(ymax)) line_segments = LineCollection(self.xnormalized_segments, colors=colors, linewidth=0.5) ax.add_collection(line_segments) yr = str(self._model.model_time / (1e6)).zfill(4) plt.savefig('normalized_profile_' + yr + '.png') plt.close() plt.figure() plot_channels_in_map_view(self._model.grid, profile_IDs) plt.savefig('topography_' + yr + '.png') plt.close() plt.figure() imshow_grid(self._model.grid, self._model.grid.at_node['soil__depth'], cmap='viridis', limits=(0, 15)) plt.savefig('soil_' + yr + '.png') plt.close() plt.figure() imshow_grid(self._model.grid, self._model.grid.at_node['sediment__flux'], cmap='viridis') plt.savefig('sediment_flux_' + yr + '.png') plt.close() # U_fast, U_slow, U_back (and Ff below) are assumed module-level constants. U_eff = U_fast + U_back U_eff_slow = U_slow + U_back area = np.sort(self._model.grid.at_node['drainage_area'][ self._model.boundary_handler['NormalFault'].faulted_nodes == True]) area = area[area > 0] little_q = ( area * self._model.params['runoff_rate'])**self._model.params['m_sp'] #area_to_the_m = area ** self._model.params['m_sp'] detachment_prediction = ( (U_eff / (self._model.params['K_rock_sp'])) **(1.0 / self._model.params['n_sp']) * (1.0 / little_q)**(1.0 / self._model.params['n_sp'])) transport_prediction = ( ((U_eff * self._model.params['v_sc']) / (self._model.params['K_sed_sp'] * self._model.params['runoff_rate'])) + ((U_eff) / (self._model.params['K_sed_sp'])))**( 1.0 / self._model.params['n_sp']) * ( (1.0 / little_q)**(1.0 / self._model.params['n_sp'])) space_prediction = ( ((U_eff * self._model.params['v_sc']) * (1.0 - Ff) / (self._model.params['K_sed_sp'] * self._model.params['runoff_rate'])) + ((U_eff) / (self._model.params['K_rock_sp'])))**( 1.0 / self._model.params['n_sp']) * ( (1.0 / little_q)**(1.0 / self._model.params['n_sp'])) detachment_prediction_slow = ( (U_eff_slow / (self._model.params['K_rock_sp'])) **(1.0 / self._model.params['n_sp']) * (1.0 / little_q)**(1.0 /
self._model.params['n_sp'])) transport_prediction_slow = ( ((U_eff_slow * self._model.params['v_sc']) / (self._model.params['K_sed_sp'] * self._model.params['runoff_rate'])) + ((U_eff_slow) / (self._model.params['K_sed_sp'])))**( 1.0 / self._model.params['n_sp']) * ( (1.0 / little_q)**(1.0 / self._model.params['n_sp'])) space_prediction_slow = ( ((U_eff_slow * self._model.params['v_sc']) * (1.0 - Ff) / (self._model.params['K_sed_sp'] * self._model.params['runoff_rate'])) + ((U_eff_slow) / (self._model.params['K_rock_sp'])))**( 1.0 / self._model.params['n_sp']) * ( (1.0 / little_q)**(1.0 / self._model.params['n_sp'])) # TODO need to fix space predictions here to include new soil thickness. fs = (8, 6) fig, ax = plt.subplots(figsize=fs, dpi=300) plt.plot(area, detachment_prediction, 'c', lw=5, label='Detachment Prediction') plt.plot(area, transport_prediction, 'b', label='Transport Prediction') plt.plot(area, space_prediction, 'm', label='Space Prediction') plt.plot(area, detachment_prediction_slow, 'c', lw=5, alpha=0.3) plt.plot(area, transport_prediction_slow, 'b', alpha=0.3) plt.plot(area, space_prediction_slow, 'm', alpha=0.3) plt.plot(self._model.grid.at_node['drainage_area'][ self._model.boundary_handler['NormalFault'].faulted_nodes == True], self._model.grid.at_node['topographic__steepest_slope'] [self._model.boundary_handler['NormalFault'].faulted_nodes == True], 'k.', label='Fault Block Nodes') plt.plot(self._model.grid.at_node['drainage_area'] [self._model.boundary_handler['NormalFault'].faulted_nodes == False], self._model.grid.at_node['topographic__steepest_slope'] [self._model.boundary_handler['NormalFault'].faulted_nodes == False], 'r.', label='Unfaulted Nodes') plt.plot(self._model.grid.at_node['drainage_area'][profile_IDs], self._model.grid.at_node['topographic__steepest_slope'] [profile_IDs], 'g.', label='Main Channel Nodes') plt.legend() ax.set_xscale('log') ax.set_yscale('log') plt.xlabel('log 10 Area') plt.ylabel('log 10 Slope') plt.savefig('slope_area_' + yr + '.png') plt.close()
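# The predictions above are the standard steady-state stream-power forms; for
# the detachment-limited case, channel slope S satisfies
# S = (U / (K * q**m))**(1/n) with q = runoff * area. A self-contained sketch
# with illustrative constants (not the model's calibrated parameters):
import numpy as np

def detachment_limited_slope(area, U=1e-3, K=1e-5, runoff=1.0, m=0.5, n=1.0):
    # Steady-state channel slope for detachment-limited incision.
    q = runoff * area
    return (U / (K * q**m))**(1.0 / n)

areas = np.logspace(4, 8, 50)        # drainage areas (m^2), illustrative
slopes = detachment_limited_slope(areas)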
def run_analysis(idx, fdr=0.2, **kwargs): np.random.seed(8 + idx) torch.manual_seed(8 + idx) print('Generating synthetic covariates') X = generate_synthetic_X(**kwargs) print('Getting synthetic response') z, h_true, signal_indices, coefficients = generate_synthetic_z(X, **kwargs) if h_true.sum() == 0: print('WARNING: no signals!') raise Exception() print( f'{h_true.sum()} true positives, {(~h_true).sum()} nulls, and {len(signal_indices)} nonnull features.' ) direc = 'pure-synthetic' if not os.path.exists(f'data/{direc}'): os.makedirs(f'data/{direc}') #### Two-groups empirical bayes model #### print('Stage 1: Creating blackbox 2-groups model') fdr_model = BlackBoxTwoGroupsModel(X, z, fdr=fdr, estimate_null=True) print('Training') sys.stdout.flush() results = fdr_model.train(save_dir=f'data/{direc}/twogroups', verbose=False, batch_size=None, num_folds=5, num_epochs=100) # Save the Stage 1 significant experimental outcome results h_predictions = results['predictions'] bh_preds = bh_predictions( p_value_2sided(z, mu0=fdr_model.null_dist[0], sigma0=fdr_model.null_dist[1]), fdr) #### Posterior EB knockoffs model #### print('Stage 2: Empirical Bayes knockoffs') crt = PosteriorConditionalRandomizationTester(fdr_model, fdr=fdr) # Run a CRT for each feature crt.run(verbose=False) print('Getting the predictions') crt_results = crt.predictions() t_crt = crt_results['tstats'] h_crt = crt_results['discoveries'] # Try the model-X knockoffs approach using the same knockoff samples from knockoffs import knockoff_filter knockoff_preds = np.zeros(len(crt.tstats), dtype=bool) true_tstat = ( fdr_model.posteriors * np.log(fdr_model.posteriors) + (1 - fdr_model.posteriors) * np.log(1 - fdr_model.posteriors)).mean() true_tstat -= crt.tstats_mean true_tstat /= crt.tstats_std plt.hist(true_tstat - crt.tstats, bins=30) plt.savefig('plots/knockoffs-temp.pdf', bbox_inches='tight') plt.close() knockoff_preds[knockoff_filter(true_tstat - crt_results['tstats'], fdr, offset=1.0)] = True # Save everything to file np.save(f'data/{direc}/h_predictions_{idx}.npy', h_predictions) np.save(f'data/{direc}/bh_predictions_{idx}.npy', bh_preds) np.save(f'data/{direc}/feature_predictions_{idx}.npy', h_crt) np.save(f'data/{direc}/h_priors_{idx}.npy', fdr_model.priors) np.save(f'data/{direc}/h_posteriors_{idx}.npy', fdr_model.posteriors) np.save(f'data/{direc}/z_empirical_null_{idx}.npy', fdr_model.null_dist) np.save(f'data/{direc}/z_alternative_{idx}.npy', [fdr_model.alt_dist.x, fdr_model.alt_dist.y]) np.save(f'data/{direc}/knockoff_predictions_{idx}.npy', knockoff_preds) np.save(f'data/{direc}/knockoff_empirical_null_{idx}.npy', [crt.null_dist.bins, crt.null_dist.w]) np.save(f'data/{direc}/knockoff_alternative_{idx}.npy', [crt.alt_dist.bins, crt.alt_dist.w]) np.save(f'data/{direc}/knockoff_prior_{idx}.npy', [crt.pi0]) np.save(f'data/{direc}/knockoff_posteriors_{idx}.npy', crt.posteriors) # Save the ground truth np.save(f'data/{direc}/h_true_{idx}.npy', h_true) np.save(f'data/{direc}/signal_indices_{idx}.npy', signal_indices) np.save(f'data/{direc}/signal_coefs_{idx}.npy', coefficients) # Report results tpp1_debt = (h_true & h_predictions).sum() / max(1, h_true.sum()) tpp1_bh = (h_true & bh_preds).sum() / max(1, h_true.sum()) tpp2_debt = len([s for s in signal_indices if h_crt[s] ]) / len(signal_indices) tpp2_knockoffs = len([s for s in signal_indices if knockoff_preds[s] ]) / len(signal_indices) fdp1_debt = ((~h_true) & h_predictions).sum() / max(1, h_predictions.sum()) fdp1_bh = ((~h_true) & bh_preds).sum() / max(1, bh_preds.sum()) 
fdp2_debt = len([ s for s, h in enumerate(h_crt) if h and s not in signal_indices ]) / max(1, h_crt.sum()) fdp2_knockoffs = len([ s for s, h in enumerate(knockoff_preds) if h and s not in signal_indices ]) / max(1, knockoff_preds.sum()) print('') print( f'DEBT discoveries: {h_predictions.sum()} TPP: {tpp1_debt*100:.2f}% FDP: {fdp1_debt*100:.2f}%' ) print( f'BH discoveries: {bh_preds.sum()} TPP: {tpp1_bh*100:.2f}% FDP: {fdp1_bh*100:.2f}%' ) print( f'DEBT features: {h_crt.sum()} TPP: {tpp2_debt*100:.2f}% FDP: {fdp2_debt*100:.2f}%' ) print( f'Knockoffs features: {knockoff_preds.sum()} TPP: {tpp2_knockoffs*100:.2f}% FDP: {fdp2_knockoffs*100:.2f}%' )
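# run_analysis relies on a bh_predictions helper; a minimal Benjamini-Hochberg
# step-up sketch consistent with its use above (the name and signature are
# assumptions inferred from the call site):
import numpy as np

def bh_predictions(p_values, fdr):
    # Reject the k smallest p-values, where k is the largest (1-based) index
    # with p_(k) <= (k / m) * fdr after sorting ascending.
    p_values = np.asarray(p_values)
    m = len(p_values)
    order = np.argsort(p_values)
    thresholds = fdr * np.arange(1, m + 1) / m
    below = np.nonzero(p_values[order] <= thresholds)[0]
    rejected = np.zeros(m, dtype=bool)
    if below.size:
        rejected[order[:below[-1] + 1]] = True
    return rejected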
def plot_triangle(sim_samples, gp_samples, burnin_frac=0.1, emulator=True, gp_error=False): if gp_error and emulator: label_gp = 'GP (Error)' elif emulator and not gp_error: label_gp = 'GP (Mean)' else: label_gp = 'Simulator (MOPED)' ndim = sim_samples.shape[-1] names = ["x%s" % i for i in range(ndim)] labels = [ r"\Omega_{m}", r"w_{0}", r"M_{B}", r"\delta M", r"\alpha", r"\beta" ] # for the simulator burnin = int(burnin_frac * sim_samples.shape[1]) samples_exact = sim_samples[:, burnin:, :].reshape((-1, ndim)) cut_samps = samples_exact[samples_exact[:, 0] >= 0.0, :] samples1 = MCSamples(samples=cut_samps, names=names, labels=labels, ranges={'x0': (0.0, None)}) # for the emulator burnin = int(burnin_frac * gp_samples.chain.shape[1]) samples_emu = gp_samples.chain[:, burnin:, :].reshape((-1, ndim)) cut_samps = samples_emu[samples_emu[:, 0] >= 0.0, :] samples2 = MCSamples(samples=cut_samps, names=names, labels=labels, ranges={'x0': (0.0, None)}) # setups for plotting sim_color = '#EEC591' gp_color = 'Blue' alpha_tri = 0.1 red_patch = mpatches.Patch(color=sim_color, label='Simulator', alpha=alpha_tri) gp_line = Line2D([0], [0], color=gp_color, linewidth=3, linestyle='--', label=label_gp) rec_leg = [red_patch, gp_line] contours = np.array([0.68, 0.95]) G = plots.getSubplotPlotter(subplot_size=3.5) samples1.updateSettings({'contours': [0.68, 0.95]}) G.triangle_plot([samples1], filled=True, line_args={ 'lw': 3, 'color': sim_color }, contour_colors=[sim_color]) G.settings.num_plot_contours = 2 plt.legend(handles=rec_leg, loc='best', prop={'size': 25}, bbox_to_anchor=(0.7, 6.0), borderaxespad=0.) G.settings.alpha_filled_add = alpha_tri for i in range(0, 6): for j in range(0, i + 1): if i != j: ax = G.subplots[i, j] a, b = G.get_param_array(samples2, ['x' + str(j), 'x' + str(i)]) density = G.sample_analyser.get_density_grid(samples2, a, b) density.contours = density.getContourLevels(contours) contour_levels = density.contours ax.contour(density.x, density.y, density.P, sorted(contour_levels), colors=gp_color, linewidths=3, linestyles='--') ax.tick_params(labelsize=20) ax.yaxis.label.set_size(20) ax.xaxis.label.set_size(20) else: ax = G.subplots[i, j] dense = samples2.get1DDensity('x' + str(i)) dense.normalize(by='max') ax.plot(dense.x, dense.P, lw=3, c=gp_color, linestyle='--') ax.tick_params(labelsize=20) ax.yaxis.label.set_size(20) ax.xaxis.label.set_size(20) plt.savefig('images/triangle_plot.jpg', bbox_inches='tight', transparent=False) plt.close()
def kohonen(): """Example for using create_data, plot_data and som_step. """ plb.close('all') dim = 28 * 28 data_range = 255.0 # load in data and labels data = np.array(np.loadtxt('data.txt')) labels = np.loadtxt('labels.txt') # select 4 digits name = 'Seth Vanderwilt' # REPLACE BY YOUR OWN NAME targetdigits = name2digits( name) # assign the four digits that should be used print(targetdigits) # output the digits that were selected # this selects all data vectors that corresponds to one of the four digits data = data[np.logical_or.reduce([labels == x for x in targetdigits]), :] # get the label for each data vector corresponding to one of the four digits labels = labels[np.logical_or.reduce([labels == x for x in targetdigits])] dy, dx = data.shape #set the size of the Kohonen map. In this case it will be 6 X 6 size_k = 6 # default 6, also tried 7, 8, 10 #set the width of the neighborhood via the width of the gaussian that #describes it sigma = 3.0 # default 3, also tried 0.5, 1, 2, 5 #initialise the centers randomly centers = np.random.rand(size_k**2, dim) * data_range #build a neighborhood matrix neighbor = np.arange(size_k**2).reshape((size_k, size_k)) #set the learning rate eta = 0.5 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE #set the maximal iteration count tmax = 10000 # this might or might not work; use your own convergence criterion #tmax = 500 # this might or might not work; use your own convergence criterion #set the random order in which the datapoints should be presented i_random = np.arange(tmax) % dy np.random.shuffle(i_random) # NOTE change to false if not on question 5 decreasing_width = True print("initial sigma: " + str(sigma)) for t, i in enumerate(i_random): if decreasing_width: sigma *= 0.9999 # decrease by 0.1% som_step(centers, data[i, :], neighbor, eta, sigma) print("final sigma: " + str(sigma)) # Our centers print(centers) # NOTE assign each center the label of the nearest datapoint assigned_digit = [] for ci in range(len(centers)): nearest_datapoint = np.argmin(np.sum((centers[ci] - data[:])**2, 1)) # Get label of nearest datapoint assigned_digit.append(int(labels[nearest_datapoint])) # for visualization, you can use this: for i in range(size_k**2): ax = plb.subplot(size_k, size_k, i + 1) ax.set_title(assigned_digit[i]) plb.imshow(np.reshape(centers[i, :], [28, 28]), interpolation='bilinear') plb.axis('off') # fix display problem plb.subplots_adjust(hspace=0.5) # leave the window open at the end of the loop plb.show() plb.draw()
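# kohonen() calls som_step, which is not shown. A standard Kohonen update
# consistent with the call som_step(centers, data[i, :], neighbor, eta, sigma)
# might look like this (an assumed sketch, not the course-provided code):
import numpy as np

def som_step(centers, datapoint, neighbor, eta, sigma):
    # Best-matching unit (BMU): the center closest to the datapoint.
    bmu = np.argmin(np.sum((centers - datapoint)**2, axis=1))
    bmu_pos = np.array(np.nonzero(neighbor == bmu)).flatten()
    # Move every center toward the datapoint, weighted by a Gaussian of its
    # grid distance to the BMU.
    for unit in range(centers.shape[0]):
        pos = np.array(np.nonzero(neighbor == unit)).flatten()
        h = np.exp(-np.sum((pos - bmu_pos)**2) / (2.0 * sigma**2))
        centers[unit] += eta * h * (datapoint - centers[unit])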
def shap_deep_explainer(self, model_no, num_reference, img_input, norm_reverse=True, blend_original_image=False, gif_fps=1, ranked_outputs=1, base_dir_save='/tmp/DeepExplain'): # region mini-batch because of GPU memory limitation list_shap_values = [] batch_size = self.dicts_models[model_no]['batch_size'] split_times = math.ceil(num_reference / batch_size) for i in range(split_times): #shap 0.26 #shap 0.4, check_additivity=False # shap_values_tmp1 = self.list_e[model_no][i].shap_values(img_input, ranked_outputs=ranked_outputs, # check_additivity=check_additivity) shap_values_tmp1 = self.list_e[model_no][i].shap_values( img_input, ranked_outputs=ranked_outputs, ) # shap_values ranked_outputs # [0] [0] (1,299,299,3) # [1] predict_class array shap_values_copy = copy.deepcopy(shap_values_tmp1) list_shap_values.append(shap_values_copy) for i in range(ranked_outputs): for j in range(len(list_shap_values)): if j == 0: shap_values_tmp2 = list_shap_values[0][0][i] else: shap_values_tmp2 += list_shap_values[j][0][i] shap_values_results = copy.deepcopy(list_shap_values[0]) shap_values_results[0][i] = shap_values_tmp2 / split_times # endregion # region save files str_uuid = str(uuid.uuid1()) list_classes = [] list_images = [] for i in range(ranked_outputs): predict_class = int( shap_values_results[1][0][i]) # numpy int 64 - int list_classes.append(predict_class) save_filename = os.path.join( base_dir_save, str_uuid, 'Shap_Deep_Explainer{}.jpg'.format(predict_class)) os.makedirs(os.path.dirname(save_filename), exist_ok=True) list_images.append(save_filename) pred_class_num = len(shap_values_results[0]) if blend_original_image: if norm_reverse: img_original = np.uint8(input_norm_reverse(img_input[0])) else: img_original = np.uint8(img_input[0]) img_original_file = os.path.join(os.path.dirname(list_images[0]), 'deepshap_original.jpg') cv2.imwrite(img_original_file, img_original) for i in range(pred_class_num): # predict_max_class = attributions[1][0][i] attribution1 = shap_values_results[0][i] # attributions.shape: (1, 299, 299, 3) data = attribution1[0] data = np.mean(data, -1) abs_max = np.percentile(np.abs(data), 100) abs_min = abs_max # dx, dy = 0.05, 0.05 # xx = np.arange(0.0, data1.shape[1], dx) # yy = np.arange(0.0, data1.shape[0], dy) # xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy) # extent = xmin, xmax, ymin, ymax # cmap = 'RdBu_r' # cmap = 'gray' cmap = 'seismic' plt.axis('off') # plt.imshow(data1, extent=extent, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max) # plt.imshow(data, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max) # fig = plt.gcf() # fig.set_size_inches(2.99 / 3, 2.99 / 3) # dpi = 300, output = 700*700 pixels plt.gca().xaxis.set_major_locator(plt.NullLocator()) plt.gca().yaxis.set_major_locator(plt.NullLocator()) plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0) plt.margins(0, 0) if blend_original_image: plt.imshow(data, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max) save_filename1 = list_images[i] plt.savefig(save_filename1, bbox_inches='tight', pad_inches=0) plt.close() img_heatmap = cv2.imread(list_images[i]) (tmp_height, tmp_width) = img_original.shape[:-1] img_heatmap = cv2.resize(img_heatmap, (tmp_width, tmp_height)) img_heatmap_file = os.path.join( os.path.dirname(list_images[i]), 'deepshap_{0}.jpg'.format(i)) cv2.imwrite(img_heatmap_file, img_heatmap) dst = cv2.addWeighted(img_original, 0.65, img_heatmap, 0.35, 0) img_blend_file = os.path.join( os.path.dirname(list_images[i]), 
'deepshap_blend_{0}.jpg'.format(i)) cv2.imwrite(img_blend_file, dst) # region create gif import imageio mg_paths = [ img_original_file, img_heatmap_file, img_blend_file ] gif_images = [] for path in mg_paths: gif_images.append(imageio.imread(path)) img_file_gif = os.path.join(os.path.dirname(list_images[i]), 'deepshap_{0}.gif'.format(i)) imageio.mimsave(img_file_gif, gif_images, fps=gif_fps) list_images[i] = img_file_gif # endregion else: plt.imshow(data, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max) save_filename1 = list_images[i] plt.savefig(save_filename1, bbox_inches='tight', pad_inches=0) plt.close() # endregion return list_classes, list_images
def mypltshow(fnfig): plt.savefig(fnfig) myshell("sleep 1;gv " + fnfig + "&") plt.close() #plt.show()
def plot_climate_cflux(cflux_data: Tuple, climate_data: np.array, time: np.array, plot_path: str = ".") -> None: """The function plots the carbon flux together with climatological data. Args: cflux_data (Tuple): Arrays from the cflux output file (nee, gpp, biomass, deadwood). climate_data (np.array): Climate input data; rows are rain, temperature and irradiance. time (np.array): Time axis of the simulation. plot_path (str): The path to the output graph. Returns: None: And plots in the specified folder """ av_rain = climate_data[0, :] av_temp = climate_data[1, :] av_irradiance = climate_data[2, :] #fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5,1, figsize=(12,8)) nee_arr, gpp_arr, biomass_arr, deadwood_arr = cflux_data strt = 0 end = climate_data.shape[1] end = 2000 # hard-coded cut to plot a shorter window av_rain = climate_data[0, strt:end] av_temp = climate_data[1, strt:end] av_irradiance = climate_data[2, strt:end] time = time[strt:end] # Multiplying by 100 for unit conversion nee_arr = nee_arr[strt:end] * 100 gpp_arr = gpp_arr[strt:end] * 100 biomass_arr = biomass_arr[strt:end] * 100 deadwood_arr = deadwood_arr[strt:end] * 100 if strt > 0 or end < climate_data.shape[1]: plot_path = plot_path.split('.')[0] + '_short.' + plot_path.split( '.')[1] fig, (ax1, ax3, ax4, ax5, ax6, ax7, ax8) = plt.subplots(7, 1, figsize=(16, 14)) ax1.plot(time, av_irradiance, color="darkorange") ax1.set_xlim(left=time[0], right=time[-1]) ax1.set_ylabel(r"Solar Irradiance [$\mu mol$ $s^{-1}$ $m^{-2}$]") # ax2.plot(time, av_pet) ax3.plot(time, av_temp, color="indianred") ax3.set_xlim(left=time[0], right=time[-1]) ax3.set_ylabel(u'Temperature $[\u00B0C]$') ax4.bar(time, av_rain, color="mediumblue") ax4.set_xlim(left=time[0], right=time[-1]) ax4.set_ylabel("Precipitation [$mm$ $d^{-1}$]") # Cflux plots ax5.plot(time, nee_arr, color="lightblue") ax5.plot(time, np.mean(nee_arr, axis=1), color="k") #nee_positive = np.where(nee_extend<0, 0, nee_extend) #nee_negative = np.where(nee_extend>0, 0, nee_extend) # ax5.fill_between(time, nee_positive, color = "slateblue") # ax5.fill_between(time, nee_negative, color = "lightcoral") ax5.axhline(color="k", linestyle="--") # ax5.set_ylabel("NEE [$t_C$ $ha^{-1}$ $a^{-1}$]") ax5.set_ylabel("NEE [$g_C$ $m^{-2}$ $a^{-1}$]") ax5.set_xlabel("Time [Years]") ax5.set_xlim(left=time[0], right=time[-1]) # We are doing a custom hline here y = np.mean(np.mean(gpp_arr, axis=1)) ax6.plot(time, gpp_arr, color="lightblue") ax6.plot(time, np.mean(gpp_arr, axis=1), color="k") ax6.axhline(y=y, color="k", linestyle="--") # ax6.set_ylabel("GPP [$t_C$ $ha^{-1}$ $a^{-1}$]") ax6.set_ylabel("GPP [$g_C$ $m^{-2}$ $a^{-1}$]") ax6.set_xlabel("Time [Years]") ax6.set_xlim(left=time[0], right=time[-1]) y = np.mean(np.mean(biomass_arr, axis=1)) ax7.plot(time, biomass_arr, color="lightblue") ax7.plot(time, np.mean(biomass_arr, axis=1), color="k") ax7.axhline(y=y, color="k", linestyle="--") # ax7.set_ylabel("Cpool_Biomass [$t_C$ $ha^{-1}$]") ax7.set_ylabel("Cpool_Biomass [$g_C$ $m^{-2}$]") ax7.set_xlabel("Time [Years]") ax7.set_xlim(left=time[0], right=time[-1]) y = np.mean(np.mean(deadwood_arr, axis=1)) ax8.plot(time, deadwood_arr, color="lightblue") ax8.plot(time, np.mean(deadwood_arr, axis=1), color="k") ax8.axhline(y=y, color="k", linestyle="--") # ax8.set_ylabel("Cpool_Deadwood [$t_C$ $ha^{-1}$]") ax8.set_ylabel("Cpool_Deadwood [$g_C$ $m^{-2}$]") ax8.set_xlabel("Time [Years]") ax8.set_xlim(left=time[0], right=time[-1]) plt.tight_layout() plt.savefig(plot_path) plt.close() ### Another figure # fig, (ax1, ax2) = plt.subplots(2,1, figsize=(8,8)) # av_gpp = np.mean(gpp_arr, axis =1) # ax1.scatter(av_gpp, av_temp, color = "rebeccapurple") # ax1.set_ylabel("av_temp [..]") #
ax1.set_xlabel("gpp [..]") # ax2.scatter(av_gpp, av_rain, color = "rebeccapurple") # ax2.set_ylabel("av_rainp [..]") # ax2.set_xlabel("gpp [..]") # plt.savefig('plots/scatter.png') return
def plot_3d_point_cloud(batch_img1, plot_kwargs, *args, **kwargs): ith = plot_kwargs.get('ith', 0) epoch = plot_kwargs.get('epoch', -1) show = plot_kwargs.get('show', False) show_axis = plot_kwargs.get('show_axis', True) in_u_sphere = plot_kwargs.get('in_u_sphere', False) marker = plot_kwargs.get('marker', '.') s = plot_kwargs.get('s', 8) alpha = plot_kwargs.get('alpha' , .8) figsize = plot_kwargs.get('figsize' , (5,5)) elev = plot_kwargs.get('elev' , 10) azim = plot_kwargs.get('azim' , 240) axis = plot_kwargs.get('axis' , None) title = plot_kwargs.get('title' , None) save = plot_kwargs.get('save' , True) save_dir = plot_kwargs.get('save_dir' , './') file_name = plot_kwargs.get('file_name' , None) x = batch_img1[ith][:, 0] y = batch_img1[ith][:, 1] z = batch_img1[ith][:, 2] if axis is None: fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111, projection='3d') else: ax = axis fig = axis if title is not None: plt.title(title) sc = ax.scatter(x, y, z, marker=marker, s=s, alpha=alpha, *args, **kwargs) ax.view_init(elev=elev, azim=azim) if in_u_sphere: ax.set_xlim3d(-0.5, 0.5) ax.set_ylim3d(-0.5, 0.5) ax.set_zlim3d(-0.5, 0.5) else: miv = 0.7 * np.min([np.min(x), np.min(y), np.min(z)]) # Multiply with 0.7 to squeeze free-space. mav = 0.7 * np.max([np.max(x), np.max(y), np.max(z)]) ax.set_xlim(miv, mav) ax.set_ylim(miv, mav) ax.set_zlim(miv, mav) plt.tight_layout() if not show_axis: plt.axis('off') if 'c' in kwargs: plt.colorbar(sc) if show: plt.show() if save: print(f'image saved at {save_dir}/{file_name}') plt.savefig(osp.join(save_dir, file_name)) plt.close('all') # return fig
def plot_qpi_sphere(qpi_real, qpi_sim, path=None, simtype="simulation"): """Plot QPI sphere analysis data""" fig = plt.figure(figsize=(9, 5)) px_um = qpi_real["pixel size"] * 1e6 radius_um = qpi_sim["sim radius"] * 1e6 center = qpi_sim["sim center"] index = qpi_sim["sim index"] real_phase = qpi_real.pha kw_phase = { "px_um": px_um, "cbar": True, "imtype": "phase", "vmin": real_phase.min(), "vmax": real_phase.max(), } real_inten = qpi_real.amp**2 kw_inten = { "px_um": px_um, "cbar": True, "imtype": "intensity", "vmin": real_inten.min(), "vmax": real_inten.max(), } # real phase ax1 = plt.subplot(231, title="data (phase)") plot_image(data=real_phase, ax=ax1, **kw_phase) # simulated phase ax2 = plt.subplot(232, title=simtype + " (phase)") plot_image(data=qpi_sim.pha, ax=ax2, **kw_phase) ax2.text( 0.01, .99, "index: {:.5f}\n".format(index) + "radius: {:.3f}µm".format(radius_um), horizontalalignment="left", verticalalignment="top", color="w", transform=ax2.transAxes, ) # phase residuals ax3 = plt.subplot(233, title="phase residuals") errmax = qpi_sim.pha.max() * .2 plot_image(data=qpi_sim.pha - real_phase, ax=ax3, imtype="phase error", vmax=errmax, vmin=-errmax, px_um=px_um) # real intensity ax4 = plt.subplot(234, title="data (intensity)") plot_image(data=real_inten, ax=ax4, **kw_inten) # computed intensity ax5 = plt.subplot(235) if len(simtype) > 9: # sometimes the title is too long and is printed on top of the units kw5 = {"loc": "right", "ha": "right"} else: kw5 = {} ax5.set_title(simtype + " (intensity)", **kw5) plot_image(data=qpi_sim.amp**2, ax=ax5, **kw_inten) # plot detected radius for ax in [ax1, ax2, ax4, ax5]: circ = mpl.patches.Circle( xy=((center[1] + .5) * px_um, (center[0] + .5) * px_um), radius=radius_um, facecolor="none", edgecolor="w", ls=(0, (3, 8)), lw=.5, ) ax.add_patch(circ) # line plot through center ax6 = plt.subplot(236, title="phase line plot") if int(center[0]) >= 0 and int(center[0]) < qpi_sim.shape[0]: x = np.arange(qpi_real.shape[1]) * px_um ax6.plot(x, qpi_sim.pha[int(center[0])], label=simtype) ax6.plot(x, qpi_real.pha[int(center[0])], label="data") ax6.set_xlabel("[µm]") ax6.legend(loc="center right") # remove unused labels for ax in [ax1, ax2, ax3]: ax.set_xlabel("") for ax in [ax2, ax3, ax5]: ax.set_ylabel("") plt.tight_layout(rect=(0, 0, 1, .93), pad=.1, h_pad=.6) # add identifier fig.text(x=.5, y=.99, s=qpi_sim["identifier"], verticalalignment="top", horizontalalignment="center", fontsize=14) if path: fig.savefig(path) plt.close() else: return fig
def index(self, document): inv = obspy.read_inventory(document, format="stationxml") indices = [] for network in inv: for station in network: for channel in station: if channel.response: if channel.response.instrument_sensitivity: _i = channel.response.instrument_sensitivity total_sensitivity = _i.value sensitivity_frequency = _i.frequency units_after_sensitivity = _i.input_units else: total_sensitivity = None sensitivity_frequency = None units_after_sensitivity = None else: total_sensitivity = None sensitivity_frequency = None units_after_sensitivity = None index = { # Information. "network": network.code, "network_name": network.description, "station": station.code, "station_name": station.description if station.description else station.site.name, "location": channel.location_code, "channel": channel.code, # Coordinates and orientation. "latitude": channel.latitude, "longitude": channel.longitude, "elevation_in_m": channel.elevation, "depth_in_m": channel.depth, "dip": channel.dip, "azimuth": channel.azimuth, # Dates. "start_date": str(channel.start_date), "end_date": str(channel.end_date) if channel.end_date is not None else None, # This is strictly speaking not channel level # information but needed to for a fast generation of # the station level fdsnws responses. "station_creation_date": str(station.creation_date) if station.creation_date is not None else None, # Characteristics. "sample_rate": float(channel.sample_rate), "sensor_type": channel.sensor.type if channel.sensor else None, # Some things have to be extracted from the response. "total_sensitivity": total_sensitivity, "sensitivity_frequency": sensitivity_frequency, "units_after_sensitivity": units_after_sensitivity, # Geometry for PostGIS. "geometry": [Point(channel.longitude, channel.latitude)], } try: plt.close() except: pass # Sometimes fails. Wrap in try/except. try: # Plot response. with io.BytesIO() as plot: channel.plot(min_freq=1E-3, outfile=plot) plot.seek(0) index["attachments"] = { "response": { "content-type": "image/png", "data": plot.read() } } except Exception: pass finally: try: plt.close() except: pass indices.append(index) return indices
pylab.xlabel("Data as of " + lastdate) pylab.title('Security Space Survey of\nPublic Subversion DAV Servers') # End drawing ########################################################### png = open(OUTPUT_FILE, 'w') pylab.savefig(png) png.close() os.rename(OUTPUT_FILE, OUTPUT_FILE + ".tmp.png") try: im = Image.open(OUTPUT_FILE + ".tmp.png", 'r') (width, height) = im.size print("Original size: %d x %d pixels" % (width, height)) scale = float(OUTPUT_IMAGE_WIDTH) / float(width) width = OUTPUT_IMAGE_WIDTH height = int(float(height) * scale) print("Final size: %d x %d pixels" % (width, height)) im = im.resize((width, height), Image.ANTIALIAS) im.save(OUTPUT_FILE, im.format) os.unlink(OUTPUT_FILE + ".tmp.png") except Exception, e: sys.stderr.write("Error attempting to resize the graphic: %s\n" % (str(e))) os.rename(OUTPUT_FILE + ".tmp.png", OUTPUT_FILE) raise pylab.close() if __name__ == '__main__': dates, counts = load_stats() draw_graph(dates, counts) print("Don't forget to update ../../www/svn-dav-securityspace-survey.html!")
def plot_options_greedy(self, sess, coord, saver): eigenvectors_path = os.path.join( os.path.join(self.config.stage_logdir, "models"), "eigenvectors.npy") eigenvalues_path = os.path.join( os.path.join(self.config.stage_logdir, "models"), "eigenvalues.npy") eigenvectors = np.load(eigenvectors_path) eigenvalues = np.load(eigenvalues_path) for k in ["poz", "neg"]: for option in range(len(eigenvalues)): # eigenvalue = eigenvalues[option] eigenvector = eigenvectors[ option] if k == "poz" else -eigenvectors[option] prefix = str(option) + '_' + k + "_" plt.clf() with sess.as_default(), sess.graph.as_default(): for idx in range(self.nb_states): dx = 0 dy = 0 d = False s, i, j = self.env.get_state(idx) if not self.env.not_wall(i, j): plt.gca().add_patch( patches.Rectangle( (j, self.config.input_size[0] - i - 1), # (x,y) 1.0, # width 1.0, # height facecolor="gray")) continue # Image.fromarray(np.asarray(scipy.misc.imresize(s, [512, 512], interp='nearest'), np.uint8)).show() feed_dict = {self.orig_net.observation: np.stack([s])} fi = sess.run(self.orig_net.fi, feed_dict=feed_dict)[0] transitions = [] terminations = [] for a in range(self.action_size): s1, r, d, _ = self.env.fake_step(a) feed_dict = { self.orig_net.observation: np.stack([s1]) } fi1 = sess.run(self.orig_net.fi, feed_dict=feed_dict)[0] transitions.append( self.cosine_similarity((fi1 - fi), eigenvector)) terminations.append(d) transitions.append( self.cosine_similarity(np.zeros_like(fi), eigenvector)) terminations.append(True) a = np.argmax(transitions) # if a == 4: # d = True if a == 0: # up dy = 0.35 elif a == 1: # right dx = 0.35 elif a == 2: # down dy = -0.35 elif a == 3: # left dx = -0.35 if terminations[a] or np.all( transitions[a] == np.zeros_like( fi)): # termination circle = plt.Circle( (j + 0.5, self.config.input_size[0] - i + 0.5 - 1), 0.025, color='k') plt.gca().add_artist(circle) continue plt.arrow(j + 0.5, self.config.input_size[0] - i + 0.5 - 1, dx, dy, head_width=0.05, head_length=0.05, fc='k', ec='k') plt.xlim([0, self.config.input_size[1]]) plt.ylim([0, self.config.input_size[0]]) for i in range(self.config.input_size[1]): plt.axvline(i, color='k', linestyle=':') plt.axvline(self.config.input_size[1], color='k', linestyle=':') for j in range(self.config.input_size[0]): plt.axhline(j, color='k', linestyle=':') plt.axhline(self.config.input_size[0], color='k', linestyle=':') plt.savefig( os.path.join( self.summary_path, "SuccessorFeatures_" + prefix + 'policy.png')) plt.close()
def cov_plot(self, matrix, station="", hour="", date="", averaged=""): """ Basic plot for the covariance matrix """ var = self.var_dics[self.var]['name'] fig, ax = plt.subplots() date = self.date_prettyfier(date) hour = str(hour).replace('0', '00:00').replace('1', '12:00') # assumes hour is '0' or '1' if not averaged: title = "Stat: " + station + ', H: ' + hour + ', Date: ' + date + ', ' + var filename = 'Cov_' + station + '_hour_' + hour.replace( ':', '') + '_date_' + str(date).replace('/', '') + '_' + var elif averaged: title = var.replace( 'temp', 'Temp.') + " , Stat: " + station + ', H: ' + str( hour) + ', Date: ' + str(date) filename = 'Cov_' + station + '_hour_' + str(hour).replace( ':', '') + '_averaged_' + str(date).replace('/', '') + '_' + var plt.title(title.replace('_', ' '), y=1.03, fontsize=self.font - 2) num = len(matrix[0, :]) Num = range(num) vmin, vmax = -3, 3 if self.var == 'direction': vmin, vmax = -10, 10 color_map = plt.imshow( matrix, interpolation='nearest', cmap='RdYlBu', vmin=vmin, vmax=vmax ) # nearest serves for a discrete grid # cmaps blue, seismic plt.ylim(-0.5, 15.5) plt.xlim(-0.5, 15.5) plt.xticks(Num, Num) plt.xlabel('Pressure level an_dep [hPa]', fontsize=self.font - 2) plt.yticks(Num, Num) plt.ylabel('Pressure level fg_dep [hPa]', fontsize=self.font - 2) ax.set_xticklabels(labels=self.pretty_pressure, fontsize=self.font - 4, rotation=45) ax.set_yticklabels(labels=self.pretty_pressure, fontsize=self.font - 4) bar = plt.colorbar() bar.ax.set_ylabel("Covariance", fontsize=self.font) for i in Num: # creating text labels for j in Num: value = '{0:.2f}'.format(matrix[i, j]) text = ax.text(j, i, value, ha='center', va='center', color='black', fontsize=5) if not os.path.isdir('plots/covariances/' + station): os.mkdir('plots/covariances/' + station) plt.savefig('plots/covariances/' + station + '/' + filename + '.png', bbox_inches='tight', dpi=200) plt.close()
def extract_data(files): # find max test accuracy (i.e. test error convergence) data = np.genfromtxt(files[0], delimiter=",")[1:] # get max test index and value max_test_idx = data.argmax(axis=0)[2] with open("../Out/csv/5_max.csv", "w") as f: f.write("epoch,train accuracy,test_accuracy\n") entry = data[max_test_idx] f.write("%s,%s,%s\n" % (entry[0], entry[1], entry[2])) # process data train_data = np.delete(data, [0, 2, 3], axis=1) test_data = np.delete(data, [0, 1, 3], axis=1) # plot to nearest hundreds rounded_epoch = roundup_hundreds(max_test_idx) fig, ax = plt.subplots(figsize=(16, 8)) ax.set_ylabel("Accuracy") ax.set_xlabel("Epoch") ax.plot( range(rounded_epoch), train_data[0:rounded_epoch], label="Train Accuracy", color="#0000FF", ) ax.plot( range(rounded_epoch), test_data[0:rounded_epoch], label="Test Accuracy", color="#FF0000", ) ax.legend() fig.savefig("../Out/graph/5_epoch_" + str(rounded_epoch) + ".png") plt.close() # plot to max epoch fig, ax = plt.subplots(figsize=(16, 8)) ax.set_ylabel("Accuracy") ax.set_xlabel("Epoch") ax.plot(range(epochs), train_data, label="Train Accuracy", color="#0000FF") ax.plot(range(epochs), test_data, label="Test Accuracy", color="#FF0000") ax.legend() fig.savefig("../Out/graph/5_epoch_" + str(epochs) + ".png") plt.close() # plot to larger of 1 and 5 epoch data_1 = np.genfromtxt(files[1], delimiter=",")[1:] # process data 1 train_data_1 = np.delete(data_1, [0, 2, 3], axis=1) test_data_1 = np.delete(data_1, [0, 1, 3], axis=1) max_test_idx_1 = int(data_1.argmax(axis=0)[2]) with open("../Out/csv/4_optimal_max.csv", "w") as f: f.write("epoch,train accuracy,test_accuracy\n") entry = data_1[max_test_idx_1] f.write("%s,%s,%s\n" % (entry[0], entry[1], entry[2])) # find index of higher test accuracy convergence if data[max_test_idx][2] <= data_1[max_test_idx_1][2]: max_idx = max_test_idx_1 else: max_idx = max_test_idx rounded_epoch = roundup_hundreds(max_idx) # plot train vs train fig, ax = plt.subplots(figsize=(16, 8)) ax.set_ylabel("Accuracy") ax.set_xlabel("Epoch") ax.plot( range(rounded_epoch), train_data[0:rounded_epoch], label="Train Accuracy (4-Layer)", color="#0000FF", ) ax.plot( range(rounded_epoch), train_data_1[0:rounded_epoch], label="Train Accuracy (3-Layer)", color="#FF0000", ) ax.legend() fig.savefig("../Out/graph/5_epoch_" + str(rounded_epoch) + "_train_comparison.png") plt.close() # plot test vs test fig, ax = plt.subplots(figsize=(16, 8)) ax.set_ylabel("Accuracy") ax.set_xlabel("Epoch") ax.plot( range(rounded_epoch), test_data[0:rounded_epoch], label="Test Accuracy (4-Layer)", color="#0000FF", ) ax.plot( range(rounded_epoch), test_data_1[0:rounded_epoch], label="Test Accuracy (3-Layer)", color="#FF0000", ) ax.legend() fig.savefig("../Out/graph/5_epoch_" + str(rounded_epoch) + "_test_comparison.png") plt.close()
def plotTEx(job, tex, filterName, texSpecName='design', outputPrefix=''): """Plot TEx correlation function measurements and thresholds. Parameters ---------- job : `lsst.verify.Job` `Job` providing access to metrics, specs, and measurements tex : `lsst.verify.Measurement` The ellipticity residual correlation `Measurement` object filterName : str Name of the filter of the images texSpecName : str Level of requirement to compare against. Must name a spec defined for the tex metric. Typically one of 'design', 'minimum', 'stretch'. outputPrefix : str, optional Prefix to use for filename of plot file. Effects ------- Saves an output plot file whose name starts with the specified outputPrefix. """ fig = plt.figure(figsize=(10, 6)) ax1 = fig.add_subplot(1, 1, 1) # Plot correlation vs. radius radius = tex.extras['radius'].quantity xip = tex.extras['xip'].quantity xip_err = tex.extras['xip_err'].quantity D = tex.extras['D'].quantity bin_range_operator = tex.extras['bin_range_operator'].quantity ax1.errorbar(radius.value, xip.value, yerr=xip_err.value) ax1.set_xscale('log') ax1.set_xlabel('Separation (arcmin)', size=19) ax1.set_ylabel('Median Residual Ellipticity Correlation', size=19) # Overlay requirements level metric_name = tex.metric_name texSpec = job.specs[Name(package=metric_name.package, metric=metric_name.metric, spec=texSpecName)] texSpecLabel = '{tex.datum.label} {specname}: {texSpec:.2g}'.format( tex=tex, texSpec=texSpec.threshold, specname=texSpecName) ax1.axhline(texSpec.threshold.value, 0, 1, linewidth=2, color='red', label=texSpecLabel) # Overlay measured KPM whether it passed or failed. if texSpec.check(tex.quantity): texStatus = 'passed' else: texStatus = 'failed' texLabelTemplate = '{tex.datum.label} measured: {tex.quantity:.2g} ({status})' texLabel = texLabelTemplate.format(tex=tex, status=texStatus) ax1.axhline(tex.quantity.value, 0, 1, linewidth=2, color='black', label=texLabel) titleTemplate = """ {metric} Residual PSF Ellipticity Correlation {bin_range_operator:s} {D.value:.1f}{D.unit:latex} """ title = titleTemplate.format(metric=tex.datum.label, bin_range_operator=bin_range_operator, D=D) ax1.set_title(title) ax1.set_xlim(0.0, 20.0) ax1.set_xlabel('{radius.label} ({unit})'.format( radius=tex.extras['radius'], unit=radius.unit._repr_latex_())) ax1.set_ylabel('Correlation') ax1.legend(loc='upper right', fontsize=16) ext = 'png' pathFormat = '{metric}_D_{D:d}_{Dunits}.{ext}' plotPath = makeFilename(outputPrefix, pathFormat, metric=tex.datum.label, D=int(D.value), Dunits=D.unit, ext=ext) plt.tight_layout() # fix padding plt.savefig(plotPath, dpi=300, format=ext) plt.close(fig) print("Wrote plot:", plotPath)
def outliers_example(self, corr='', out='', date='', N='', lower='', upper='', median='', flag='', upper_s='', lower_s='', station='', what=''): pressure = self.pretty_pressure_dic[str(self.an_p)] var = self.var_dics[self.var]['name'] hour = str(self.hour).replace('0', '00:00').replace('1', '12:00') plt.title(var + ' ' + what + ' Outliers - Stat: ' + station + ', H: ' + hour + ', P: ' + pressure + ' [hPa]', y=1.03) corr_ = [n for n in corr if not np.isnan(n)] out_ = [n for n in out if not np.isnan(n)] num_a = '{:.1f}'.format(len(corr_) / len(out_ + corr_) * 100) num_o = '{:.1f}'.format(len(out_) / len(out_ + corr_) * 100) plt.scatter(date, corr, label='Accepted [' + num_a + '%]', color='cyan', s=3) plt.scatter(date, out, label='Outliers [' + num_o + '%]', color='black', s=3) X = [min(date), max(date)] plt.plot(X, [lower, lower], label='Lower', color='blue', ls='--') plt.plot(X, [upper, upper], label='Upper', color='red', ls='--') # adding the upper and lower values for skewed distributions plt.plot(X, [lower_s, lower_s], label='Lower Skewed', color='blue', ls='-') plt.plot(X, [upper_s, upper_s], label='Upper Skewed', color='red', ls='-') plt.plot(X, [median, median], label='Median [' + '{:.1f}'.format(median) + ']', color='black', ls='--') plt.legend(fontsize=self.font - 6, loc='upper right', ncol=2) plt.grid(linestyle=':', color='lightgray', lw=1.2) plt.ylabel('Departure ' + self.var_dics[self.var]['units'], fontsize=self.font) plt.xlabel('Date', fontsize=self.font) plt.xticks(rotation=45) out_c = [n for n in out if not np.isnan(n)] corr_c = [n for n in corr if not np.isnan(n)] plt.xlim(min(date) - 1 / 365, max(date) + 1 / 365) plt.ylim(-10, 10) plt.savefig('plots/outliers/outliers_' + flag + '_' + str(N) + '_date_' + str(min(date)) + '_hour_' + self.hour + '_' + self.var + '_anp_' + str(self.an_p) + '_fgp_' + str(self.fg_p) + '.png', bbox_inches='tight') plt.close()
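# outliers_example receives its lower/upper bounds precomputed elsewhere; one
# common way such bounds are derived, shown purely as an assumed sketch, is
# Tukey's fences on the interquartile range:
import numpy as np

def tukey_fences(values, k=1.5):
    # Classic outlier bounds: [Q1 - k*IQR, Q3 + k*IQR], ignoring NaNs.
    q1, q3 = np.nanpercentile(values, [25, 75])
    iqr = q3 - q1
    return q1 - k * iqr, q3 + k * iqr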
def plotPA1(pa1, outputPrefix=""): """Plot the results of calculating the LSST SRC requirement PA1. Creates a file containing the plot with a filename beginning with `outputPrefix`. Parameters ---------- pa1 : `lsst.verify.Measurement` A `Measurement` of the PA1 `Metric`. outputPrefix : `str`, optional Prefix to use for filename of plot file. Will also be used in plot titles. E.g., outputPrefix='Cfht_output_r_' will result in a file named ``'Cfht_output_r_AM1_D_5_arcmin_17.0-21.5.png'`` for an ``AMx.name=='AM1'`` and ``AMx.magRange==[17, 21.5]``. """ diffRange = (-100, +100) magDiff = pa1.extras['magDiff'].quantity magMean = pa1.extras['magMean'].quantity rms = pa1.extras['rms'].quantity iqr = pa1.extras['iqr'].quantity fig = plt.figure(figsize=(18, 12)) ax1 = fig.add_subplot(1, 2, 1) ax1.scatter(magMean[0], magDiff[0], s=10, color=color['bright'], linewidth=0) # index 0 because we show only the first sample from multiple trials ax1.axhline(+rms[0].value, color=color['rms'], linewidth=3) ax1.axhline(-rms[0].value, color=color['rms'], linewidth=3) ax1.axhline(+iqr[0].value, color=color['iqr'], linewidth=3) ax1.axhline(-iqr[0].value, color=color['iqr'], linewidth=3) ax2 = fig.add_subplot(1, 2, 2, sharey=ax1) ax2.hist(magDiff[0], bins=25, range=diffRange, orientation='horizontal', histtype='stepfilled', normed=True, color=color['bright']) ax2.set_xlabel("relative # / bin") labelTemplate = r'PA1({label}) = {q.value:4.2f} {q.unit:latex}' yv = np.linspace(diffRange[0], diffRange[1], 100) ax2.plot(scipy.stats.norm.pdf(yv, scale=rms[0]), yv, marker='', linestyle='-', linewidth=3, color=color['rms'], label=labelTemplate.format(label='RMS', q=rms[0])) ax2.plot(scipy.stats.norm.pdf(yv, scale=iqr[0]), yv, marker='', linestyle='-', linewidth=3, color=color['iqr'], label=labelTemplate.format(label='IQR', q=iqr[0])) ax2.set_ylim(*diffRange) ax2.legend() ax1.set_xlabel("psf magnitude") ax1.set_ylabel(r"psf magnitude diff ({0.unit:latex})".format(magDiff)) for label in ax2.get_yticklabels(): label.set_visible(False) plt.tight_layout() # fix padding ext = 'png' pathFormat = "{name}.{ext}" plotPath = makeFilename(outputPrefix, pathFormat, name="PA1", ext=ext) plt.savefig(plotPath, format=ext) plt.close(fig) print("Wrote plot:", plotPath)
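# plotPA1 consumes precomputed rms and iqr arrays of repeat-visit magnitude
# differences. The conventional estimators look like the sketch below, where
# the IQR is scaled by 1/1.349 to a Gaussian-equivalent sigma; this is a
# sketch of the standard definitions, not the pipeline's exact code:
import numpy as np

def repeatability_stats(mag_diffs_mmag):
    # RMS of the differences, plus a robust sigma from the IQR.
    rms = np.sqrt(np.mean(np.square(mag_diffs_mmag)))
    q25, q75 = np.percentile(mag_diffs_mmag, [25, 75])
    robust_sigma = (q75 - q25) / 1.349  # Gaussian-equivalent of the IQR
    return rms, robust_sigma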
def plotAstrometryErrorModel(dataset, astromModel, outputPrefix=''): """Plot angular distance between matched sources from different exposures. Creates a file containing the plot with a filename beginning with `outputPrefix`. Parameters ---------- dataset : `lsst.verify.Blob` Blob with the multi-visit astrometry data. astromModel : `lsst.verify.Blob` A `Blob` containing the analytic astrometry model. outputPrefix : str, optional Prefix to use for filename of plot file. Will also be used in plot titles. E.g., ``outputPrefix='Cfht_output_r_'`` will result in a file named ``'Cfht_output_r_check_astrometry.png'``. """ bright, = np.where( dataset['snr'].quantity > astromModel['brightSnr'].quantity) dist = dataset['dist'].quantity numMatched = len(dist) dist_median = np.median(dist) bright_dist_median = np.median(dist[bright]) fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(18, 12)) ax[0].hist(dist, bins=100, color=color['all'], histtype='stepfilled', orientation='horizontal') ax[0].hist(dist[bright], bins=100, color=color['bright'], histtype='stepfilled', orientation='horizontal') ax[0].set_ylim([0., 500.]) ax[0].set_ylabel("Distance [{unit:latex}]".format(unit=dist.unit)) plotOutlinedAxline( ax[0].axhline, dist_median.value, color=color['all'], label="Median RMS: {v.value:.1f} {v.unit:latex}".format(v=dist_median)) plotOutlinedAxline( ax[0].axhline, bright_dist_median.value, color=color['bright'], label="SNR > {snr:.0f}\nMedian RMS: {v.value:.1f} {v.unit:latex}". format(snr=astromModel['brightSnr'].quantity.value, v=bright_dist_median)) ax[0].legend(loc='upper right') snr = dataset['snr'].quantity ax[1].scatter(snr, dist, s=10, color=color['all'], label='All') ax[1].scatter(snr[bright], dist[bright], s=10, color=color['bright'], label='SNR > {0:.0f}'.format( astromModel['brightSnr'].quantity.value)) ax[1].set_xlabel("SNR") ax[1].set_xscale("log") ax[1].set_ylim([0., 500.]) matchCountTemplate = '\n'.join( ['Matches:', '{nBright:d} high SNR,', '{nAll:d} total']) ax[1].text(0.6, 0.6, matchCountTemplate.format(nBright=len(bright), nAll=numMatched), transform=ax[1].transAxes, ha='left', va='baseline') w, = np.where(dist < 200 * u.marcsec) plotAstromErrModelFit(snr[w], dist[w], astromModel, ax=ax[1]) ax[1].legend(loc='upper right') ax[1].axvline(astromModel['brightSnr'].quantity, color='red', linewidth=4, linestyle='dashed') plotOutlinedAxline(ax[0].axhline, dist_median.value, color=color['all']) plotOutlinedAxline(ax[0].axhline, bright_dist_median.value, color=color['bright']) # Overall figure title via suptitle plt.suptitle("Astrometry Check : %s" % outputPrefix, fontsize=30) ext = 'png' pathFormat = "{name}.{ext}" plotPath = makeFilename(outputPrefix, pathFormat, name="check_astrometry", ext=ext) plt.savefig(plotPath, format=ext) plt.close(fig) print("Wrote plot:", plotPath)
def plotPhotometryErrorModel(dataset, photomModel, filterName='', outputPrefix=''): """Plot photometric RMS for matched sources. Parameters ---------- dataset : `lsst.verify.Blob` A `Blob` with the multi-visit photometry data. photomModel : `lsst.verify.Blob` A `Blob` holding the analytic photometry model parameters. filterName : str, optional Name of the observed filter to use on axis labels. outputPrefix : str, optional Prefix to use for filename of plot file. Will also be used in plot titles. E.g., ``outputPrefix='Cfht_output_r_'`` will result in a file named ``'Cfht_output_r_check_photometry.png'``. """ bright, = np.where( dataset['snr'].quantity > photomModel['brightSnr'].quantity) numMatched = len(dataset['mag'].quantity) magrms = dataset['magrms'].quantity mmagRms = magrms.to(u.mmag) mmagRmsHighSnr = mmagRms[bright] magerr = dataset['magerr'].quantity mmagErr = magerr.to(u.mmag) mmagErrHighSnr = mmagErr[bright] mmagrms_median = np.median(mmagRms) bright_mmagrms_median = np.median(mmagRmsHighSnr) fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(18, 16)) ax[0][0].hist(mmagRms, bins=100, range=(0, 500), color=color['all'], histtype='stepfilled', orientation='horizontal') ax[0][0].hist(mmagRmsHighSnr, bins=100, range=(0, 500), color=color['bright'], histtype='stepfilled', orientation='horizontal') plotOutlinedAxline(ax[0][0].axhline, mmagrms_median.value, color=color['all'], label="Median RMS: {v:.1f}".format(v=mmagrms_median)) plotOutlinedAxline(ax[0][0].axhline, bright_mmagrms_median.value, color=color['bright'], label="SNR > {snr:.0f}\nMedian RMS: {v:.1f}".format( snr=photomModel['brightSnr'].quantity.value, v=bright_mmagrms_median)) ax[0][0].set_ylim([0, 500]) ax[0][0].set_ylabel("{magrms.label} [{mmagrms.unit:latex}]".format( magrms=dataset['magrms'], mmagrms=mmagRms)) ax[0][0].legend(loc='upper right') mag = dataset['mag'].quantity ax[0][1].scatter(mag, mmagRms, s=10, color=color['all'], label='All') ax[0][1].scatter(mag[bright], mmagRmsHighSnr, s=10, color=color['bright'], label='{label} > {value:.0f}'.format( label=photomModel['brightSnr'].label, value=photomModel['brightSnr'].quantity.value)) ax[0][1].set_xlabel("{label} [{unit:latex}]".format(label=filterName, unit=mag.unit)) ax[0][1].set_ylabel("{label} [{unit:latex}]".format( label=dataset['magrms'].label, unit=mmagRmsHighSnr.unit)) ax[0][1].set_xlim([17, 24]) ax[0][1].set_ylim([0, 500]) ax[0][1].legend(loc='upper left') plotOutlinedAxline(ax[0][1].axhline, mmagrms_median.value, color=color['all']) plotOutlinedAxline(ax[0][1].axhline, bright_mmagrms_median.value, color=color['bright']) matchCountTemplate = '\n'.join( ['Matches:', '{nBright:d} high SNR,', '{nAll:d} total']) ax[0][1].text(0.1, 0.6, matchCountTemplate.format(nBright=len(bright), nAll=numMatched), transform=ax[0][1].transAxes, ha='left', va='top') ax[1][0].scatter(mmagRms, mmagErr, s=10, color=color['all'], label=None) ax[1][0].scatter(mmagRmsHighSnr, mmagErrHighSnr, s=10, color=color['bright'], label=None) ax[1][0].set_xscale('log') ax[1][0].set_yscale('log') ax[1][0].plot([0, 1000], [0, 1000], linestyle='--', color='black', linewidth=2) ax[1][0].set_xlabel("{label} [{unit:latex}]".format( label=dataset['magrms'].label, unit=mmagRms.unit)) ax[1][0].set_ylabel("Median Reported Magnitude Err [{unit:latex}]".format( unit=mmagErr.unit)) brightSnrMag = 2.5 * np.log10( 1 + (1 / photomModel['brightSnr'].quantity.value)) * u.mag label = r'$SNR > {snr:.0f} \equiv \sigma < {snrMag:0.1f}$'.format( snr=photomModel['brightSnr'].quantity.value,
snrMag=brightSnrMag.to(u.mmag)) ax[1][0].axhline(brightSnrMag.to(u.mmag).value, color='red', linewidth=4, linestyle='dashed', label=label) ax[1][0].set_xlim([1, 500]) ax[1][0].set_ylim([1, 500]) ax[1][0].legend(loc='upper center') ax[1][1].scatter(mag, mmagErr, color=color['all'], label=None) ax[1][1].set_yscale('log') ax[1][1].scatter(np.asarray(mag)[bright], mmagErrHighSnr, s=10, color=color['bright'], label=None) ax[1][1].set_xlabel("{name} [{unit:latex}]".format(name=filterName, unit=mag.unit)) ax[1][1].set_ylabel("Median Reported Magnitude Err [{unit:latex}]".format( unit=mmagErr.unit)) ax[1][1].set_xlim([17, 24]) ax[1][1].set_ylim([1, 500]) ax[1][1].axhline(brightSnrMag.to(u.mmag).value, color='red', linewidth=4, linestyle='dashed', label=None) w, = np.where(mmagErr < 200. * u.mmag) plotPhotErrModelFit(mag[w].to(u.mag).value, magerr[w].to(u.mmag).value, photomModel, ax=ax[1][1]) ax[1][1].legend(loc='upper left') plt.suptitle("Photometry Check : %s" % outputPrefix, fontsize=30) ext = 'png' pathFormat = "{name}.{ext}" plotPath = makeFilename(outputPrefix, pathFormat, name="check_photometry", ext=ext) plt.savefig(plotPath, format=ext) plt.close(fig) print("Wrote plot:", plotPath)
def plotAMx(job, amx, afx, filterName, amxSpecName='design', outputPrefix=""): """Plot a histogram of the RMS in relative distance between pairs of stars. Creates a file containing the plot with a filename beginning with `outputPrefix`. Parameters ---------- job : `lsst.verify.Job` `~lsst.verify.Job` providing access to metrics, specs and measurements amx : `lsst.verify.Measurement` afx : `lsst.verify.Measurement` filterName : `str` amxSpecName : `str`, optional Name of the AMx specification to reference in the plot. Default: ``'design'``. outputPrefix : `str`, optional Prefix to use for filename of plot file. Will also be used in plot titles. E.g., ``outputPrefix='Cfht_output_r_'`` will result in a file named ``'Cfht_output_r_AM1_D_5_arcmin_17.0-21.5.png'`` for an ``AMx.name=='AM1'`` and ``AMx.magRange==[17, 21.5]``. """ if np.isnan(amx.quantity): print("Skipping %s -- no measurement" % str(amx.metric_name)) return fig = plt.figure(figsize=(10, 6)) ax1 = fig.add_subplot(1, 1, 1) histLabelTemplate = 'D: [{inner.value:.1f}{inner.unit:latex}-{outer.value:.1f}{outer.unit:latex}]\n'\ 'Mag: [{magBright:.1f}-{magFaint:.1f}]' annulus = amx.extras['annulus'].quantity magRange = amx.extras['magRange'].quantity ax1.hist(amx.extras['rmsDistMas'].quantity, bins=25, range=(0.0, 100.0), histtype='stepfilled', label=histLabelTemplate.format(inner=annulus[0], outer=annulus[1], magBright=magRange[0], magFaint=magRange[1])) metric_name = amx.metric_name amxSpec = job.specs[Name(package=metric_name.package, metric=metric_name.metric, spec=amxSpecName)] amxSpecLabelTemplate = '{amx.datum.label} {specname}: {amxSpec.threshold:.1f}' amxSpecLabel = amxSpecLabelTemplate.format(amx=amx, specname=amxSpecName, amxSpec=amxSpec) ax1.axvline(amxSpec.threshold.value, 0, 1, linewidth=2, color='red', label=amxSpecLabel) if amxSpec.check(amx.quantity): amxStatus = 'passed' else: amxStatus = 'failed' amxLabelTemplate = '{amx.datum.label} measured: {amx.quantity:.1f} ({status})' amxLabel = amxLabelTemplate.format(amx=amx, status=amxStatus) ax1.axvline(amxSpec.threshold.value, 0, 1, linewidth=2, color='black', label=amxLabel) afxSpec = job.specs[Name(package=afx.metric_name.package, metric=afx.metric_name.metric, spec='srd')] if afxSpec.check(afx.quantity): afxStatus = 'passed' else: afxStatus = 'failed' afxLabelTemplate = '{afx.datum.label} {afxSpec.name}: {afxSpec.threshold}%\n' + \ '{afx.datum.label} measured: {afx.quantity:.1f}% ({status})' afxLabel = afxLabelTemplate.format(afx=afx, afxSpec=afxSpec, status=afxStatus) ax1.axvline((amx.quantity + afx.extras['ADx'].quantity).value, 0, 1, linewidth=2, color='green', label=afxLabel) title = '{metric} Astrometric Repeatability over {D.value:.0f}{D.unit:latex}'.format( metric=amx.datum.label, D=amx.extras['D'].quantity) ax1.set_title(title) ax1.set_xlim(0.0, 100.0) ax1.set_xlabel('{rmsDistMas.label} ({unit})'.format( rmsDistMas=amx.extras['rmsDistMas'], unit=amx.extras['rmsDistMas'].quantity.unit._repr_latex_())) ax1.set_ylabel('# pairs / bin') ax1.legend(loc='upper right', fontsize=16) ext = 'png' pathFormat = '{metric}_D_{D:d}_{Dunits}_' + \ '{magBright.value}_{magFaint.value}_{magFaint.unit}.{ext}' plotPath = makeFilename(outputPrefix, pathFormat, metric=amx.datum.label, D=int(amx.extras['D'].quantity.value), Dunits=amx.extras['D'].quantity.unit, magBright=magRange[0], magFaint=magRange[1], ext=ext) plt.tight_layout() # fix padding plt.savefig(plotPath, dpi=300, format=ext) plt.close(fig) print("Wrote plot:", plotPath)