def test_rotated_labels_parameters(
        x_alignment, y_alignment, x_tick_label_width, y_tick_label_width, rotation):
    fig, _ = __plot()

    if x_alignment:
        plt.xticks(ha=x_alignment, rotation=rotation)
    if y_alignment:
        plt.yticks(ha=y_alignment, rotation=rotation)

    # convert to tikz file
    _, tmp_base = tempfile.mkstemp()
    tikz_file = tmp_base + '_tikz.tex'

    extra_dict = {}
    if x_tick_label_width:
        extra_dict['x tick label text width'] = x_tick_label_width
    if y_tick_label_width:
        extra_dict['y tick label text width'] = y_tick_label_width

    matplotlib2tikz.save(
        tikz_file,
        figurewidth='7.5cm',
        extra_axis_parameters=extra_dict
    )

    # close figure
    plt.close(fig)

    # delete file
    os.unlink(tikz_file)
def test_rotated_labels_parameters_no_ticks():
    fig, ax = __plot()

    ax.xaxis.set_ticks([])
    plt.tick_params(axis='x', which='both', bottom='off', top='off')
    plt.tick_params(axis='y', which='both', left='off', right='off')

    # convert to tikz file
    _, tmp_base = tempfile.mkstemp()
    tikz_file = tmp_base + '_tikz.tex'
    matplotlib2tikz.save(
        tikz_file,
        figurewidth='7.5cm'
    )

    # close figure
    plt.close(fig)

    # delete file
    os.unlink(tikz_file)
    return
def zero_padding():
    samples = 77
    rec_period = 4
    sampling_rate = samples / rec_period

    time = np.linspace(0, rec_period, samples)
    sin = np.sin(time * 3.75 * np.pi)
    win = np.hanning(len(sin))

    pad_count = 23
    padded_sin = np.pad(sin, (0, pad_count), 'constant')

    fft = np.fft.rfft(sin)
    fft_padded = np.fft.rfft(padded_sin)
    bins = (np.fft.rfftfreq(len(sin)) * sampling_rate)[1:]

    plt.subplot(321)
    plt.plot(time, sin)

    plt.subplot(322)
    plt.plot(bins, (np.abs(fft))[1:] * 2 / samples, "o")

    plt.subplot(323)
    plt.plot(np.linspace(0, (samples + pad_count) * rec_period / float(samples),
                         samples + pad_count),
             padded_sin)

    plt.subplot(324)
    plt.plot((np.fft.rfftfreq(len(padded_sin)) * sampling_rate)[1:],
             (np.abs(fft_padded))[1:] * 2 / samples, "o")

    plt.subplot(325)
    padded_sin_win = np.pad(sin * win, (0, pad_count), 'constant')
    plt.plot(np.linspace(0, (samples + pad_count) * rec_period / float(samples),
                         samples + pad_count),
             padded_sin_win)

    plt.subplot(326)
    plt.plot((np.fft.rfftfreq(len(padded_sin_win)) * sampling_rate)[1:],
             np.abs(np.fft.rfft(padded_sin_win))[1:] * 2 / samples, "o")

    matplotlib2tikz.save('myfile.tikz')
    plt.show()
def _main():
    args = _parse_input_arguments()

    # read the file
    handle = open(args.filename)
    data = yaml.load(handle)
    handle.close()

    # Plot Newton residuals.
    # Mind that the last Newton datum only contains the final ||F||.
    num_newton_steps = len(data['Newton results'])
    x = range(num_newton_steps)

    # Extract Newton residuals
    y = np.empty(num_newton_steps)
    for k in xrange(num_newton_steps):
        y[k] = data['Newton results'][k]['Fx_norm']

    # Plot it.
    pp.semilogy(x, y)
    pp.xlabel('Newton step')
    pp.ylabel('||F||')
    pp.title('Krylov: %s Prec: %r ix-defl: %r extra defl: %r ExpRes: %r Newton iters: %d'
             % (data['krylov'], data['preconditioner type'], data['ix deflation'],
                data['extra deflation'], data['explicit residual'], num_newton_steps)
             )

    # Write the info out to files.
    if args.imgfile:
        pp.savefig(args.imgfile)
    if args.tikzfile:
        matplotlib2tikz.save(args.tikzfile)
    return
def test_rotated_labels_parameters_different_values(x_tick_label_width, y_tick_label_width):
    fig, ax = __plot()

    plt.xticks(ha='left', rotation=90)
    plt.yticks(ha='left', rotation=90)
    ax.xaxis.get_majorticklabels()[0].set_rotation(20)
    ax.yaxis.get_majorticklabels()[0].set_horizontalalignment('right')

    # convert to tikz file
    _, tmp_base = tempfile.mkstemp()
    tikz_file = tmp_base + '_tikz.tex'

    extra_dict = {}
    if x_tick_label_width:
        extra_dict['x tick label text width'] = x_tick_label_width
    if y_tick_label_width:
        extra_dict['y tick label text width'] = y_tick_label_width

    matplotlib2tikz.save(
        tikz_file,
        figurewidth='7.5cm',
        extra_axis_parameters=extra_dict
    )

    # close figure
    plt.close(fig)

    # delete file
    os.unlink(tikz_file)
def _main():
    args = _parse_input_arguments()

    # read the file
    handle = open(args.filename)
    data = yaml.load(handle)
    handle.close()

    # Plot relresvecs.
    # pp.subplot(121)
    # Mind that the last Newton datum only contains the final ||F||.
    num_newton_steps = len(data['Newton results']) - 1
    for k in xrange(num_newton_steps):
        pp.semilogy(
            data['Newton results'][k]['relresvec'],
            color=str(1.0 - float(k+1)/num_newton_steps)
        )
    pp.xlabel('Krylov step')
    pp.ylabel('||r||/||b||')
    pp.title('Krylov: %s Prec: %r ix-defl: %r extra defl: %r ExpRes: %r Newton iters: %d'
             % (data['krylov'], data['preconditioner type'], data['ix deflation'],
                data['extra deflation'], data['explicit residual'], num_newton_steps)
             )
    if args.xmax:
        pp.xlim([0, args.xmax])
    pp.ylim([1e-10, 10])

    # Write the info out to files.
    if args.imgfile:
        pp.savefig(args.imgfile)
    if args.tikzfile:
        matplotlib2tikz.save(args.tikzfile)
    return
def savefig(fig, name, extn="pdf", tight=True, ax=None, **kwargs):
    _kwargs = Tools.kwarger()
    _kwargs.update(kwargs)
    if tight:
        fig.tight_layout(pad=0.1)
    if ax is not None:
        # Apply format_axes to every axis; a plain loop works in both
        # Python 2 and 3 (a bare map() is lazy and does nothing in Python 3).
        if isinstance(ax, list):
            for _ax in ax:
                format_axes(_ax)
        else:
            format_axes(ax)
    fig.savefig("{0}.{1}".format(name, extn), **_kwargs)
    try:
        mpl2tkz.save("{0}.tex".format(name), fig, show_info=False)
    except Exception:
        warnings.warn("Couldn't tkzify {0}, skipping".format(name))
    plt.close(fig)
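A minimal usage sketch for the `savefig` helper above. It assumes the surrounding module already provides `format_axes`, a `Tools.kwarger()` that returns default savefig keyword arguments, and `mpl2tkz` as an alias for matplotlib2tikz; the output name "example_plot" is purely illustrative.

import matplotlib.pyplot as plt

# Build a small figure and hand it to the helper defined above.
fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], label="demo")
ax.legend()

# Writes example_plot.pdf via matplotlib and example_plot.tex via
# matplotlib2tikz, warning (instead of failing) if the TikZ export breaks.
savefig(fig, "example_plot", extn="pdf", ax=ax)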
def test_rotated_labels_parameters_no_ticks():
    fig, ax = __plot()

    ax.xaxis.set_ticks([])
    plt.tick_params(axis="x", which="both", bottom="off", top="off")
    plt.tick_params(axis="y", which="both", left="off", right="off")

    # convert to tikz file
    _, tmp_base = tempfile.mkstemp()
    tikz_file = tmp_base + "_tikz.tex"
    matplotlib2tikz.save(tikz_file, figurewidth="7.5cm")

    # close figure
    plt.close(fig)

    # delete file
    os.unlink(tikz_file)
    return
def face_validation(time, u, y_model, y_exp, title):
    yaw_rate = y_model[0]
    yaw_rate_experiment = y_exp[0]

    plt.figure()
    ax1 = plt.subplot(211)
    lines = plt.plot(time[::10], u[::10], "k")
    plt.setp(lines, linewidth=2.5)
    plt.ylabel("steer angle in rad")

    plt.subplot(212, sharex=ax1)
    lines = plt.plot(time[::10], yaw_rate[::10], "r",
                     time[::10], yaw_rate_experiment[::10], ":k")
    plt.setp(lines, linewidth=2.5)
    plt.legend(["single track model", "CarMaker"], loc=3)
    plt.xlabel("time in seconds")
    plt.ylabel("yaw rate in rad per seconds")

    matplotlib2tikz.save(title + ".tex")
def plot_ctrl_perf(data, dir_name=None):
    for env, res in data.items():
        algos = res.keys()
        stats_cost = res.values()
        means = np.asarray([stat[0][0] for stat in stats_cost])
        stds = np.asarray([stat[0][1] for stat in stats_cost])
        final_cost = np.asarray([stat[1][-1] for stat in stats_cost])
        # means = [m / fc for m,fc in zip(means, final_cost)]
        # stds = [100 * s / fc for s,fc in zip(stds, final_cost)]
        for i, algo in enumerate(algos):
            print("{} {} {} {} +- {}".format(env, algo, final_cost[i], means[i], stds[i]))

        means = [m / fc for m, fc in zip(means, final_cost)]
        stds = [100 * s / fc for s, fc in zip(stds, final_cost)]
        for i, algo in enumerate(algos):
            print("{} {} {} +- {}".format(env, algo, means[i], stds[i]))

        fig, ax = plt.subplots()
        x_pos = np.arange(len(algos))
        ax.bar(x_pos, means, yerr=stds, align='center',
               color="white", edgecolor='k', linewidth=1)
        ax.set_ylabel('Controller Cost / Predicted Cost')
        ax.set_xticks(x_pos)
        ax.set_xticklabels(algos)
        ax.set_title(env)
        ax.yaxis.grid(True)

        # Save the figure and show
        plt.tight_layout()
        if dir_name is not None:
            plt.savefig(os.path.join(dir_name, "{}_ctrl_perf.png".format(env)),
                        bbox_inches='tight', format='png')
            matplotlib2tikz.save(
                os.path.join(dir_name, "{}_ctrl_perf.tex".format(env)))
def plot_throughput_pdf(T):
    fig = plt.figure(figsize=(10.24, 7.68))
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    matplotlib.rcParams['text.usetex'] = True
    matplotlib.rcParams['font.size'] = 40
    matplotlib.rcParams['xtick.labelsize'] = 'small'
    matplotlib.rcParams['ytick.labelsize'] = 'small'
    matplotlib.rcParams['legend.fontsize'] = 'small'
    matplotlib.rcParams['text.latex.preamble'] = [
        r'\usepackage{amsmath}',
        r'\usepackage{amssymb}'
    ]

    labels = []
    num_bins = 40
    for data in T:
        data_ = T[data]
        counts, bin_edges = np.histogram(data_, bins=num_bins, density=True)
        pdf = counts  # np.cumsum(counts) / counts.sum()
        ax = fig.gca()
        if data == 'mmWave only':
            style = 'r-'
            labels.append(data)
        elif data == 'Sub-6 only':
            style = 'b-'
            labels.append(data)
        else:
            continue
        ax.plot(bin_edges[1:], pdf, style, linewidth=2)

    plt.legend(labels, loc="best")
    plt.grid()
    plt.xlabel('Throughput [Mbps]')
    plt.ylabel('Throughput pdf')
    plt.tight_layout()
    plt.savefig('figures/throughputs_pdf_{}.pdf'.format(p_randomness), format='pdf')
    matplotlib2tikz.save(
        'figures/throughputs_pdf_{}.tikz'.format(p_randomness))
def plot_coefficient_dnn_vs_gan_error_over_training(single_log_directory):
    """Plots error over training comparing DNN to GAN."""
    logs = Log.create_all_in_directory(single_log_directory)
    if re.search(r'/GAN/', logs[0].event_file_name):
        gan_log, dnn_log = logs[0], logs[1]
    else:
        dnn_log, gan_log = logs[0], logs[1]

    figure, axes = plt.subplots()
    axes.set_xlabel('Training Step')
    axes.set_ylabel('MAE')
    dnn_log.scalars_data_frame.plot(y='1_Validation_Error/MAE', ax=axes,
                                    label='DNN', color=dnn_color)
    gan_log.scalars_data_frame.plot(y='1_Validation_Error/MAE', ax=axes,
                                    label='GAN', color=gan_color)

    matplotlib2tikz.save(os.path.join('latex', 'error-over-training.tex'))
    plt.show()
    plt.close(figure)
def plot_training(energies, parameters, symmetries):
    _, (eax, pax) = plt.subplots(ncols=2)
    eax.semilogy(np.abs(3 - np.asarray(energies[2])), label=r"$\psi_{PJ}$")
    eax.semilogy(np.abs(3 - np.asarray(energies[0])), label=r"$\psi_{DNN}$")
    eax.semilogy(np.abs(3 - np.asarray(energies[1])), label=r"$\psi_{SDNN}$")
    eax.set_xlabel(r"% of training")
    eax.set_ylabel(r"Absolute error in $\langle E_L\rangle$ [a.u]")
    eax.legend()

    pax.plot(np.asarray(parameters[0])[:, 4:50])
    pax.set_xlabel(r"% of training")

    matplotlib2tikz.save(__file__ + ".tex")

    _, sax = plt.subplots()
    sax.semilogx(symmetries, label=r"$S(\psi_{DNN})$")
    sax.set_ylabel("Symmetry")
    sax.set_xlabel(r"% of training")
    sax.legend(loc="lower right")

    matplotlib2tikz.save(__file__ + ".symmetry.tex")
def savePlot(name):
    name = showAndSave.prefix + name
    name = ''.join(ch for ch in name if ch.isalnum() or ch == '_')

    fig = plt.gcf()
    ax = plt.gca()
    gitMetadata = get_git_metadata()
    informationText = 'By Kjetil Lye@ETHZ <*****@*****.**>\nCommit: %s\nRepo: %s\nHostname: %s' % (
        gitMetadata['git_commit'], gitMetadata['git_remote_url'],
        socket.gethostname())

    ax.text(0.95, 0.01, informationText,
            fontsize=3, color='gray',
            ha='right', va='bottom', alpha=0.5,
            transform=ax.transAxes)

    matplotlib2tikz.save(name + '.tikz',
                         figureheight='\\figureheight',
                         figurewidth='\\figurewidth',
                         show_info=False)

    savenamepng = name + '.png'
    plt.savefig(savenamepng, bbox_inches='tight')

    writeMetadata(
        savenamepng,
        {'Copyright': 'Copyright Kjetil Lye@ETHZ <*****@*****.**>'})
    add_git_information(savenamepng)
    writeMetadata(
        savenamepng, {
            'working_directory': os.getcwd(),
            'hostname': socket.gethostname(),
            'generated_on_date': str(datetime.datetime.now())
        })
def _main():
    args = _parse_input_arguments()

    # read the file
    handle = open(args.filename)
    data = yaml.load(handle)
    handle.close()

    # Plot relresvecs.
    # pp.subplot(121)
    # Mind that the last Newton datum only contains the final ||F||.
    num_newton_steps = len(data["Newton results"]) - 1
    for k in range(num_newton_steps):
        pp.semilogy(
            data["Newton results"][k]["relresvec"],
            color=str(1.0 - float(k + 1) / num_newton_steps),
        )
    pp.xlabel("Krylov step")
    pp.ylabel("||r||/||b||")
    pp.title(
        "Krylov: %s Prec: %r ix-defl: %r extra defl: %r ExpRes: %r Newton iters: %d"
        % (
            data["krylov"],
            data["preconditioner type"],
            data["ix deflation"],
            data["extra deflation"],
            data["explicit residual"],
            num_newton_steps,
        ))
    if args.xmax:
        pp.xlim([0, args.xmax])
    pp.ylim([1e-10, 10])

    # Write the info out to files.
    if args.imgfile:
        pp.savefig(args.imgfile)
    if args.tikzfile:
        matplotlib2tikz.save(args.tikzfile)
    return
def plot_primary(X, Y, title, xlabel, ylabel, ymin=0, ymax=MAX_EPISODES_DEEP, filename='plot.pdf'):
    fig = plt.figure(figsize=(10.24, 7.68))
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    matplotlib.rcParams['text.usetex'] = True
    matplotlib.rcParams['font.size'] = 20
    matplotlib.rcParams['text.latex.preamble'] = [
        r'\usepackage{amsmath}',
        r'\usepackage{amssymb}'
    ]

    # plt.title(title)
    plt.xlabel(xlabel)

    ax = fig.gca()
    ax.xaxis.set_major_locator(MultipleLocator(1))
    # Format the ticklabel to be 2 raised to the power of `x`
    ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**x)))
    ax.set_autoscaley_on(False)

    plot_, = ax.plot(X, Y, 'k^-')  # , label='ROC')
    # ax.set_xlim(xmin=0.15, xmax=0.55)
    ax.set_ylabel(ylabel)
    ax.set_ylim(ymin, ymax)

    plt.grid(True)
    # plt.legend([plot_], ['ROC'], loc='upper right')
    fig.tight_layout()
    plt.savefig('{}'.format(filename), format='pdf')
    matplotlib2tikz.save('figures/{}.tikz'.format(filename))
def convert_plot_to_tikz(tikz_file, tikz_dir, show=False, remove_all_files=True):
    import sys
    import matplotlib2tikz as pylab2tikz

    pylab2tikz.save(tikz_file, show_info=False)

    with open(tikz_file, 'r') as f:
        tikz_string = f.read()

    # create a latex wrapper for the tikz
    wrapper = r'''\documentclass{standalone}
\usepackage[utf8]{inputenc}
\usepackage{pgfplots}
\usepackage{amsmath,amssymb,amsfonts}
\usepgfplotslibrary{groupplots}
\pgfplotsset{compat=newest}
\begin{document}
%s
\end{document}''' % (tikz_string)

    os.remove(tikz_file)
    compile_tikz_figure(wrapper, tikz_file, tikz_dir, show, remove_all_files)
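The `compile_tikz_figure` helper called above is not part of this snippet. The following is a minimal sketch of what such a helper could do, assuming `pdflatex` is available on the PATH; the function name and behaviour here are illustrative assumptions, not the original implementation.

import os
import subprocess


def compile_tikz_figure_sketch(wrapper, tikz_file, tikz_dir, show=False,
                               remove_all_files=True):
    # Write the standalone LaTeX wrapper next to the requested output name.
    tex_path = os.path.join(tikz_dir, os.path.basename(tikz_file))
    with open(tex_path, 'w') as f:
        f.write(wrapper)

    # Compile with pdflatex; -interaction=nonstopmode avoids hanging on errors.
    # (The `show` flag is ignored in this sketch.)
    subprocess.check_call(
        ['pdflatex', '-interaction=nonstopmode', os.path.basename(tex_path)],
        cwd=tikz_dir)

    if remove_all_files:
        # Drop the auxiliary files pdflatex leaves behind.
        base, _ = os.path.splitext(tex_path)
        for ext in ('.aux', '.log'):
            if os.path.exists(base + ext):
                os.remove(base + ext)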
def VisualizeFusedTSSAndAnnotations(annotations_folder, fused_tss_file_path, tikz_file_out_path=None):
    # Load the annotations
    annotations = {}
    anno_files = glob.glob(os.path.join(annotations_folder, '*.csv'))
    for anno_file in anno_files:
        df = pd.read_csv(anno_file)
        annotations[os.path.basename(anno_file)] = df

    # Load the fused TSS
    fused_tss = pd.read_csv(fused_tss_file_path)
    const_type_mask = fused_tss.iloc[:, 1] == CONST_VAL
    const_times = fused_tss.index[const_type_mask]
    change_times = fused_tss.index[~const_type_mask]

    # Plot
    fig, ax = plt.subplots()
    for anno_key in annotations.keys():
        df = annotations[anno_key]
        ax.plot(df.iloc[:, 0], df.iloc[:, 1])

    half_sample_interval = 0.5 * (fused_tss.iloc[1, 0] - fused_tss.iloc[0, 0])
    for const_time in const_times:
        ax.axvspan(const_time - half_sample_interval,
                   const_time + half_sample_interval,
                   alpha=0.2, color='red')
    for change_time in change_times:
        ax.axvspan(change_time - half_sample_interval,
                   change_time + half_sample_interval,
                   alpha=0.2, color='green')

    plt.xlim(135, 167)

    if tikz_file_out_path is not None:
        matplotlib2tikz.save(tikz_file_out_path)

    plt.show()
def summarize_dc_data(self):
    self.logger.info("Running custom summary function")
    df = self.dataframe.drop(columns=["school_ncesid"])
    summary = df.describe(percentiles=[]).drop("count").T

    with (self.output_dir / "summary.tex").open('w') as fer:
        summary.to_latex(buf=fer, float_format="%.2f")
    with (self.output_dir / "missing.tex").open('w') as fer:
        pd.DataFrame([df.isnull().sum()]).T.to_latex(buf=fer, float_format="%.2f")

    for (column, color) in zip(df.columns, colors):
        try:
            df[[column]].plot.hist(facecolor=color, legend=None, bins=20)
            plt.gca().legend().remove()
            matplotlib2tikz.save(self.output_dir / (column + ".tex"),
                                 figureheight="3in", figurewidth="3in")
        except TypeError:
            pass
    return self
def plot_secondary(X, Y1, Y2, Y3, Y4, xlabel, y1label, y2label, y1max, y2max, filename='plot.pdf'):
    fig = plt.figure(figsize=(10.24, 7.68))
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    matplotlib.rcParams['text.usetex'] = True
    matplotlib.rcParams['font.size'] = 20
    matplotlib.rcParams['text.latex.preamble'] = [
        r'\usepackage{amsmath}',
        r'\usepackage{amssymb}']

    # plt.title(title)
    plt.xlabel(xlabel)
    plt.grid(True, axis='both', which='both')

    ax = fig.gca()
    # ax.xaxis.set_major_locator(MultipleLocator(1))
    # Format the ticklabel to be 2 raised to the power of `x`
    # ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**x)))
    ax.set_autoscaley_on(False)
    ax_sec = ax.twinx()

    plot1_, = ax.plot(X, Y1, 'k^-')
    plot2_, = ax.plot(X, Y2, 'bo--')
    plot3_, = ax_sec.plot(X, Y3, 'r^-')
    plot4_, = ax_sec.plot(X, Y4, 'go--')

    ax.set_ylabel(y1label)
    ax_sec.set_ylabel(y2label)
    ax.set_ylim(0, y1max)
    ax_sec.set_ylim(0, y2max)

    plt.grid(True)
    plt.legend([plot1_, plot2_, plot3_, plot4_],
               ['TX Power JB-PCIC', 'TX Power Optimal', 'SINR JB-PCIC', 'SINR Optimal'],
               loc='lower right')
    fig.tight_layout()
    plt.savefig('figures/{}'.format(filename), format='pdf')
    matplotlib2tikz.save('figures/{}.tikz'.format(filename))
def plot_primary_two(X, Y1, Y2, xlabel, ylabel, filename='plot.pdf'):
    fig = plt.figure(figsize=(10.24, 7.68))
    plt.xlabel(xlabel)

    ax = fig.gca()
    # ax.xaxis.set_major_locator(MultipleLocator(1))
    # Format the ticklabel to be 2 raised to the power of `x`
    # ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**x)))
    ax.set_autoscaley_on(False)

    plot1_, = ax.plot(X, Y1, 'k^-')
    plot2_, = ax.plot(X, Y2, 'ro--')

    ax.set_ylabel(ylabel)
    ax.set_ylim(min(min(Y1), min(Y2)) * 0.98, max(max(Y1), max(Y2)) * 1.02)

    plt.grid(True)
    plt.legend([plot1_, plot2_], ['JB-PCIC', 'Optimal'], loc='best')
    fig.tight_layout()
    plt.savefig('{}'.format(filename), format='pdf')
    matplotlib2tikz.save('{}.tikz'.format(filename))
def _main():
    args = _parse_input_arguments()

    # read the file
    handle = open(args.filename)
    data = yaml.load(handle)
    handle.close()

    # Plot Newton residuals.
    # Mind that the last Newton datum only contains the final ||F||.
    num_newton_steps = len(data["Newton results"])
    x = list(range(num_newton_steps))

    # Extract Newton residuals
    y = np.empty(num_newton_steps)
    for k in range(num_newton_steps):
        y[k] = data["Newton results"][k]["Fx_norm"]

    # Plot it.
    pp.semilogy(x, y)
    pp.xlabel("Newton step")
    pp.ylabel("||F||")
    pp.title(
        "Krylov: %s Prec: %r ix-defl: %r extra defl: %r ExpRes: %r Newton iters: %d"
        % (
            data["krylov"],
            data["preconditioner type"],
            data["ix deflation"],
            data["extra deflation"],
            data["explicit residual"],
            num_newton_steps,
        ))

    # Write the info out to files.
    if args.imgfile:
        pp.savefig(args.imgfile)
    if args.tikzfile:
        matplotlib2tikz.save(args.tikzfile)
    return
def plot_ccdf(T):
    fig = plt.figure(figsize=(10.24, 7.68))
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    matplotlib.rcParams['text.usetex'] = True
    matplotlib.rcParams['font.size'] = 20
    matplotlib.rcParams['text.latex.preamble'] = [
        r'\usepackage{amsmath}',
        r'\usepackage{amssymb}'
    ]

    labels = T.columns
    num_bins = 50
    i = 0
    for data in T:
        data_ = T[data].dropna()
        counts, bin_edges = np.histogram(data_, bins=num_bins, density=True)
        ccdf = 1 - np.cumsum(counts) / counts.sum()
        lw = 1 + i
        ax = fig.gca()
        style = '-'
        ax.plot(bin_edges[1:], ccdf, style, linewidth=lw)

    labels = [
        r'$M_\text{ULA} = 4$',
        r'$M_\text{ULA} = 8$',
        r'$M_\text{ULA} = 16$',
        r'$M_\text{ULA} = 32$',
        r'$M_\text{ULA} = 64$'
    ]

    plt.grid(True)
    plt.tight_layout()
    ax.set_xlabel('$\gamma$')
    ax.set_ylabel('$1 - F_\Gamma(\gamma)$')
    ax.legend(labels, loc="lower left")
    plt.savefig('ccdf.pdf', format='pdf')
    matplotlib2tikz.save('figures/ccdf.tikz')
    plt.close(fig)
def plot_pdf(data1, label1, data2, label2):
    fig = plt.figure(figsize=(10.24, 7.68))
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    matplotlib.rcParams['text.usetex'] = True
    matplotlib.rcParams['font.size'] = 40
    matplotlib.rcParams['xtick.labelsize'] = 'small'
    matplotlib.rcParams['ytick.labelsize'] = 'small'
    matplotlib.rcParams['legend.fontsize'] = 'small'
    matplotlib.rcParams['text.latex.preamble'] = [
        r'\usepackage{amsmath}',
        r'\usepackage{amssymb}'
    ]

    labels = [label1, label2]
    num_bins = 50

    counts, bin_edges = np.histogram(data1, bins=num_bins, density=True)
    pdf = counts  # np.cumsum(counts) / counts.sum()
    lw = 2

    plt.xlabel('Coherence time (ms)')
    plt.grid(True, axis='both', which='both')

    ax = fig.gca()
    plot1, = ax.plot(bin_edges[1:], pdf, linewidth=lw)
    ax.set_ylabel('sub-6 Coherence time pdf')

    counts, bin_edges = np.histogram(data2, bins=num_bins, density=True)
    pdf = counts  # np.cumsum(counts) / counts.sum()
    ax_sec = ax.twinx()
    plot2, = ax_sec.plot(bin_edges[1:], pdf, color='red', linewidth=lw)

    plt.legend([plot1, plot2], labels, loc="best")
    ax_sec.set_ylabel('mmWave Coherence time pdf')
    plt.tight_layout()
    plt.savefig('figures/coherence_time_{}.pdf'.format(p_randomness), format='pdf')
    matplotlib2tikz.save('figures/coherence_time_{}.tikz'.format(p_randomness))
def generate_ccdf(data1, data2, data3):
    fig = plt.figure(figsize=(10.24, 7.68))
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    matplotlib.rcParams['text.usetex'] = True
    matplotlib.rcParams['font.size'] = 20
    matplotlib.rcParams['text.latex.preamble'] = [
        r'\usepackage{amsmath}',
        r'\usepackage{amssymb}'
    ]

    num_bins = 50
    for data in [data1, data2, data3]:
        data_ = data
        counts, bin_edges = np.histogram(data_, bins=num_bins, density=True)
        ccdf = 1 - np.cumsum(counts) / counts.sum()
        ccdf = np.insert(ccdf, 0, 1)
        bin_edges = np.insert(bin_edges[1:], 0,
                              bin_edges[0] - (bin_edges[2] - bin_edges[1]))
        ax = fig.gca()
        ax.plot(bin_edges, ccdf)

    labels = [
        'Tabular $Q$-learning',
        'Deep $Q$-learning (proposed)',
        'Fixed Power Allocation (FPA)'
    ]

    ax.set_xlabel('$\gamma$')
    ax.set_ylabel('$1 - F_\Gamma(\gamma)$')
    ax.set_ylim([0, 1])
    ax.legend(labels, loc="best")
    plt.grid(True)
    plt.tight_layout()
    plt.savefig('figures/voice_ccdf.pdf', format="pdf")
    matplotlib2tikz.save('figures/voice_ccdf.tikz')
    plt.close(fig)
def plot_training(energies, symmetries, parameters):
    _, (eax, pax) = plt.subplots(ncols=2)
    eax.plot(energies, label=r"$\langle E_L\rangle$")
    eax.set_ylabel(r"Ground state energy (a.u.)")
    eax.set_xlabel(r"% of training")
    eax.axhline(y=3, label="Exact", linestyle="--", color="k", alpha=0.5)
    eax.legend()

    pax.plot(np.asarray(parameters))
    pax.set_xlabel(r"% of training")

    matplotlib2tikz.save(__file__ + ".tex")

    _, (sax, wax) = plt.subplots(ncols=2)
    sax.semilogx(symmetries, label=r"$S(\psi_{RBM})$")
    sax.set_ylabel("Symmetry")
    sax.set_xlabel(r"% of training")
    sax.legend(loc="lower right")

    w = np.asarray(parameters[-1])[P * D + N:].reshape(P * D, N)
    wax.matshow(w)
    wax.set_xlabel(r"$\mathbf{W}$")

    matplotlib2tikz.save(__file__ + ".symmetry.tex")
def plot_primary(X, Y, xlabel, ylabel, ymin=0, ymax=MAX_EPISODES_DEEP, filename='plot.pdf'):
    fig = plt.figure(figsize=(10.24, 7.68))
    plt.xlabel(xlabel)
    # X = np.log2(np.array(X))

    ax = fig.gca()
    # ax.xaxis.set_major_locator(MultipleLocator(1))
    # Format the ticklabel to be 2 raised to the power of `x`
    # ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**x)))
    ax.set_autoscaley_on(False)

    plot_, = ax.plot(X, Y, 'k^-')
    ax.set_ylabel(ylabel)
    ax.set_ylim(ymin, ymax)

    plt.grid(True)
    fig.tight_layout()
    plt.savefig('{}'.format(filename), format='pdf')
    matplotlib2tikz.save('{}.tikz'.format(filename))
    # 2: Voting(estimators[2][0:8]+estimators[2][21:22], voting=type),
    # 3: Voting(estimators[3][0:8]+estimators[3][23:24], voting=type)
    # }

    print "Models have been read in!"

    for target in [1, 2, 3]:
        decision = voters[target].transform(data_matrix[target-1])
        if type == "soft":
            decision = sum(decision).transpose()[0]
        elif type == "hard":
            decision = sum(decision.transpose())

        fpr, tpr, threshold = metrics.roc_curve(binarised_labels[target-1],
                                                decision, pos_label=True)

        # printMetrics(fpr, tpr, threshold, 0.99, decision[0], binarised_labels[target-1])
        # printMetrics(fpr, tpr, threshold, 1, decision[0], binarised_labels[target-1])
        prediction = printMetrics(fpr, tpr, threshold, 0.01, decision,
                                  binarised_labels[target-1])
        printMetrics(fpr, tpr, threshold, 0, decision, binarised_labels[target-1])

        plt.subplot(2, 2, 1)
        plt.plot(fpr, tpr)
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.plot((0, 1), (0, 1))

        plt.subplot(2, 2, target+1)
        axes = plt.gca()
        axes.set_ylim([-0.1, 1.1])
        plt.plot(map(lambda x: x, prediction))
        plt.plot(binarised_labels[target-1], "--")

    matplotlib2tikz.save("roc.tex")
    plt.show()
)
plt.annotate(
    r"$\theta$",
    xy=fourier_coord_center + np.array([1., 0.15])
)

plt.axis('off')
ax.set_xlim(0, 20)
ax.set_ylim(0, 10)
# plt.grid(False)
plt.tight_layout()

fig_w_inch, fig_h_inch = fig.get_size_inches()
fig_physical_width = 12.0  # cm
fig_physical_heigth = fig_physical_width * fig_h_inch / fig_w_inch

# plt.show(); import sys; sys.exit()

plt.savefig(
    os.path.join("..", "figures", "tikz", __file__.replace("py", "pdf")),
    bbox_inches='tight',
    pad_inches=0
); import sys; sys.exit()

import matplotlib2tikz as mpl2tikz
mpl2tikz.save(
    os.path.join("..", "figures", "tikz", __file__.replace("py", "tex")),
    figureheight='{}cm'.format(fig_physical_heigth),
    figurewidth='{}cm'.format(fig_physical_width)
)
                'R-squared={:.3f}\n'
                'pvalue={:.4f}').format(slope, intercept, rvalue, rvalue**2, pvalue),
            horizontalalignment='right',
            verticalalignment='top',
            transform=ax.transAxes)

    for i in df.index:  # subject numbers
        ax.text(df[x][i], df[y][i], df['subject'][i], color='white',
                horizontalalignment='center', verticalalignment='center')
    return rvalue, pvalue


# plt.close('all')
# regplot(x='mean_ct', y='mean_chi', data=df)
# regplot(x='mean_ct', y='mean_r2s', data=df)
# regplot(x='mean_r2s', y='mean_chi', data=df)

plt.close('all')
fig, axs = plt.subplots(2, 2, figsize=(12, 12))
axs[0][1].axis('off')
regplot(x='mean_ct', y='mean_r2s', data=df, ax=axs[0][0], txtloc=(1, 0.3))
regplot(x='mean_ct', y='mean_chi', data=df, ax=axs[1][0])
regplot(x='mean_r2s', y='mean_chi', data=df, ax=axs[1][1])
axs[1][1].set_xlim([45, 230])
axs[1][1].set_ylim([-1.25, 0.39])

import matplotlib2tikz
matplotlib2tikz.save('draft_regression_unwrap.tex')
def savePlot(name):
    original_name = copy.deepcopy(name)
    name = showAndSave.prefix + name
    name = ''.join(ch for ch in name if ch.isalnum() or ch == '_')
    name = name.lower()

    if not name.endswith("_notitle"):
        old_title = get_current_title()
        title = old_title
    else:
        title = "None"

    fig = plt.gcf()
    ax = plt.gca()
    gitMetadata = get_git_metadata()
    informationText = 'By Kjetil Lye@ETHZ <*****@*****.**>\nCommit: %s\nRepo: %s\nHostname: %s' % (
        gitMetadata['git_commit'], gitMetadata['git_remote_url'],
        socket.gethostname())

    ax.text(0.95, 0.01, informationText,
            fontsize=3, color='gray',
            ha='right', va='bottom', alpha=0.5,
            transform=ax.transAxes)

    if gitMetadata['git_short_commit'] != "unkown":
        if not name.endswith("_notitle"):
            ax.text(0.2, 0.93, "@" + gitMetadata['git_short_commit'],
                    fontsize=10,
                    ha='right', va='bottom', alpha=0.5,
                    transform=ax.transAxes)

    # We don't want all the output from matplotlib2tikz
    with RedirectStdStreamsToNull():
        if savePlot.saveTikz:
            matplotlib2tikz.save('img_tikz/' + name + '.xyz',
                                 figureheight='\\figureheight',
                                 figurewidth='\\figurewidth',
                                 show_info=False)

            with open('img_tikz/' + name + '.xyz', 'a') as f:
                f.write("\n\n")
                f.write("%% INCLUDE THE COMMENTS AT THE END WHEN COPYING\n")
                f.write("%%%%%%%%%%%%%TITLE%%%%%%%%%%%%%%%%%\n")
                for line in title.splitlines():
                    f.write("%% {}\n".format(line))
                f.write("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
                f.write("\n")
                f.write("%% ALWAYS INCLUDE THE COMMENTS WHEN COPYING THIS PLOT\n")
                f.write("%% DO NOT REMOVE THE COMMENTS BELOW!\n")
                for k in gitMetadata.keys():
                    f.write("%% GIT {} : {}\n".format(k, gitMetadata[k]))
                f.write("%% working_directory : {}\n".format(os.getcwd()))
                f.write("%% hostname : {}\n".format(socket.gethostname()))
                f.write("%% generated_on_date : {}\n".format(
                    str(datetime.datetime.now())))
                f.write("%% python_version: {}\n".format(
                    get_python_description()))
                f.write("%% python modules:\n")
                for module in get_loaded_python_modules():
                    f.write(
                        "%% {name}: {version} ({file})\n".format(**module))
                f.write("%% stacktrace:\n")
                for line in get_stacktrace_str().splitlines():
                    f.write("%% {}\n".format(line))

    savenamepng = 'img/' + name + '.png'
    plt.savefig(savenamepng, bbox_inches='tight')

    writeMetadata(
        savenamepng, {
            'Copyright': 'Copyright, Kjetil Lye@ETHZ <*****@*****.**>',
            'working_directory': os.getcwd(),
            'hostname': socket.gethostname(),
            'generated_on_date': str(datetime.datetime.now()),
            **gitMetadata,
            "modules_loaded": get_loaded_python_modules_formatted(),
            "python_version": get_python_description(),
            'stacktrace': get_stacktrace_str()
        })

    if savePlot.callback is not None:
        title = 'Unknown title'
        try:
            title = plt.gcf()._suptitle.get_text()
        except:
            pass
        savePlot.callback(savenamepng, name, title)

    if not name.endswith("_notitle"):
        old_title = get_current_title()
        plt.title("")
        savePlot(original_name + "_notitle")
        plt.title(old_title)
        title = old_title
    else:
        title = "None"
def acidtest():
    tex_file_path = "./tex/acid.tex"

    # directory where all the generated files will end up
    data_dir = "./data"

    # how to get from the LaTeX file to the data
    tex_relative_path_to_data = "../data"

    figure_width = "7.5cm"

    # open file for writing
    file_handle = open(tex_file_path, "w")

    write_document_header(file_handle, figure_width)

    test_functions = [tf.basic_sin, tf.subplots, tf.image_plot, tf.noise,
                      tf.patches, tf.legends, tf.legends2, tf.logplot,
                      tf.loglogplot, tf.subplot4x4, tf.text_overlay, tf.annotate]

    # see if the command line options tell which subset of the
    # tests are to be run
    test_list = []
    for arg in sys.argv:
        try:
            test_list.append(int(arg))
        except ValueError:
            pass

    if len(test_list) != 0:
        # actually treat a sublist of test_functions
        # remove duplicates:
        test_list = list(set(test_list))
        # create the sublist
        tmp = test_functions
        test_functions = []
        for i in test_list:
            test_functions.append(tmp[i])

    k = 0
    for fun in test_functions:
        mpl.pyplot.cla()
        mpl.pyplot.clf()
        # plot the test example
        comment = fun()
        # convert to TikZ
        tikz_path = data_dir + "/test" + repr(k) + ".tikz"
        matplotlib2tikz.save(tikz_path,
                             figurewidth=figure_width,
                             tex_relative_path_to_data=tex_relative_path_to_data)
        # plot reference figure
        pdf_path = data_dir + "/test" + repr(k) + ".pdf"
        mpl.pyplot.savefig(pdf_path)
        # update the LaTeX file
        write_file_comparison_entry(file_handle,
                                    path.join(tex_relative_path_to_data,
                                              path.basename(pdf_path)),
                                    path.join(tex_relative_path_to_data,
                                              path.basename(tikz_path)),
                                    k,
                                    comment)
        k = k + 1

    write_document_closure(file_handle)
    file_handle.close()
    return
def print_or_show(fig, show, outfile, in_plotdir=True, tikz=None,
                  data=None, store_meta=None):
    """Either print or save figure, or both, depending on arguments.

    Taking a figure, show and/or save figure in the default directory,
    obtained with :func:plotdir.  Creates plot directory if needed.

    :param fig: Figure to store.
    :type fig: matplotlib.Figure object
    :param show: Show figure or not
    :type show: boolean
    :param outfile: File to write figure to, or list of files.  If the
        string ends in a '.', write to x.png and x.pdf.
    :type outfile: string or list of strings
    :param in_plotdir: If true, write to default plot directory.  If
        false, write to current directory or use absolute path.
    :type in_plotdir: boolean
    :param tikz: Try to write tikz code with matplotlib2tikz.  Requires
        that the latter is installed.
    :type tikz: boolean
    :param data: Store associated data in .dat file (useful for pgfplots).
        May be a list of ndarrays, which results in multiple numbered
        datafiles.
    :type data: ndarray or list thereof
    :param store_meta: Also store other info.  This is a string that will
        be written to a file.  If not set or set to None, it will just
        write the pyatmlab version.  The file will use the same basename
        as the outfile, but replacing the extension by "info".  However,
        this only works if outfile is a string and not a list thereof.
        To write nothing, pass an empty string.
    :type store_meta: str.
    """
    if outfile is not None:
        outfiles = [outfile] if isinstance(outfile, str) else outfile

        if isinstance(outfile, str):
            if outfile.endswith("."):
                outfiles = [outfile + ext for ext in ("png", "pdf")]
                infofile = outfile + "info"
            else:
                outfiles = [outfile]
                infofile = None

        # interpret as sequence
        for outf in outfiles:
            if in_plotdir and not '/' in outf:
                outf = os.path.join(plotdir(), outf)
            logging.info("Writing to file: {}".format(outf))
            if not os.path.exists(os.path.dirname(outf)):
                os.makedirs(os.path.dirname(outf))
            fig.canvas.print_figure(outf)

        if store_meta is None:
            pr = subprocess.run(["pip", "freeze"], stdout=subprocess.PIPE)
            info = pr.stdout
        else:
            info = store_meta
        if infofile is not None and info:
            if in_plotdir and not "/" in infofile:
                infofile = os.path.join(plotdir(), infofile)
            with open(infofile, "w", encoding="utf-8") as fp:
                fp.write(info)

    if show:
        matplotlib.pyplot.show()

    if tikz is not None:
        import matplotlib2tikz
        print(now(), "Writing also to:", os.path.join(plotdir(), tikz))
        matplotlib2tikz.save(os.path.join(plotdir(), tikz))

    if data is not None:
        if not os.path.exists(io.plotdatadir()):
            os.makedirs(io.plotdatadir())
        if isinstance(data, numpy.ndarray):
            data = (data,)
        # now take it as a loop
        for (i, dat) in enumerate(data):
            outf = os.path.join(io.plotdatadir(),
                                "{:s}{:d}.dat".format(
                                    os.path.splitext(outfiles[0])[0], i))
            fmt = ("%d" if issubclass(dat.dtype.type, numpy.integer)
                   else '%.18e')
            if len(dat.shape) < 3:
                numpy.savetxt(outf, dat, fmt=fmt)
            elif len(dat.shape) == 3:
                io.savetxt_3d(outf, dat, fmt=fmt)
            else:
                raise ValueError(
                    "Cannot write {:d}-dim ndarray to textfile".format(
                        len(dat.shape)))
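A minimal usage sketch for `print_or_show`, based only on the docstring above; the file names are illustrative and `plotdir()` is assumed to be configured by the surrounding package.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [1, 4, 9])

# An outfile ending in "." writes example.png and example.pdf into the
# default plot directory; tikz="example.tikz" additionally exports TikZ code
# via matplotlib2tikz; show=False skips the interactive window.
print_or_show(fig, show=False, outfile="example.", tikz="example.tikz")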
    X, Y = np.meshgrid(x, y)
    Xtest = np.c_[X.ravel(), Y.ravel()]
    ftest = np.dot(poly_kernel(Xtest, svs), aly) - b
    ftest.shape = 128, 128
    ftest_scale = ftest.copy()
    ftest_scale[ftest > 0] /= 2.0*ftest.max()
    ftest_scale[ftest < 0] /= 2.0*abs(ftest.min())
    ftest_scale += .5
    plt.pcolormesh(X, Y, ftest_scale, shading='gouraud', cmap=custom_colormap())
    plt.contour(X, Y, ftest, [0.0], colors='k', linewidths=4)
    plt.contour(X, Y, ftest, [-1.0, 1.0], colors='k', linewidths=2, linestyles='solid')
    plt.plot(Xdata[ydata < 0, 0], Xdata[ydata < 0, 1], 'ok', markerfacecolor='r')
    plt.plot(Xdata[ydata > 0, 0], Xdata[ydata > 0, 1], 'dk', markerfacecolor='b')


# data
# The original script embeds its training set inline as a raw pickled byte
# string (what appears to be a 200x2 float32 array of points plus 200
# integer +/-1 labels).  The blob is not reproducible as readable text and
# is omitted here.
data = '...'  # raw pickled (Xdata, ydata) byte string, omitted

Xdata, ydata = pickle.loads(data)

# define model
aly = np.array([-107.366745, -471.69812012, 73.48317719, 505.58166504])
b = 1.11400843
svs = Xdata[[9, 99, 176, 195], :]

plot_modeldecision(Xdata, ydata, svs, aly, b, poly_kernel)
matplotlib2tikz.save('example.tikz')
def _run_different_meshes():
    mesh_files = [
        #'states/rectangle10.vtu',
        #'states/rectangle20.vtu',
        #'states/rectangle30.vtu',
        #'states/rectangle40.vtu',
        #'states/rectangle50.vtu',
        #'states/rectangle60.vtu',
        #'states/rectangle70.vtu',
        #'states/rectangle80.vtu',
        #'states/rectangle90.vtu',
        #'states/rectangle100.vtu',
        #'states/rectangle110.vtu',
        #'states/rectangle120.vtu',
        #'states/rectangle130.vtu',
        #'states/rectangle140.vtu',
        #'states/rectangle150.vtu',
        #'states/rectangle160.vtu',
        #'states/rectangle170.vtu',
        #'states/rectangle180.vtu',
        #'states/rectangle190.vtu',
        'states/rectangle200.vtu'
    ]

    mu = 1.0e-0

    # loop over the meshes and compute
    nums_unknowns = []
    num_iterations = {}
    for mesh_file in mesh_files:
        # read and set the mesh
        print
        print 'Reading the mesh...'
        try:
            mesh, point_data, field_data = voropy.reader.read(mesh_file)
        except AttributeError:
            print 'Could not read from file ', mesh_file, '.'
            sys.exit()
        print ' done.'

        # create model evaluator interface
        pynosh_modelval = pynosh.model_evaluator_nls(mu)

        # create preconditioners object
        precs = pynosh.preconditioners(pynosh_modelval)
        precs.set_parameter(mu)

        # recreate all the objects necessary to perform the preconditioner run
        num_unknowns = len(mesh.nodes)
        nums_unknowns.append(num_unknowns)

        # set psi at which to create the Jacobian
        # generate random numbers within the unit circle
        radius = numpy.random.rand(num_unknowns)
        arg = numpy.random.rand(num_unknowns)
        current_psi = numpy.empty(num_unknowns, dtype=complex)
        for k in range(num_unknowns):
            current_psi[k] = cmath.rect(radius[k], arg[k])
        pynosh_modelval.set_current_psi(current_psi)

        # create right hand side and initial guess
        rhs = numpy.random.rand(num_unknowns) \
            + 1j * numpy.random.rand(num_unknowns)

        # initial guess for all operations
        psi0 = numpy.zeros(num_unknowns, dtype=complex)

        test_preconditioners = _create_preconditioner_list(precs, num_unknowns)

        # build the kinetic energy operator
        print 'Building the KEO...'
        start_time = time.clock()
        pynosh_modelval._assemble_kinetic_energy_operator()
        end_time = time.clock()
        print 'done. (', end_time - start_time, 's).'

        # Run the preconditioners and gather the relative residuals.
        relresvecs = _run_preconditioners(pynosh_modelval._keo, rhs, psi0,
                                          test_preconditioners)

        # append the number of iterations to the data
        for prec in test_preconditioners:
            if prec['name'] not in num_iterations.keys():
                num_iterations[prec['name']] = []
            num_iterations[prec['name']].append(
                len(relresvecs[prec['name']]) - 1)

    print num_iterations

    # plot them all
    for prec in test_preconditioners:
        pp.semilogy(nums_unknowns,
                    num_iterations[prec['name']],
                    '-o',
                    label=prec['name'])

    # plot legend
    pp.legend()

    # add title and so forth
    pp.title('CG convergence for $K$')
    pp.xlabel('Number of unknowns $n$')
    pp.ylabel('Number of iterations till $<10^{-10}$')

    matplotlib2tikz.save('meshrun-k.tikz',
                         figurewidth='\\figurewidth',
                         figureheight='\\figureheight')
    pp.show()
    return
# accuracy vs iteration
fig = plt.figure(1, figsize=(8, 6))
# fig = plt.figure()
for data, style in zip(sim_data, line_style):
    plt.plot(n_FC, data['n_iter'], style, label=data['legend'])
    # markevery=marker_at)

plt.ylabel('Iterations needed')
plt.xlabel('Number of local FCs')
# plt.title(title_str)
# plt.ylim(ymin=1e-8)
plt.tight_layout()
plt.legend()

from matplotlib2tikz import save
save('tradeoff.tex', figureheight='4cm', figurewidth='6cm')

#%%
# accuracy vs communication
#fig = plt.figure(2, figsize=(8, 6))
#for data, style in zip(sim_data, line_style):
#    edges = data['edges']
#    comm_cost = np.arange(len(data['opt_gap'])) * edges
#    plt.semilogy(comm_cost, data['opt_gap'], style, label=data['legend'],
#                 markevery=marker_at)
#
#plt.xlabel('Communication cost')
#plt.ylabel('Accuracy')
#plt.ylim(ymin=1e-8)
#plt.legend()
#
#fig.tight_layout()
def _main():
    args = _parse_input_arguments()

    newton_data = list(yaml.load_all(open(args.newton_data_file)))

    # initialize
    TMinv = 0.0  # default: no preconditioner
    TM = 0.0

    # obtain timings
    timings = yaml.load(open(args.timings_file))
    for run in timings["tests"]:
        min_time = min(run["timings"]) / timings["number"]
        if run["target"] == "jacobian":
            TJ = min_time
        if run["target"] == newton_data[0]["preconditioner type"]:
            # peek at first newton run
            TMinv = min_time
        if run["target"] == "prec" and newton_data[0]["preconditioner type"] != "none":
            TM = min_time
        if run["target"] == "inner":
            Tip = min_time
        if run["target"] == "daxpy":
            Tdaxpy = min_time

    # k - number of MINRES iterations
    # p - number of deflation vectors
    def Tp(p):
        return p*(Tip + Tdaxpy)

    TMip = TM + Tip

    def Tqr(p):
        return (p*(p+1)/2) * (Tip + Tdaxpy) + p*TM

    def Tget_proj(p):
        return p*TJ + (p*(p+1)/2 + p)*Tip + Tp(p) + p*Tdaxpy

    def TMINRES(k, p):
        return k * (2*Tip + 2*TJ + TMinv + Tp(p) + 7*Tdaxpy)

    def Tget_ritz(k, p):
        alpha = p*TMinv + (p+k)*p*Tdaxpy
        # alpha += p**2 * Tip  # no computation of ritz residual norms
        return alpha

    def Toverall(k, p):
        return Tqr(p) + Tget_proj(p) + TMINRES(k, p) + Tget_ritz(k, p)

    vanilla_newton_data = list(yaml.load_all(open(args.vanilla_newton_data_file)))[0]
    assert vanilla_newton_data['ix deflation'] == False
    assert vanilla_newton_data['extra deflation'] == 0
    assert vanilla_newton_data['preconditioner type'] == newton_data[0]['preconditioner type']

    newton_steps = list(range(26))
    for step in newton_steps:
        x = [0]    # start at (0, 1.0)
        y = [1.0]
        for newton_datum in newton_data:
            if step < len(newton_datum['Newton results']) - 1:
                num_vanilla_steps = \
                    len(vanilla_newton_data['Newton results'][step]['relresvec']) - 1
                num_steps = len(newton_datum['Newton results'][step]['relresvec']) - 1
                num_defl_vecs = newton_datum['extra deflation']
                if newton_datum['ix deflation']:
                    num_defl_vecs += 1
                if num_defl_vecs > 0:
                    x.append(num_defl_vecs)
                    y.append(Toverall(num_steps, num_defl_vecs)
                             / Toverall(num_vanilla_steps, 0))
        pp.plot(x, y,
                color=str(1.0 - float(step+1)/len(newton_steps)),
                label='step %d' % step)

    pp.ylim([0, 2])
    pp.title('%s, ix defl: %r, prec: %s'
             % (timings["filename"],
                newton_data[0]["ix deflation"],
                newton_data[0]["preconditioner type"]))

    # Write the info out to files.
    if args.imgfile:
        pp.savefig(args.imgfile)
    if args.tikzfile:
        matplotlib2tikz.save(args.tikzfile)
    return
def main():
    print ''
    print 'start - Plots'

    #==============================================================
    # Input Data
    #
    a = 'b'

    filename = "t-05-07-b-DCM-dc"
    amount = 8
    # filename = "t-05-08-b-DCM-wc"
    # amount = 2
    # filename = "t-05-09-a-DCM-tcur"
    # amount = 4
    # filename = "t-05-10-a-DCM-ee"
    # amount = 4
    # filename = "t-05-11-b-DCM-ds"
    # amount = 3
    # filename = "t-05-12-b-DCM-RH"
    # amount = 5
    # filename = "t-05-12-b-DCM-T"
    # amount = 8
    # filename = "t-05-13-a-DCM-vc-vi-10"
    # amount = 2
    # filename = "t-05-13-a-DCM-vc-vi-20"
    # amount = 2
    # filename = "t-05-14-a-DCM-vc-hc-10"
    # amount = 2
    # filename = "t-05-14-a-DCM-vc-hc-20"
    # amount = 2
    # filename = "t-05-13-a-DCM-ic-vi-10"
    # amount = 2
    # filename = "t-05-13-a-DCM-ic-vi-20"
    # amount = 2
    # filename = "t-05-14-a-DCM-ic-hc-10"
    # amount = 2
    # filename = "t-05-14-a-DCM-ic-hc-20"
    # amount = 2
    # filename = "t-06-03-a-DCM-dc"
    # amount = 8
    # filename = "t-06-03-b-DCM-ee-2-dc"
    # amount = 8

    Data = [[]]*(2+amount)

    save_filename = filename
    load_dir = 'dat/results/'
    save_dir = 'plots/'

    if a == 'a':
        load_path = load_dir+'DCM-a.txt'
    else:
        load_path = load_dir+'DCM-b.txt'

    path = os.path.expanduser(load_path)
    data = np.loadtxt(path)

    x = []
    Ps = []
    Pf = []
    Beta = []
    for i in range(len(data)):
        x.append(data[i][0])
        ps = data[i][1]
        pf = 1-ps
        beta = -Normal.inv_cdf(pf)
        Ps.append(ps)
        Pf.append(pf)
        Beta.append(beta)

    Data[0] = x
    if a == 'c':
        Data[1] = Beta
    else:
        Data[1] = Pf

    for num in range(amount):
        load_path = load_dir+filename+'-'+str(num)+'.txt'
        path = os.path.expanduser(load_path)
        data = np.loadtxt(path)

        Ps = []
        Pf = []
        Beta = []
        for i in range(len(data)):
            ps = data[i][1]
            pf = 1-ps
            beta = -Normal.inv_cdf(pf)
            Ps.append(ps)
            Pf.append(pf)
            Beta.append(beta)

        if a == 'c':
            Data[num+2] = Beta
        else:
            Data[num+2] = Pf

    # x = [15,25,35,45,55,65,75,85]
    # plt.clf()
    # age = [10,20,30,40,50]
    # for n in age:
    #     data = []
    #     for i in range(amount):
    #         data.append(Data[i+2][n])
    #     plt.plot(x,data,linewidth = 2)

    plt.clf()
    for num in range(amount+1):
        plt.plot(Data[0], Data[num+1], linewidth=2)

    plt.legend(('ne', '0', '1', '2', '3', '4', '5', '6', '7'), loc='upper left')
    plt.title(r'Structural Reliability')
    plt.xlabel('Time in Service')
    if a == 'a':
        plt.ylabel('Corrosion')
    elif a == 'b':
        plt.ylabel('Failure')
    else:
        plt.ylabel('Reliability Index')
    plt.grid(True)

    save_path_png = save_dir+save_filename+".png"
    save_path_tex = save_dir+save_filename+".tex"
    path_png = os.path.expanduser(save_path_png)
    path_tex = os.path.expanduser(save_path_tex)
    plt.savefig(path_png, size=(4, 3))
    matplotlib2tikz.save(path_tex)

    print 'end - Plots'

    run_time = time.time() - start_time
    print str(datetime.timedelta(seconds=run_time))
def _main():
    '''
    Main function.
    '''
    # parse input arguments
    opts, args = _parse_input_arguments()

    state_files = sorted(glob.glob(str(opts.foldername) + '/solution*.vtu'))

    print state_files[0]

    tol = 1.0e-8
    maxiter = 5000

    # Create the model evaluator.
    # Get mu and mesh for the first grid. This way, the mesh doesn't need to
    # be reset in each step; this assumes of course that the mesh doesn't
    # change throughout the computation.
    print "Reading the state \"" + state_files[0] + "\"..."
    try:
        mesh, psi, field_data = vtkio.read_mesh(state_files[0])
    except AttributeError:
        print "Could not read from file ", state_files[0], "."
        raise
    print " done."

    ginla_modelval = ginla_model_evaluator(field_data["mu"])
    ginla_modelval.set_mesh(mesh)

    # create preconditioner object
    precs = preconditioners(ginla_modelval)
    precs.set_mesh(mesh)

    # --------------------------------------------------------------------------
    # loop over the meshes and compute
    num_iterations = []
    for state_file in state_files:
        # ----------------------------------------------------------------------
        # read and set the mesh
        print
        print "Reading the state \"" + state_file + "\"..."
        try:
            mesh, psi, field_data = vtkio.read_mesh(state_file)
        except AttributeError:
            print "Could not read from file ", state_file, "."
            raise
        print " done."

        mu = field_data["mu"]

        ginla_modelval.set_parameter(mu)
        ginla_modelval.set_current_psi(psi)

        precs.set_parameter(mu)
        # ----------------------------------------------------------------------
        # recreate all the objects necessary to perform the preconditioner run
        num_unknowns = len(mesh.nodes)

        # create preconditioner
        prec_keolu = LinearOperator((num_unknowns, num_unknowns),
                                    matvec=precs.keo_lu,
                                    dtype=complex)

        # create the linear operator
        ginla_jacobian = LinearOperator((num_unknowns, num_unknowns),
                                        matvec=ginla_modelval.compute_jacobian,
                                        dtype=complex)

        # create right hand side and initial guess
        rhs = np.zeros(num_unknowns)

        # initial guess for all operations
        psi0 = np.random.rand(num_unknowns) \
            + 1j * np.random.rand(num_unknowns)
        # ----------------------------------------------------------------------
        # build the kinetic energy operator
        print "Building the KEO..."
        start_time = time.clock()
        ginla_modelval._assemble_kinetic_energy_operator()
        end_time = time.clock()
        print "done. (", end_time - start_time, "s)."
        # ----------------------------------------------------------------------
        # Run the preconditioners and gather the relative residuals.
        print "Solving the system with KEO/LU preconditioning..."
        start_time = time.clock()
        sol, info, relresvec = nm.cg_wrap(ginla_jacobian,
                                          rhs,
                                          x0=psi0,
                                          tol=tol,
                                          maxiter=maxiter,
                                          M=prec_keolu,
                                          inner_product='real')
        end_time = time.clock()
        if info == 0:
            print "success!",
        else:
            print "no convergence.",
        print " (", end_time - start_time, "s,", len(relresvec)-1, " iters)."

        # pp.semilogy( relresvec )
        # pp.show()
        # ----------------------------------------------------------------------
        # append the number of iterations to the data
        num_iterations.append(len(relresvec) - 1)
        # ----------------------------------------------------------------------

    print(num_iterations)

    # plot the number of iterations
    pp.plot(num_iterations, 'o')

    # add title and so forth
    pp.title('CG convergence for $J$')
    pp.xlabel('Continuation step $k$')
    pp.ylabel("Number of CG iterations till $<10^{-10}$")

    matplotlib2tikz.save("pcg-iterations.tikz",
                         figurewidth="\\figurewidth",
                         figureheight="\\figureheight")
    # pp.show()
    return
def UUV_time_delay(uuv1=(0, 0, 15), uuv2=(0, 500, 40), SSP='linear_increasing', graph=0, depth=30, clock_error=0, dist_calc=False, tikz_plot=False, pdf_plot=False): """ :param uuv1: :param uuv2: :param SSP: :param graph: bool or 2 for testing :param depth: :param clock_error: :param dist_calc: :return: """ # initialise Sound speed profile z = np.linspace(0.1, depth, 100) ssp_getter = lambda zi: ssp_function(zi, SSP) c = map(ssp_getter, z) # plot graph of sound speed profile vs depth if graph: if pdf_plot is not None and 'figsize' in pdf_plot: fig = plt.figure(figsize=pdf_plot['figsize']) else: fig = plt.figure() gs = gridspec.GridSpec(1,5) gs.update(wspace=0.05, hspace=0.05) # set the spacing between axes. sspax = plt.subplot(gs[:,4]) sspax.plot(c, z) sspax.set_xlabel("$V_s (ms^{-1})$") sspax.set_xlim(1400,1560) sspax.invert_yaxis() sspax.xaxis.set_major_locator(FixedLocator([1400,1480,1560])) plt.setp(sspax.xaxis.get_majorticklabels(), rotation=30) tofax = plt.subplot(gs[:,0:4], sharey=sspax) plt.setp(sspax.get_yticklabels(),visible=False) tofax.set_ylabel("Depth $(m)$") tofax.set_xlabel("Distance $(m)$") fard = np.linalg.norm(uuv2[0:2])-np.linalg.norm(uuv1[0:2]) tofax.plot([0], [uuv1[2]], 'or', zorder=10) tofax.plot([fard], [uuv2[2]], 'or', zorder=10) tofax.set_xlim(-0.05*fard, fard*1.05) print fard else: tofax = None # Position of uuv 1 Pos = namedtuple('Pos', ['x', 'y', 'z']) uuv_pos = Pos(*uuv1), Pos(*uuv2) # position of uuv 2 # Calculate the time true delay between the UUVs delay, true_dist = Raytrace_model(uuv_pos, depth, SSP, graph, ax=tofax) # nargout=2 response = (delay, true_dist) if dist_calc: guess_dist1, guess_dist2, guess_dist3 = Distance_Estimates(SSP, c, clock_error, delay, uuv_pos) response += guess_dist1, guess_dist2, guess_dist3 if graph == 1: response += ((gs, sspax, tofax)) if tikz_plot and all(map(lambda v:v in tikz_plot, ['filepath',])): tikz_plot['figure'] = fig print("Dumping: {}".format(tikz_plot)) matplotlib2tikz.save(**tikz_plot) if pdf_plot and all(map(lambda v:v in pdf_plot, ['filepath',])): fig.savefig(pdf_plot['filepath']+'.pdf', format='pdf', bbox_inches='tight') return response
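# A minimal sketch of the keyword-dict pattern used by UUV_time_delay above:
# export options are collected in a dict (with a hypothetical file name here)
# and expanded into matplotlib2tikz.save with **; 'figure' selects which
# figure gets dumped.
import matplotlib.pyplot as plt
import matplotlib2tikz

fig = plt.figure()
plt.plot([0, 100, 200], [15, 25, 40])
plt.xlabel('Distance $(m)$')
plt.ylabel('Depth $(m)$')

tikz_opts = {'filepath': 'uuv-delay-sketch.tex', 'figure': fig}
if 'filepath' in tikz_opts:  # same guard idea as in UUV_time_delay
    matplotlib2tikz.save(**tikz_opts)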
def train_renderV2(model, train_dataloader, test_dataloader, n_epochs, loss_function, date4File, cubeSetName, batch_size, fileExtension, device, obj_name, noise, number_train_im): # monitor loss functions as the training progresses lr = 0.001 loop = n_epochs Step_losses = [] current_step_loss = [] Test_losses = [] Epoch_losses = [] count = 0 testcount = 0 renderCount = 0 regressionCount = 0 renderbar = [] regressionbar = [] Im2ShowGT = [] Im2ShowGCP = [] numbOfImageDataset = number_train_im for epoch in tqdm(range(n_epochs)): ## Training phase model.train() print('train phase epoch {}'.format(epoch)) for image, silhouette, parameter in train_dataloader: loss = 0 Step_loss = 0 numbOfImage = image.size()[0] image = image.to(device) parameter = parameter.to(device) silhouette = silhouette.to(device) params = model(image) #should be size [batchsize, 6] # print('computed parameters are {}'.format(params)) # print(params.size()) for i in range(0, numbOfImage): #create and store silhouette model.t = params[i, 3:6] R = params[i, 0:3] # print(R) # print(model.t) model.R = R2Rmat(R) # angle from resnet are in radian # print(model.t) # print(model.R) current_sil = model.renderer(model.vertices, model.faces, R=model.R, t=model.t, mode='silhouettes').squeeze() # print(current_sil) current_GT_sil = (silhouette[i] / 255).type( torch.FloatTensor).to(device) # print(current_GT_sil) if (model.t[2] > 1 and model.t[2] < 10 and torch.abs(model.t[0]) < 1.5 and torch.abs(model.t[1]) < 1.5): optimizer = torch.optim.Adam(model.parameters(), lr=lr) loss += nn.BCELoss()(current_sil, current_GT_sil) print('render') renderCount += 1 else: optimizer = torch.optim.Adam(model.parameters(), lr=0.001) loss += nn.MSELoss()(params[i, 3:6], parameter[i, 3:6]).to(device) print('regression') regressionCount += 1 loss = loss / numbOfImage #take the mean of the step loss optimizer.zero_grad() loss.backward() optimizer.step() print(loss) Step_losses.append(loss.detach().cpu().numpy() ) # contain all step value for all epoch current_step_loss.append(loss.detach().cpu().numpy( )) #contain only this epoch loss, will be reset after each epoch count = count + 1 epochloss = np.mean(current_step_loss) current_step_loss = [] Epoch_losses.append(epochloss) #most significant value to store print(epochloss) print(renderCount, regressionCount) renderbar.append(renderCount) regressionbar.append(regressionCount) renderCount = 0 regressionCount = 0 #validation phase print('test phase epoch {}'.format(epoch)) model.eval() for image, silhouette, parameter in test_dataloader: Test_Step_loss = [] numbOfImage = image.size()[0] image = image.to(device) parameter = parameter.to(device) silhouette = silhouette.to(device) params = model(image) # should be size [batchsize, 6] # print('computed parameters are {}'.format(params)) # print(params.size()) for i in range(0, numbOfImage): model.t = params[i, 3:6] R = params[i, 0:3] model.R = R2Rmat(R) # angle from resnet are in radian current_sil = model.renderer(model.vertices, model.faces, R=model.R, t=model.t, mode='silhouettes').squeeze() current_GT_sil = (silhouette[i] / 255).type( torch.FloatTensor).to(device) loss += nn.BCELoss()(current_sil, current_GT_sil) Test_Step_loss.append(loss.detach().cpu().numpy()) if (epoch == n_epochs - 1): print('saving image to show') imgCP, _, _ = model.renderer(model.vertices, model.faces, torch.tanh(model.textures), R=model.R, t=model.t) imgCP = imgCP.squeeze() # float32 from 0-1 imgCP = imgCP.detach().cpu().numpy().transpose((1, 2, 0)) imgCP = (imgCP * 255).astype( 
np.uint8) # cast from float32 255.0 to 255 uint8 imgGT = image[i].detach().cpu().numpy() imgGT = (imgGT * 0.5 + 0.5).transpose(1, 2, 0) #denormalization Im2ShowGT.append(imgCP) Im2ShowGCP.append(imgGT) a = plt.subplot(2, numbOfImage, i + 1) plt.imshow(imgGT) a.set_title('GT {}'.format(i)) plt.xticks([0, 512]) plt.yticks([]) a = plt.subplot(2, numbOfImage, i + 1 + numbOfImage) plt.imshow(imgCP) a.set_title('Rdr {}'.format(i)) plt.xticks([0, 512]) plt.yticks([]) loss = loss / numbOfImage Test_losses.append(loss.detach().cpu().numpy()) loss = 0 # reset current test loss testcount = testcount + 1 #-----------plot and save section ------------------------------------------------------------------------------------ fig, (p1, p2, p3, p4) = plt.subplots(4, figsize=(15, 10)) #largeur hauteur #subplot 1 rollingAv = pd.DataFrame(Step_losses) rollingAv.rolling(2, win_type='triang').sum() p1.plot(np.arange(count), rollingAv, label="step Loss rolling average") p1.set(ylabel='BCE Step Loss') p1.set_ylim([0, 4]) # Place a legend to the right of this smaller subplot. p1.legend() #subplot 2 p2.plot(np.arange(n_epochs), Epoch_losses, label="epoch Loss") p2.set(ylabel=' Mean of BCE training step loss') p2.set_ylim([0, 4]) # Place a legend to the right of this smaller subplot. p2.legend() #subplot 3 ind = np.arange(n_epochs) #index width = 0.35 p3.bar(ind, renderbar, width, color='#d62728', label="render") height_cumulative = renderbar p3.bar(ind, regressionbar, width, bottom=height_cumulative, label="regression") p3.set(ylabel='render/regression call') p3.set(xlabel='epoch') p3.set_ylim([0, numbOfImageDataset]) p3.set_xticks(ind) p3.legend() p4.plot(np.arange(testcount), Test_losses, label="Test Loss") p4.set(ylabel='Mean of BCE test step loss') p4.set_ylim([0, 5]) # Place a legend to the right of this smaller subplot. p4.legend() plt.show() fig.savefig('results/render_{}batch_{}.pdf'.format(batch_size, n_epochs)) import matplotlib2tikz matplotlib2tikz.save("results/render_{}batch_{}.tex".format( batch_size, n_epochs))
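# A minimal sketch of the training-summary export above, with made-up loss
# values standing in for the real training history; figures with several
# subplots are emitted as a pgfplots groupplot, so the including LaTeX
# document needs \usepgfplotslibrary{groupplots}.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib2tikz

batch_size, n_epochs = 4, 10                  # placeholder hyperparameters
step_losses = np.random.rand(50)
epoch_losses = np.random.rand(n_epochs)

fig, (p1, p2) = plt.subplots(2, figsize=(15, 10))
p1.plot(np.arange(len(step_losses)), step_losses, label='step loss')
p1.set(ylabel='BCE step loss')
p1.legend()
p2.plot(np.arange(n_epochs), epoch_losses, label='epoch loss')
p2.set(ylabel='mean BCE epoch loss')
p2.legend()

fig.savefig('render_{}batch_{}.pdf'.format(batch_size, n_epochs))
matplotlib2tikz.save('render_{}batch_{}.tex'.format(batch_size, n_epochs))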
import matplotlib2tikz as tikz import matplotlib.pyplot as plt import numpy as np t = np.arange(0.0, 2.0, 0.01) s = np.sin(2*np.pi*t) plt.plot(t, s) plt.xlabel('time (s)') plt.ylabel('voltage (mV)') plt.title('About as simple as it gets, folks') plt.grid(True) tikz.save('mytikz.tex')
def __init__(self, fig, save_kwargs=None): if save_kwargs is None: save_kwargs = dict() # convert to tikz file _, tmp_base = tempfile.mkstemp() tikz_file = tmp_base + "_tikz.tex" matplotlib2tikz.save(tikz_file, figurewidth="7.5cm", **save_kwargs) # test other height specs matplotlib2tikz.save(tikz_file + ".height", figureheight="7.5cm", show_info=True, strict=True, **save_kwargs) # save reference figure mpl_reference = tmp_base + "_reference.pdf" plt.savefig(mpl_reference) # close figure plt.close(fig) # create a latex wrapper for the tikz wrapper = """\\documentclass{standalone} \\usepackage[utf8]{inputenc} \\usepackage{pgfplots} \\usepgfplotslibrary{groupplots} \\usetikzlibrary{shapes.arrows} \\pgfplotsset{compat=newest} \\begin{document} \\input{%s} \\end{document}""" % tikz_file.replace("\\", "/") tex_file = tmp_base + ".tex" with open(tex_file, "w") as f: f.write(wrapper) # change into the directory of the TeX file os.chdir(os.path.dirname(tex_file)) # compile the output to pdf try: tex_out = subprocess.check_output( # use pdflatex for now until travis features a more modern # lualatex ["pdflatex", "--interaction=nonstopmode", tex_file], stderr=subprocess.STDOUT, ) except subprocess.CalledProcessError as e: print("Command output:") print("=" * 70) print(e.output) print("=" * 70) raise pdf_file = tmp_base + ".pdf" # PIL can only read images with up to 89478485 pixels (to prevent # decompression bomb DOS attacks). Make sure the resulting image will # be smaller. pdfinfo_out = subprocess.check_output( ["pdfinfo", pdf_file], stderr=subprocess.STDOUT).decode("utf-8") # Extract page size # Page size: 195.106 x 156.239 pts m = re.search("Page size: *([0-9]+\\.[0-9]+) x ([0-9]+\\.[0-9]+) pts", pdfinfo_out) # get dims in inches dims = [float(m.group(1)) / 72, float(m.group(2)) / 72] assert dims is not None max_num_pixels = 89e6 max_dpi = math.sqrt(max_num_pixels / dims[0] / dims[1]) dpi = min(2400, max_dpi) # Convert PDF to PNG. # Use a high resolution here to cover small changes. ptp_out = subprocess.check_output( ["pdftoppm", "-r", str(dpi), "-png", pdf_file, tmp_base], stderr=subprocess.STDOUT, ) png_file = tmp_base + "-1.png" self.phash = imagehash.phash(Image.open(png_file)).__str__() self.png_file = png_file self.pdf_file = pdf_file self.tex_out = tex_out self.ptp_out = ptp_out self.mpl_reference = mpl_reference self.tikz_file = tikz_file return
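# A minimal sketch of the perceptual-hash idea this helper is built on,
# assuming Pillow and imagehash are installed; 'reference.png' and
# 'output.png' are placeholder file names.
import imagehash
from PIL import Image

ref_hash = imagehash.phash(Image.open('reference.png'))
new_hash = imagehash.phash(Image.open('output.png'))
print(ref_hash - new_hash)  # Hamming distance between the two 64-bit hashes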
def _main(): # get command line arguments test_list = _parse_options() tex_file_path = "./tex/acid.tex" # directory where all the generated files will end up data_dir = "./data" # how to get from the LaTeX file to the data tex_relative_path_to_data = "../data" figure_width = "7.5cm" # open file for writing file_handle = open( tex_file_path, "w" ) write_document_header(file_handle, figure_width) # Get all function names from the testfunctions module. function_strings = dir(testfunctions) # Remove all functions that start with "_" and that are not # in a certain exclude list (uuuugly). exclude_list = {'mpl', 'np', 'pp'} tmp = [] for s in function_strings: if s[0] != '_' and s not in exclude_list: tmp.append(s) function_strings = tmp if not test_list is None: # actually treat a sublist of test_functions # remove duplicates and sort test_list = sorted(set(test_list)) else: # all indices test_list = xrange(0, len(function_strings)) for k in test_list: print 'Test function %d (%s)...' % (k, function_strings[k]), pp.cla() pp.clf() # plot the test example comment = getattr(testfunctions, function_strings[k])() # convert to TikZ tikz_path = data_dir + "/test%r.tex" % k matplotlib2tikz.save(tikz_path, figurewidth = figure_width, tex_relative_path_to_data = \ tex_relative_path_to_data ) # plot reference figure pdf_path = data_dir + "/test" + repr(k) + ".pdf" pp.savefig(pdf_path) # update the LaTeX file write_file_comparison_entry(file_handle, path.join(tex_relative_path_to_data, path.basename(pdf_path)), path.join(tex_relative_path_to_data, path.basename(tikz_path)), k, comment ) print 'done.' write_document_closure(file_handle) file_handle.close() return
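# A minimal sketch of the tex_relative_path_to_data option used above: when
# the generated .tex externalizes data (for example image content), this
# string is what the TikZ code uses to reach those files from the LaTeX
# document's location. The file name is a placeholder.
import matplotlib.pyplot as plt
import matplotlib2tikz

plt.imshow([[0.0, 1.0], [1.0, 0.0]])  # image data is written next to the .tex
matplotlib2tikz.save(
    'test-sketch.tex',
    figurewidth='7.5cm',
    tex_relative_path_to_data='../data',
)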
def check_hash(test, name): # import the test test.plot() # convert to tikz file _, tmp_base = tempfile.mkstemp(prefix=name) tikz_file = tmp_base + '_tikz.tex' matplotlib2tikz.save( tikz_file, figurewidth='7.5cm', show_info=False ) # save reference figure mpl_reference = tmp_base + '_reference.pdf' pp.savefig(mpl_reference) # create a latex wrapper for the tikz wrapper = '''\\documentclass{standalone} \\usepackage[utf8]{inputenc} \\usepackage{pgfplots} \\usepgfplotslibrary{groupplots} \\pgfplotsset{compat=newest} \\begin{document} \\input{%s} \\end{document}''' % tikz_file tex_file = tmp_base + '.tex' with open(tex_file, 'w') as f: f.write(wrapper) # change into the directory of the TeX file os.chdir(os.path.dirname(tex_file)) # compile the output to pdf tex_out = subprocess.check_output( # use pdflatex for now until travis features a more modern lualatex ['pdflatex', '--interaction=nonstopmode', tex_file], stderr=subprocess.STDOUT ) pdf_file = tmp_base + '.pdf' # Convert PDF to PNG. ptp_out = subprocess.check_output( ['pdftoppm', '-rx', '600', '-ry', '600', '-png', pdf_file, tmp_base], stderr=subprocess.STDOUT ) png_file = tmp_base + '-1.png' # compute the phash of the PNG phash = imagehash.phash(Image.open(png_file)).__str__() if test.phash != phash: # Compute the Hamming distance between the two 64-bit numbers hamming_dist = bin(int(phash, 16) ^ int(test.phash, 16)).count('1') print('Output file: %s' % png_file) print('computed pHash: %s' % phash) print('reference pHash: %s' % test.phash) print( 'Hamming distance: %s (out of %s)' % (hamming_dist, 4 * len(phash)) ) print('pdflatex output:') print(tex_out.decode('utf-8')) print('pdftoppm output:') print(ptp_out.decode('utf-8')) if 'DISPLAY' not in os.environ: # upload to chunk.io if we're on a headless client out = subprocess.check_output( ['curl', '-sT', mpl_reference, 'chunk.io'], stderr=subprocess.STDOUT ) print('Uploaded reference matplotlib PDF file to %s' % out) out = subprocess.check_output( ['curl', '-sT', tikz_file, 'chunk.io'], stderr=subprocess.STDOUT ) print('Uploaded TikZ file to %s' % out.decode('utf-8')) out = subprocess.check_output( ['curl', '-sT', pdf_file, 'chunk.io'], stderr=subprocess.STDOUT ) print('Uploaded output PDF file to %s' % out.decode('utf-8')) out = subprocess.check_output( ['curl', '-sT', png_file, 'chunk.io'], stderr=subprocess.STDOUT ) print('Uploaded output PNG file to %s' % out.decode('utf-8')) assert test.phash == phash
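# A minimal sketch of the Hamming-distance check in check_hash above: the two
# hex-encoded pHashes are XORed as integers and the differing bits counted.
# The hash values are made up.
reference_phash = 'c1c4e4a484d4f0e1'
computed_phash = 'c1c4e4a484d4f0e3'
hamming_dist = bin(int(reference_phash, 16) ^ int(computed_phash, 16)).count('1')
print('%d of %d bits differ' % (hamming_dist, 4 * len(computed_phash)))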
def print_or_show(fig, show, outfile, in_plotdir=True, tikz=None, data=None, store_meta="", close=True, dump_pickle=True): """Either print or save figure, or both, depending on arguments. Taking a figure, show and/or save figure in the default directory, obtained with :func:plotdir. Creates plot directory if needed. :param fig: Figure to store. :type fig: matplotlib.Figure object :param show: Show figure or not :type show: boolean :param outfile: File to write figure to, or list of files. If the string ends in a '.', write to x.png and x.pdf. :type outfile: string or list of strings :param in_plotdir: If true, write to default plot directory. If false, write to currect directory or use absolute path. :type in_plotdir: boolean :param tikz: Try to write tikz code with matplotlib2tikz. Requires that the latter is installed. :type tikz: boolean :param data: Store associated data in .dat file (useful for pgfplots). May be a list of ndarrays, which results in multiple numbered datafiles. :type data: ndarray or list thereof :param store_meta: Also store other info. This is a string that will be written to a file. If not set or set to None, it will just write the pyatmlab version. The file will use the same basename as the outfile, but replacing the extention by "info". However, this only works if outfile is a string and not a list thereof. To write nothing, pass an empty string. :type store_meta: str. :param close: If true, close figure. Defaults to true. :type close: bool. """ if outfile is not None: outfiles = [outfile] if isinstance(outfile, str) else outfile bs = pathlib.Path(plotdir()) if isinstance(outfile, str): if outfile.endswith("."): outfiles = [bs / pathlib.Path(outfile+ext) for ext in ("png", "pdf")] infofile = bs / pathlib.Path(outfile + "info") figfile = bs / pathlib.Path(outfile + "pkl.xz") else: outfiles = [bs / pathlib.Path(outfile)] infofile = None figfile = None if infofile is not None: infofile.parent.mkdir(parents=True, exist_ok=True) logging.debug("Obtaining verbose stack info") pr = subprocess.run(["pip", "freeze"], stdout=subprocess.PIPE) info = " ".join(sys.argv) + "\n" + pr.stdout.decode("utf-8") + "\n" info += tools.get_verbose_stack_description() # if infofile is not None and info: logging.info("Writing info to {!s}".format(infofile)) with infofile.open("w", encoding="utf-8") as fp: fp.write(info) if dump_pickle and figfile is not None: logging.info("Writing figure object to {!s}".format(figfile)) with lzma.open(str(figfile), "wb", preset=lzma.PRESET_DEFAULT) as fp: pickle.dump(fig, fp, protocol=4) # interpret as sequence for outf in outfiles: logging.info("Writing to file: {!s}".format(outf)) outf.parent.mkdir(parents=True, exist_ok=True) i = 0 while True: i += 1 try: fig.canvas.print_figure(str(outf)) except matplotlib.cbook.Locked.TimeoutError: logging.warning("Failed attempt no. 
{:d}".format(i)) if i > 100: raise else: break if show: matplotlib.pyplot.show() if close: matplotlib.pyplot.close(fig) if tikz is not None: import matplotlib2tikz print(now(), "Writing also to:", os.path.join(plotdir(), tikz)) matplotlib2tikz.save(os.path.join(plotdir(), tikz)) if data is not None: if not os.path.exists(io.plotdatadir()): os.makedirs(io.plotdatadir()) if isinstance(data, numpy.ndarray): data = (data,) # now take it as a loop for (i, dat) in enumerate(data): outf = os.path.join(io.plotdatadir(), "{:s}{:d}.dat".format( os.path.splitext(outfiles[0])[0], i)) fmt = ("%d" if issubclass(dat.dtype.type, numpy.integer) else '%.18e') if len(dat.shape) < 3: numpy.savetxt(outf, dat, fmt=fmt) elif len(dat.shape) == 3: io.savetxt_3d(outf, dat, fmt=fmt) else: raise ValueError("Cannot write {:d}-dim ndarray to textfile".format( len(dat.shape)))
b = np.random.rand(dim, 1)

def matvec(x): return A.dot(x)

# print(np.linalg.solve(A, b))
ans, hist = cg(matvec, b, np.zeros(b.shape), disp=False, tol=1e-10, maxiter=100)
plot_performance(hist['norm_r'], 0, 'b', "50 clusters")

s = np.array(list(range(1, 11)) * 10, dtype=float)
s += np.random.normal(0, 0.001, s.shape)
A = (Q.dot(np.diag(s))).dot(Q.T)
np.random.seed(1)
b = np.random.rand(dim, 1)

def matvec(x): return A.dot(x)

# print(np.linalg.solve(A, b))
ans, hist = cg(matvec, b, np.zeros(b.shape), disp=False, tol=1e-10, maxiter=100)
plot_performance(hist['norm_r'], 0, 'r', "10 clusters")

plt.ylabel(r'$\log||Ax_k - b||$')
plt.xlabel("Iteration number")
title = "Conjugate Gradient Method"
# title += ", D = " + str(dim + 1) + ", N = " + str(data_size) + "."
plt.title(title)
plt.legend()
file_name = "Plots/cg_"
file_name += str(dim) + "_" + str(num_clusters)
file_name += ".tikz"
tikz.save(file_name)
plt.show()
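# A minimal sketch of collecting the residual history with SciPy's CG instead
# of the project's own cg() helper used above; the matrix here is a random
# symmetric positive definite stand-in.
import numpy as np
from scipy.sparse.linalg import cg as scipy_cg

np.random.seed(0)
dim = 50
Q, _ = np.linalg.qr(np.random.rand(dim, dim))
A = Q @ np.diag(np.linspace(1.0, 10.0, dim)) @ Q.T  # SPD by construction
b = np.random.rand(dim)

norm_r = []
scipy_cg(A, b, callback=lambda xk: norm_r.append(np.linalg.norm(b - A @ xk)))
print(norm_r[-1])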
# psi.parameters = org_params psi = SimpleGaussian(0.8) sampler = ImportanceSampler(system, psi, 0.1) sampler.thermalize(10000) E_training = EnergyCallback(samples=1000000, verbose=True) train( psi, H, sampler, iters=150, samples=1000, gamma=0.0, optimizer=opt, call_backs=[E_training], call_back_resolution=50, ) E.append(np.asarray(E_training)) if master_rank(): fig, ax = plt.subplots() ax.set_xlabel(r"% of training") ax.set_ylabel(r"Energy error [a.u.]") for e, label in zip(E, labels): ax.semilogy(np.abs(e / N - D / 2), label=label) ax.legend() matplotlib2tikz.save( __file__ + ".tex", extra_axis_parameters=["compat=newest", "legend pos=outer north east"], ) plt.show()
def _main(): # get command line arguments test_list = _parse_options() tex_file_path = './tex/acid.tex' # directory where all the generated files will end up data_dir = './data' # how to get from the LaTeX file to the data tex_relative_path_to_data = '../data' figure_width = '7.5cm' # open file for writing file_handle = open(tex_file_path, 'w') write_document_header(file_handle, figure_width) # Get all function names from the testfunctions module. function_strings = dir(testfunctions) # Remove all functions that start with '_' and that are not # in a certain exclude list (uuuugly). exclude_list = {'mpl', 'np', 'pp'} tmp = [] for s in function_strings: if s[0] != '_' and s not in exclude_list: tmp.append(s) function_strings = tmp if not test_list is None: # actually treat a sublist of test_functions # remove duplicates and sort test_list = sorted(set(test_list)) else: # all indices test_list = xrange(0, len(function_strings)) for k in test_list: print 'Test function %d (%s)...' % (k, function_strings[k]), pp.cla() pp.clf() # plot the test example comment = getattr(testfunctions, function_strings[k])() # plot reference figure pdf_path = data_dir + '/test' + repr(k) + '.pdf' pp.savefig(pdf_path) pdf_path = path.join(tex_relative_path_to_data, path.basename(pdf_path) ) # Open figure, insert PDF file_handle.write('% test plot ' + str(k) + '\n' '\\begin{figure}%\n' '\\centering%\n' '\\begin{tabular}{cc}\n' '\includegraphics[width=\\figwidth]' '{' + str(pdf_path) + '}%\n' '&\n' ) # convert to TikZ tikz_path = data_dir + '/test%r.tex' % k tikz_tex_path = path.join(tex_relative_path_to_data, path.basename(tikz_path) ) try: matplotlib2tikz.save( tikz_path, figurewidth=figure_width, tex_relative_path_to_data=tex_relative_path_to_data, show_info=False ) file_handle.write('\\input{%s}\n' % tikz_tex_path) except: file_handle.write('% fail\n') # Close the figure file_handle.write('\\end{tabular}\n' '\\caption{' + str(comment) + ' (test ID ' + str(k) + ').}%\n' '\\end{figure}\\clearpage\n\n' ) print 'done.' write_document_closure(file_handle) file_handle.close() return
def main(figname, name, subname, collect = [], styles = [], tikz = False, pdf = True): figures_module = import_module("analyses.figures") plot_fn = getattr(figures_module, "plot_" + figname) collect.sort() styles.sort() collect_fns = [getattr(figures_module, 'collect_' + collect_name) for collect_name in collect] style_fns = [getattr(figures_module, 'style_' + style_name) for style_name in styles] names = [name] if len(subname) > 0: names.append(subname) pattern = string.join(names, '-') + '[0-9]*\.pkl' multiple_stats = [] designs = None max_iter = -1 for experiment in [Experiment.load(splitext(basename(f))[0]) for f in os.listdir(Experiment.SAVE_DIR) if re.match(pattern, f)]: print "Loaded %s, ended at iteration = %d" % (experiment.name, experiment.itr) if experiment.itr >= max_iter: max_iter = experiment.itr multiple_stats.append((experiment.stats, experiment.As, experiment.Xs)) designs = experiment.designs else: print "Skipping data, since it's less than %d" % max_iter if designs is None: raise Exception("No designs matched " + pattern) # Convert the data structure to be indexed by design data = [{'design': design, 'stats': [stats[i] for stats, _, _ in multiple_stats], 'As': [As[i] for _, As, _ in multiple_stats], 'Xs': [Xs[i] for _, _, Xs in multiple_stats] } for i, design in enumerate(designs)] # Only collect data for certain designs data = [d for d in data if reduce(operator.and_, [f(d['design']) for f in collect_fns], True)] # Clean design names design_names = [d['design'].name() for d in data] common_part = reduce(lambda l, s: _mstr(l,s), design_names, design_names[-1]) for d in data: d['name'] = string.join([s for s in d['design'].name().split(common_part) if len(s)>0],'-') plot_fn(data, style_fns) plt.draw() file_name = string.join([FIGURES_DIR + name, figname] + collect + styles, '-') if pdf: pdf_filename = file_name + '.pdf' pp = PdfPages(pdf_filename) pp.savefig(plt.gcf()) pp.close() print "Saved to %s" % pdf_filename if tikz: tikz_filename = file_name + '.tikz' save(tikz_filename, figureheight = '\\figureheight', figurewidth = '\\figurewidth', extra = Set([ 'y tick label style={/pgf/number format/.cd, precision=3, fixed, 1000 sep={}}','scaled y ticks=false', 'x tick label style={/pgf/number format/.cd, precision=3, fixed, 1000 sep={}}','scaled x ticks=false', ])) print "Saved to %s" % tikz_filename sys.stdout.write("\n\nDone, close the figures to exit\n"); sys.stdout.flush() plt.waitforbuttonpress()
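# A minimal sketch of passing raw pgfplots axis options at save time; the
# extra=Set(...) keyword used above appears to be an older spelling of what
# more recent matplotlib2tikz versions call extra_axis_parameters. The file
# name is a placeholder.
import matplotlib.pyplot as plt
import matplotlib2tikz

plt.plot([1000, 2000, 3000], [0.001, 0.002, 0.003])
matplotlib2tikz.save(
    'scaled-ticks-sketch.tikz',
    figureheight='\\figureheight',
    figurewidth='\\figurewidth',
    extra_axis_parameters={
        'y tick label style={/pgf/number format/.cd, precision=3, fixed, 1000 sep={}}',
        'scaled y ticks=false',
    },
)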
def main(): random = 123 tracker_global_train = torch.load('tracker_notmnist_random_' + str(random) + '_train.pt') # [K, J, M] print(tracker_global_train['weight'].data.shape) # [K, M] print(tracker_global_train['bias'].data.shape) # [N, K, M] print(tracker_global_train['alpha_p'].data.shape) # [N, K, M] print(tracker_global_train['alpha_p_sparsemax'].data.shape) # [N, J, M] print(tracker_global_train['features'].data.shape) # [N, J, M] print(tracker_global_train['z'].data.shape) K = 10 m_Z = [] M = tracker_global_train['weight'].shape[2] probs_0 = tracker_global_train['alpha_p'].data.cpu().numpy()[ range(0, 1), :, -1] probs_1 = tracker_global_train['alpha_p'].data.cpu().numpy()[ range(1, 2), :, -1] probs_0_sparsemax = tracker_global_train['alpha_p_sparsemax'].data.cpu( ).numpy()[range(0, 1), :, -1] probs_1_sparsemax = tracker_global_train['alpha_p_sparsemax'].data.cpu( ).numpy()[range(1, 2), :, -1] features_0 = tracker_global_train['features'].data.cpu().numpy()[ range(0, 1), :, -1] features_1 = tracker_global_train['features'].data.cpu().numpy()[ range(1, 2), :, -1] for num in range(2): indices = range(num, num + 1) iter = -1 # [J, K] weights = tracker_global_train['weight'].data.cpu().numpy()[:, :, iter].T # [K, 1] bias = np.expand_dims( tracker_global_train['bias'].data.cpu().numpy()[:, iter], axis=-1) # [N, J] features = tracker_global_train['features'].data.cpu().numpy()[ indices, :, iter] # [N, J] print(tracker_global_train['x'].data.cpu().numpy().shape) # [N, J] x = np.reshape( tracker_global_train['x'].data.cpu().numpy()[indices, :, :, :, iter], (28, 28)) # [N, 1] c = tracker_global_train['c'].data.cpu().numpy()[indices, :, iter] # [N, K] alpha_p = tracker_global_train['alpha_p'].data.cpu().numpy()[ indices, :, iter] # [N, K] alpha_p_sparsemax = tracker_global_train['alpha_p_sparsemax'].data.cpu( ).numpy()[indices, :, iter] # [N, K] z_one_hot = tracker_global_train['z'].data.cpu().numpy()[indices, :, iter] z = np.argmax(z_one_hot, axis=1) mean = features.flatten() print("checking DST shapes", weights.shape, bias.shape, features.shape, mean.shape) dst_obj = DST() dst_obj.weights_from_linear_layer(weights, bias, features, mean) dst_obj.get_output_mass(num_classes=K) m_Z.append(dst_obj.output_mass[tuple(range(K))]) print('sum of singletons', sum(dst_obj.output_mass_singletons.flatten())) norm_singletons = deepcopy(alpha_p) norm_singletons[dst_obj.output_mass_singletons == 0.] = 0. 
norm_singletons = norm_singletons / np.sum(norm_singletons) if num == 1: plt.figure() width = 0.5 p1 = plt.bar(np.arange(K), alpha_p.flatten(), width, color='blue', alpha=0.5) p2 = plt.bar(np.arange(K), norm_singletons.flatten(), width, color='green', alpha=0.5) p3 = plt.bar(np.arange(K), alpha_p_sparsemax.flatten(), width, color='orange', alpha=0.5) plt.xlabel('Z') plt.ylabel('Values') plt.ylim(0, 1.0) plt.title('Values for 1') plt.legend([ 'Softmax Probabilities', 'Normalized Filtered Probabilities', 'Sparsemax' ]) plt.savefig('no_middle_bar' + '_random_' + str(random) + '.png', dpi=600) matplotlib2tikz.save('no_middle_bar' + '_random_' + str(random) + '_random_' + str(random) + '.tex') plt.close() if num == 0: plt.figure() width = 0.5 p1 = plt.bar(np.arange(K), alpha_p.flatten(), width, color='blue', alpha=0.5) p2 = plt.bar(np.arange(K), norm_singletons.flatten(), width, color='green', alpha=0.5) p3 = plt.bar(np.arange(K), alpha_p_sparsemax.flatten(), width, color='orange', alpha=0.5) plt.xlabel('Z') plt.ylabel('Values') plt.ylim(0, 1.0) plt.title('Values for 0') plt.legend([ 'Softmax Probabilities', 'Normalized Filtered Probabilities', 'Sparsemax' ]) plt.savefig('middle_bar' + '_random_' + str(random) + '.png', dpi=600) matplotlib2tikz.save('middle_bar' + '_random_' + str(random) + '.tex') plt.close() print("labels", c) print("singletons", dst_obj.output_mass_singletons) print("filtered probabilities", norm_singletons) print("probabilities", alpha_p) print("sparsemax", alpha_p_sparsemax)
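# A minimal sketch of the overlaid-bar comparison above, reduced to two
# made-up probability vectors; the legend labels match the ones used in the
# script, everything else is a placeholder.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib2tikz

K = 10
np.random.seed(0)
softmax_probs = np.random.dirichlet(np.ones(K))
filtered_probs = np.random.dirichlet(np.ones(K))

width = 0.5
plt.bar(np.arange(K), softmax_probs, width, color='blue', alpha=0.5)
plt.bar(np.arange(K), filtered_probs, width, color='green', alpha=0.5)
plt.xlabel('Z')
plt.ylabel('Values')
plt.ylim(0, 1.0)
plt.legend(['Softmax Probabilities', 'Normalized Filtered Probabilities'])
plt.savefig('bars-sketch.png', dpi=600)
matplotlib2tikz.save('bars-sketch.tex')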
def _main(): # get command line arguments test_list = _parse_options() tex_file_path = "./tex/acid.tex" # directory where all the generated files will end up data_dir = "./data" # how to get from the LaTeX file to the data tex_relative_path_to_data = "../data" figure_width = "7.5cm" # open file for writing file_handle = open( tex_file_path, "w" ) write_document_header( file_handle, figure_width ) test_functions = [ tf.basic_sin, tf.subplots, tf.image_plot, tf.noise, tf.circle_patch, tf.patches, tf.legends, tf.legends2, tf.logplot, tf.loglogplot, tf.subplot4x4, tf.text_overlay, tf.annotate, tf.histogram, tf.contourf_with_logscale ] if not test_list is None: # actually treat a sublist of test_functions # remove duplicates and sort test_list = sorted( set(test_list) ) else: # all indices test_list = xrange( 0, len(test_functions) ) for k in test_list: print 'Test function %d...' % k, pp.cla() pp.clf() # plot the test example comment = test_functions[k]() # convert to TikZ tikz_path = data_dir + "/test" + repr(k) + ".tex" matplotlib2tikz.save( tikz_path, figurewidth = figure_width, tex_relative_path_to_data = \ tex_relative_path_to_data ) # plot reference figure pdf_path = data_dir + "/test" + repr(k) + ".pdf" pp.savefig(pdf_path) # update the LaTeX file write_file_comparison_entry( file_handle, path.join( tex_relative_path_to_data, path.basename(pdf_path) ), path.join( tex_relative_path_to_data, path.basename(tikz_path) ), k, comment ) print 'done.' write_document_closure( file_handle ) file_handle.close() return