# Collect wall-clock runtimes, one per chain, from each run's 'runtime.txt'.
# NOTE(review): this chunk starts mid-script — `runtimes`, `chain_arrays`,
# `diagnostic_iter_thres`, `mc_cov_mats`, `num_chains`, `np` and the
# sampler path variables are defined earlier in the file, outside this view.
for i in range(num_chains):
    with open(sampler_output_run_paths[i].joinpath('runtime.txt'), 'r') as file:
        runtimes.append(float(file.readline().rstrip()))

runtimes = np.array(runtimes)

# %% Drop burn-in samples

# Keep only post-burn-in iterations along axis 1 (chains, iterations, params
# — presumably; TODO confirm the axis layout against the kanga ChainArrays API).
chain_arrays.vals['sample'] = chain_arrays.vals['sample'][:, diagnostic_iter_thres:, :]

# %% Compute multivariate rhat

# multi_rhat returns a tuple; only the first element (the rhat value) is kept.
rhat_val, _, _, _, _ = chain_arrays.multi_rhat(mc_cov_mat=mc_cov_mats)

# %% Save rhat_val

with open(sampler_output_path.joinpath('multi_rhat.txt'), 'w') as file:
    file.write('{}\n'.format(rhat_val))

# %% Compute multivariate ESS

# One effective-sample-size value per chain.
ess_vals = np.array(chain_arrays.multi_ess(mc_cov_mat=mc_cov_mats))

# %% Save multivariate ESSs

for i in range(num_chains):
    with open(sampler_output_run_paths[i].joinpath('multi_ess.txt'), 'w') as file:
        file.write('{}\n'.format(ess_vals[i]))

# %% Save mean of multivariate ESSs

# NOTE(review): truncated here — the body of this `with` block (the write of
# the mean ESS, presumably `ess_vals.mean()`) continues past the end of this
# chunk; verify against the full file.
with open(sampler_output_path.joinpath('mean_multi_ess.txt'), 'w') as file:
# %% Load packages

import numpy as np
import torch

from sklearn.metrics import accuracy_score

from bnn_mcmc_examples.examples.mlp.penguins.constants import num_chains
from bnn_mcmc_examples.examples.mlp.penguins.dataloaders import test_dataloader
from bnn_mcmc_examples.examples.mlp.penguins.metropolis_hastings.constants import (
    sampler_output_path, sampler_output_run_paths
)

# %% Load test data and labels

# Only the labels are needed here; the features are discarded.
_, test_labels = next(iter(test_dataloader))

# %% Compute predictive accuracies

# Convert one-hot labels to class indices once, outside the loop
# (the conversion is loop-invariant).
test_label_indices = torch.argmax(test_labels, 1)

accuracies = np.empty(num_chains)

for i in range(num_chains):
    # Per-chain predicted class labels, one per test point.
    test_preds = np.loadtxt(
        sampler_output_run_paths[i].joinpath('preds_via_mean.txt'), skiprows=0
    )
    # sklearn's accuracy_score signature is (y_true, y_pred): ground truth
    # must come first. Accuracy is symmetric in its arguments, so the value
    # is unchanged, but this matches the documented API.
    accuracies[i] = accuracy_score(test_label_indices, test_preds)

# %% Save predictive accuracies

np.savetxt(sampler_output_path.joinpath('accuracies_via_mean.txt'), accuracies)
# %% Load packages

import numpy as np

import kanga.plots as ps

from bnn_mcmc_examples.examples.mlp.penguins.constants import num_chains
from bnn_mcmc_examples.examples.mlp.penguins.metropolis_hastings.constants import (
    sampler_output_path, sampler_output_run_paths)

# %% Load correlation matrices

# Stack the per-chain Monte Carlo correlation matrices into one 3-D array.
mc_cor_mats = np.stack([
    np.loadtxt(sampler_output_run_paths[i].joinpath('mc_cor.csv'), delimiter=',', skiprows=0)
    for i in range(num_chains)
])

# Correlation matrix averaged across chains, saved by an earlier script.
mean_mc_cor_mat = np.loadtxt(
    sampler_output_path.joinpath('mean_mc_cor.csv'), delimiter=',', skiprows=0
)

# %% Plot heat maps of correlation matrices

# One heat map per chain, written into that chain's run directory.
for i in range(num_chains):
    ps.cor_heatmap(mc_cor_mats[i], fname=sampler_output_run_paths[i].joinpath('mc_cor.png'))

# Heat map of the across-chain mean correlation matrix.
ps.cor_heatmap(mean_mc_cor_mat, fname=sampler_output_path.joinpath('mean_mc_cor.png'))
# NOTE(review): this chunk starts mid-statement — the text below is the tail
# of a burn-in slice assignment whose left-hand side (presumably
# `chain_arrays.vals['sample'] = chain_arrays.vals[`) lies before this view;
# verify against the full file.
'sample'][:, diagnostic_iter_thres:, :]

# %% Compute Monte Carlo covariance matrices

# One Monte Carlo covariance matrix per chain.
mc_cov_mats = chain_arrays.mc_cov()

# %% Save Monte Carlo covariance matrices

for i in range(num_chains):
    np.savetxt(sampler_output_run_paths[i].joinpath('mc_cov.csv'), mc_cov_mats[i], delimiter=',')

# %% Save mean of Monte Carlo covariance matrices

# Average the covariance matrices across chains (axis 0) before saving.
np.savetxt(sampler_output_path.joinpath('mean_mc_cov.csv'), mc_cov_mats.mean(0), delimiter=',')

# %% Compute Monte Carlo correlation matrices

# Correlations are derived from the covariance matrices computed above.
mc_cor_mats = chain_arrays.mc_cor(mc_cov_mat=mc_cov_mats)

# %% Save Monte Carlo correlation matrices

for i in range(num_chains):
    np.savetxt(sampler_output_run_paths[i].joinpath('mc_cor.csv'), mc_cor_mats[i], delimiter=',')

# %% Save mean of Monte Carlo correlation matrices
# NOTE(review): truncated here — the statement saving the mean correlation
# matrix (presumably an `np.savetxt` of `mc_cor_mats.mean(0)`) continues past
# the end of this chunk.