+ str(len(str(len(test_dataloader)))) \
+ '} out of ' \
+ str(len(test_dataloader)) \
+ '...'

# Per chain: evaluate the predictive posterior of every candidate class for
# each test observation, then persist both the integrals and the counts of
# dropped samples reported by the model.
for chain in range(num_chains):
    pred_probs = np.empty([len(test_dataloader), num_classes])
    dropped_counts = np.empty(
        [len(test_dataloader), num_classes], dtype=np.int64)

    # The chain's sample list is invariant over the test loop; bind it once.
    samples = chain_lists.vals['sample'][chain]

    for batch_idx, (x, _) in enumerate(test_dataloader):
        print(verbose_msg.format(chain + 1, batch_idx + 1))

        for label in range(num_classes):
            # One-hot encode the candidate label before integrating.
            onehot = torch.zeros([1, num_classes], dtype=dtype)
            onehot[0, label] = 1.

            integral, n_dropped = model.predictive_posterior(
                samples, x, onehot)

            pred_probs[batch_idx, label] = integral.item()
            dropped_counts[batch_idx, label] = n_dropped

    np.savetxt(
        sampler_output_run_paths[chain].joinpath('pred_posterior_on_test.csv'),
        pred_probs, delimiter=',')
    np.savetxt(
        sampler_output_run_paths[chain].joinpath(
            'pred_posterior_on_test_num_dropped_samples.csv'),
        dropped_counts, fmt='%d', delimiter=',')
keys=['sample'], dtype=dtype) # %% Drop burn-in samples for i in range(num_chains): chain_lists.vals['sample'][i] = chain_lists.vals['sample'][i][ pred_iter_thres:] # %% Compute chain means means = chain_lists.mean() # %% Make and save predictions for k in range(num_chains): test_pred_probs = np.empty([len(test_dataloader), num_classes]) for i, (x, _) in enumerate(test_dataloader): for j in range(num_classes): y = torch.zeros([1, num_classes], dtype=dtype) y[0, j] = 1. integral, _ = model.predictive_posterior([means[k, :]], x, y) test_pred_probs[i, j] = integral.item() test_preds = np.argmax(test_pred_probs, axis=1) np.savetxt(sampler_output_run_paths[k].joinpath('preds_via_mean.txt'), test_preds, fmt='%d')