def load_results(self, chain_folder, x_dim, run='latest'):
    """Load results from a previous run of ultranest.

    Args:
        chain_folder: Path to directory that holds run{i}/ folder(s).
            These in turn hold chain/ and results/ folders, etc.
        x_dim: Number of dimensions of posterior.
        run: Optional; if 'latest' uses the run{i} folder with highest
            value of i. Else str or int corresponding to the run{i}
            folder you want to access.

    Returns:
        results; which is a list with two dictionaries of results.
        Note that results[1] corresponds to standard ultranest results.
        See ultranest documentation for details.

    Raises:
        ValueError: If no run{i} folder matching `run` is found under
            chain_folder. (Previously this fell through and passed
            ``None`` to ``un.read_file``, failing with an unhelpful
            error.)
    """
    number = 0
    res_path = None
    for subdir in glob.glob(chain_folder + '/*'):
        # Raw string: '\d' in a plain literal is an invalid escape
        # sequence (DeprecationWarning, future SyntaxError).
        rundir = re.match(r'.*run(\d+)', subdir, re.IGNORECASE)
        if rundir is None:
            continue
        number_new = int(rundir.group(1))
        if run == 'latest':
            # Keep the run folder with the highest index seen so far.
            if number_new > number:
                number = number_new
                res_path = subdir
        elif str(number_new) == str(run):
            # Explicit run requested; match on the folder index.
            res_path = subdir
    if res_path is None:
        raise ValueError(
            'No run folder matching run=%r found in %s' % (run, chain_folder))
    results = un.read_file(res_path, x_dim=x_dim)
    return results
def test_reactive_run_resume_eggbox(storage_backend):
    """Run the eggbox problem twice with resume=True into the same log_dir,
    then check that results read back from storage match the sampler's
    in-memory results. `storage_backend` is a fixture/parameter selecting
    the point store implementation (e.g. 'hdf5')."""
    from ultranest import ReactiveNestedSampler
    from ultranest import read_file

    def loglike(z):
        # Vectorized eggbox log-likelihood; counts calls via a function
        # attribute so call accounting can be checked against r['ncall'].
        chi = (np.cos(z / 2.)).prod(axis=1)
        loglike.ncalls += len(z)
        return (2. + chi)**5
    loglike.ncalls = 0

    def transform(x):
        # Map unit cube to [0, 10*pi] in each dimension.
        return x * 10 * np.pi

    paramnames = ['a', 'b']
    ndim = len(paramnames)
    #last_results = None
    folder = tempfile.mkdtemp()
    np.random.seed(1)
    try:
        # Second iteration resumes from the store written by the first.
        for i in range(2):
            print()
            print("====== Running Eggbox problem [%d] =====" % (i+1))
            print()
            sampler = ReactiveNestedSampler(paramnames,
                loglike, transform=transform,
                log_dir=folder, resume=True, vectorized=True,
                draw_multiple=False,
                storage_backend=storage_backend)
            # Calls already recorded in the store before this run starts.
            initial_ncalls = int(sampler.ncall)
            num_live_points = 100
            loglike.ncalls = 0
            r = sampler.run(max_iters=200 + i*200,
                max_num_improvement_loops=0,
                min_num_live_points=num_live_points,
                cluster_num_live_points=0)
            sampler.print_results()
            if storage_backend == 'hdf5':
                print("pointstore:", sampler.pointstore.fileobj['points'].shape)
            sampler.pointstore.close()
            print(loglike.ncalls, r['ncall'], initial_ncalls)
            ncalls = loglike.ncalls
            if sampler.mpi_size > 1:
                # Sum likelihood calls across MPI ranks before comparing.
                ncalls = sampler.comm.gather(ncalls, root=0)
                if sampler.mpi_rank == 0:
                    print("ncalls on the different MPI ranks:", ncalls)
                ncalls = sum(sampler.comm.bcast(ncalls, root=0))
            ncalls = ncalls + initial_ncalls
            # Allow small slack per rank for bookkeeping differences.
            assert abs(r['ncall'] - ncalls) <= 2 * sampler.mpi_size, (i, r['ncall'], ncalls, r['ncall'] - ncalls)
            assert paramnames == r['paramnames'], 'paramnames should be in results'

            # the results are not exactly the same, because the sampling adds
            #ncalls = loglike.ncalls
            #sampler = ReactiveNestedSampler(paramnames,
            #    loglike, transform=transform,
            #    log_dir=folder, resume=True, vectorized=True, num_test_samples=0)
            #print("pointstore:", sampler.pointstore.fileobj['points'].shape)
            #assert ncalls == loglike.ncalls, (ncalls, loglike.ncalls)
            if storage_backend == 'hdf5':
                # Re-read the stored run and compare against the live results.
                sequence, results = read_file(folder, ndim, random=False, num_bootstraps=0)
                print("sampler results: ********************")
                print({k:v for k, v in r.items() if np.asarray(v).size < 20 and k != 'weighted_samples'})
                print("reader results: ********************")
                print({k:v for k, v in results.items() if np.asarray(v).size < 20 and k != 'weighted_samples'})
                for k, v in results.items():
                    if k == 'posterior' or k == 'samples':
                        # Resampled quantities; not expected to match exactly.
                        pass
                    elif k == 'weighted_samples' or k == 'maximum_likelihood':
                        # Nested dicts: compare entry by entry.
                        for k2, v2 in results[k].items():
                            if k2 == 'bootstrapped_weights':
                                continue
                            print("  ", k, "::", k2, np.shape(v2))
                            assert_allclose(r[k][k2], v2)
                    elif k.startswith('logzerr') or '_bs' in k or 'Herr' in k:
                        # Bootstrap-derived uncertainties vary; skip.
                        print("   skipping", k, np.shape(v))
                        #assert_allclose(r[k], v, atol=0.5)
                    elif k == 'insertion_order_MWW_test':
                        print('insertion_order_MWW_test:', r[k], v)
                        assert r[k] == v, (r[k], v)
                    else:
                        print("  ", k, np.shape(v))
                        assert_allclose(r[k], v)
                logw = r['weighted_samples']['logw']
                v = r['weighted_samples']['points']
                L = r['weighted_samples']['logl']
                assert sequence['logz'][-1] - r['logz'] < 0.5, (results['logz'][-1], r['logz'])
                assert sequence['logzerr'][-1] <= r['logzerr_single'], (results['logzerr'][-1], r['logzerr'])
                #assert_allclose(sequence['logz_final'], r['logz_single'], atol=0.3)
                #assert_allclose(sequence['logzerr_final'], r['logzerr_single'], atol=0.1)
                # NOTE(review): bounded on both sides by the same value,
                # i.e. this requires sequence['niter'] == r['niter'].
                assert r['niter'] <= sequence['niter'] <= r['niter'], (sequence['niter'], r['niter'])
                assert results['niter'] == len(sequence['logz']) == len(sequence['logzerr']) == len(sequence['logvol']) == len(sequence['logwt'])
                assert results['niter'] == len(results['samples'])
                # Stored text chain must agree with the in-memory arrays.
                data = np.loadtxt(folder + '/chains/weighted_post.txt', skiprows=1)
                assert_allclose(data[:,0], results['weighted_samples']['weights'])
                assert_allclose(data[:,1], results['weighted_samples']['logl'])
                assert_allclose(v, results['weighted_samples']['points'])
                assert_allclose(logw, results['weighted_samples']['logw'])
                assert_allclose(L, results['weighted_samples']['logl'])
                assert_allclose(L, sequence['logl'])
                #assert_allclose(logw + L, sequence['logwt'])
                assert sequence['logvol'].shape == logw.shape == (len(L),), (sequence['logvol'].shape, logw.shape)
                assert sequence['logwt'].shape == logw.shape == (len(L),), (sequence['logwt'].shape, logw.shape)
                #assert_allclose(logw, sequence['logvols'])
                #assert results['samples_untransformed'].shape == v.shape == (len(L), ndim), (results['samples_untransformed'].shape, v.shape)
    finally:
        shutil.rmtree(folder, ignore_errors=True)