def get_OracleKernelAdaptiveLangevin_instance(D, target_log_pdf):
    """Build an OracleKernelAdaptiveLangevin sampler for a D-dimensional banana target.

    Fits a finite-feature kernel exponential family surrogate to oracle
    samples drawn from the banana density (using the module-level
    ``bananicity`` and ``V`` shape parameters) and wraps the surrogate in
    an adaptive Langevin sampler.

    Parameters
    ----------
    D : int
        Dimension of the target distribution.
    target_log_pdf : callable
        Log-density of the target; passed through to the sampler unchanged.

    Returns
    -------
    OracleKernelAdaptiveLangevin
        Sampler instance backed by the fitted surrogate.
    """
    step_size = 1.
    m = 500   # number of random features for the finite-dimensional surrogate
    N = 5000  # number of oracle samples used to fit the surrogate

    Z = sample_banana(N, D, bananicity, V)

    # Fixed hyper-parameters. Previously this function carried three
    # permanently disabled (`if False:`) blocks for Bayesian-optimisation
    # tuning, median-heuristic bandwidth selection, and matplotlib
    # visualisation of the fit; they never executed and were removed as
    # dead code.
    surrogate = KernelExpFiniteGaussian(sigma=10, lmbda=.001, m=m, D=D)
    surrogate.fit(Z)

    # Lazy %-style args: the string is only formatted if INFO is enabled.
    logger.info("kernel exp family uses %s", surrogate.get_parameters())

    return OracleKernelAdaptiveLangevin(D, target_log_pdf, surrogate, step_size)
D=D,  # NOTE(review): continuation of a results-storing call whose opening line lies outside this chunk — verify against the caller
bananicity=bananicity,
V=V,
num_benchmark_samples=num_benchmark_samples,
population_size=population_size,
num_iter_per_particle=num_iter_per_particle,
mmd=mmd,
rmse_mean=rmse_mean,
rmse_cov=rmse_cov,
time_taken=time_taken,
)
# Debug-only diagnostics, permanently disabled by the `if False:` guard.
# `plt` is imported only inside this block, so everything below (including
# the final plt.show()) must belong to the guarded body.
if False:
    import matplotlib.pyplot as plt
    visualize_scatter_2d(samples)
    plt.title("%s" % sampler.get_name())
    if isinstance(sampler, OracleKernelAdaptiveLangevin):
        # Fit visualised on a fixed window; presumably matched to the
        # banana target's support — TODO confirm.
        Xs = np.linspace(-30, 30, 50)
        Ys = np.linspace(-20, 40, 50)
        visualise_fit_2d(sampler.surrogate, samples, Xs, Ys)
    if isinstance(sampler, StaticLangevin):
        # Histogram of recorded forward drift norms for Langevin samplers.
        plt.figure()
        plt.grid(True)
        plt.title("Drift norms %s" % sampler.get_name())
        plt.hist(sampler.forward_drift_norms)
    plt.show()
# Fit a finite-feature kernel exponential family surrogate to the benchmark
# samples; `sigma`, `lmbda`, `m`, `benchmark_samples` are defined outside
# this chunk.
surrogate = KernelExpFiniteGaussian(sigma=sigma, lmbda=lmbda, m=m, D=benchmark_samples.shape[1])
surrogate.fit(benchmark_samples)

# Lightweight container used to present 2D slices of the surrogate through
# the log_pdf/grad interface expected by visualise_fit_2d.
fake = empty_class()

def replace_2(x_2d, a, i, j):
    """Return a copy of vector `a` with coordinates i and j replaced by x_2d."""
    a = a.copy()  # copy so the caller's vector (e.g. true_mean) is not mutated
    a[i] = x_2d[0]
    a[j] = x_2d[1]
    return a

# Visualise every ordered pair of coordinate slices, holding all other
# coordinates at `true_mean`.
for i in range(benchmark_samples.shape[1]):
    for j in range(benchmark_samples.shape[1]):
        if i == j:
            continue
        # The lambdas close over loop variables i, j; harmless here because
        # they are consumed immediately by visualise_fit_2d within the same
        # iteration (no late-binding hazard).
        fake.log_pdf = lambda x_2d: surrogate.log_pdf(replace_2(x_2d, true_mean, i, j))
        fake.grad = lambda x_2d: surrogate.grad(replace_2(x_2d, true_mean, i, j))
        visualise_fit_2d(fake, benchmark_samples[:, [i, j]],
                         Xs=np.linspace(benchmark_samples[:, i].min(), benchmark_samples[:, i].max(), 30),
                         Ys=np.linspace(benchmark_samples[:, j].min(), benchmark_samples[:, j].max(), 30),
                         )
        plt.show()

# Plot the cross-validation objective mean with a +/- 2 std band over
# log2 bandwidths (`log2_sigmas`, `Js_mean`, `Js_var` defined elsewhere).
# NOTE(review): `Js_var[i]` uses the stale index left over from the loop
# above — this likely should be the full `Js_var` array (elementwise);
# confirm against the code that builds Js_var.
plt.plot(log2_sigmas, Js_mean, 'b-')
plt.plot(log2_sigmas, Js_mean - 2 * np.sqrt(Js_var[i]), 'b--')
plt.plot(log2_sigmas, Js_mean + 2 * np.sqrt(Js_var[i]), 'b--')
plt.show()
def replace_2(x_2d, a, i, j):
    """Return a copy of vector `a` with coordinates i and j replaced by x_2d."""
    a = a.copy()  # copy so the caller's vector (e.g. true_mean) is not mutated
    a[i] = x_2d[0]
    a[j] = x_2d[1]
    return a

# Visualise every ordered pair of coordinate slices of the surrogate,
# holding all other coordinates at `true_mean`. `fake`, `surrogate`,
# `benchmark_samples` and `true_mean` are defined outside this chunk.
for i in range(benchmark_samples.shape[1]):
    for j in range(benchmark_samples.shape[1]):
        if i == j:
            continue
        # The lambdas close over loop variables i, j; harmless here because
        # they are consumed immediately within the same iteration.
        fake.log_pdf = lambda x_2d: surrogate.log_pdf(
            replace_2(x_2d, true_mean, i, j))
        fake.grad = lambda x_2d: surrogate.grad(
            replace_2(x_2d, true_mean, i, j))
        visualise_fit_2d(
            fake, benchmark_samples[:, [i, j]],
            Xs=np.linspace(benchmark_samples[:, i].min(), benchmark_samples[:, i].max(), 30),
            Ys=np.linspace(benchmark_samples[:, j].min(), benchmark_samples[:, j].max(), 30),
        )
        plt.show()

# Plot the objective mean with a +/- 2 std band over log2 bandwidths.
# NOTE(review): `Js_var[i]` uses the stale index left over from the loop
# above — probably should be the full `Js_var` array; confirm upstream.
plt.plot(log2_sigmas, Js_mean, 'b-')
plt.plot(log2_sigmas, Js_mean - 2 * np.sqrt(Js_var[i]), 'b--')
plt.plot(log2_sigmas, Js_mean + 2 * np.sqrt(Js_var[i]), 'b--')
plt.show()
sampler_name=sampler.get_name(),  # NOTE(review): continuation of a results-storing call whose opening line lies outside this chunk — verify against the caller
D=D,
bananicity=bananicity,
V=V,
num_benchmark_samples=num_benchmark_samples,
population_size=population_size,
num_iter_per_particle=num_iter_per_particle,
mmd=mmd,
rmse_mean=rmse_mean,
rmse_cov=rmse_cov,
time_taken=time_taken,
)
# Debug-only diagnostics, permanently disabled by the `if False:` guard.
# `plt` is imported only inside this block, so everything below (including
# the final plt.show()) must belong to the guarded body.
if False:
    import matplotlib.pyplot as plt
    visualize_scatter_2d(samples)
    plt.title("%s" % sampler.get_name())
    if isinstance(sampler, OracleKernelAdaptiveLangevin):
        # Fit visualised on a fixed window; presumably matched to the
        # banana target's support — TODO confirm.
        Xs = np.linspace(-30, 30, 50)
        Ys = np.linspace(-20, 40, 50)
        visualise_fit_2d(sampler.surrogate, samples, Xs, Ys)
    if isinstance(sampler, StaticLangevin):
        # Histogram of recorded forward drift norms for Langevin samplers.
        plt.figure()
        plt.grid(True)
        plt.title("Drift norms %s" % sampler.get_name())
        plt.hist(sampler.forward_drift_norms)
    plt.show()