def sample(n, path):
    """Run the PTMCMC sampler for ``n`` iterations and post-process its output.

    Writes the raw chain under *path* (via the sampler itself) and then stores
    a ``processed_data.npz`` file with the reshaped samples, wall-clock
    timestamps relative to the sampling start, and likelihood-evaluation
    counts.

    NOTE(review): relies on module-level ``target_lnpdf`` and
    ``num_dimensions`` being defined elsewhere in this file — confirm.
    """
    def lnlikefn(theta):
        # Track how many likelihood evaluations the sampler performs.
        lnlikefn.counter += 1
        return target_lnpdf(theta)

    def lnpriorfn(theta):
        # Flat prior in log-space.
        return 0

    prior = normal_pdf(np.zeros(num_dimensions), 1e-1 * np.eye(num_dimensions))
    lnlikefn.counter = 0
    print('path: ' + str(path))

    pt_sampler = PTSampler(num_dimensions, lnlikefn, lnpriorfn,
                           np.copy(prior.cov), outDir=path,
                           progressPath=path, progressRate=1000)
    initial_point = prior.rvs(1)
    t_start = time()
    pt_sampler.sample(initial_point, n, burn=2, thin=1, covUpdate=500,
                      SCAMweight=20, AMweight=20, DEweight=20)

    # The chain file carries 4 trailing bookkeeping columns; drop them.
    chain = np.loadtxt(path + '/chain_1.0.txt')[:, :-4]
    progress = np.load(path + '/progress.npz')
    timestamps = progress['timestamps'] - t_start
    n_fevals = progress['n_fevals']
    # Group the flat chain into one slab per recorded progress snapshot.
    chain = chain.reshape(len(n_fevals), -1, num_dimensions)
    np.savez(path + '/processed_data', samples=chain,
             timestamps=timestamps, fevals=n_fevals)
    print("Done")
def build_target_likelihood_planar_n_link(num_dimensions, prior_variance, likelihood_variance):
    """Build the log-density of a planar n-link robot-arm posterior.

    The target combines a zero-mean Gaussian prior over the joint angles with
    a Gaussian likelihood on the end-effector position, whose mean is
    ``[0.7 * num_dimensions, 0]``. All links have unit length.

    Returns ``[target_lnpdf, prior, prior_chol]``.
    """
    prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
    prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)
    likelihood = normal_pdf([0.7 * num_dimensions, 0], likelihood_variance * np.eye(2))
    link_lengths = np.ones(num_dimensions)

    def target_lnpdf(theta, without_prior=False):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        # Forward kinematics: accumulate the end-effector position link by
        # link; the angle of link i is the sum of the first i+1 joint angles.
        pos_x = np.zeros(len(theta))
        pos_y = np.zeros(len(theta))
        for joint in range(num_dimensions):
            cumulative_angle = np.sum(theta[:, :joint + 1], 1)
            pos_y += link_lengths[joint] * np.sin(cumulative_angle)
            pos_x += link_lengths[joint] * np.cos(cumulative_angle)
        end_effector = np.vstack((pos_x, pos_y)).transpose()
        if without_prior:
            return np.squeeze(likelihood.logpdf(end_effector))
        return np.squeeze(prior.logpdf(theta) + likelihood.logpdf(end_effector))

    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol]
def sample(n_samps, seed=1, path=None):
    """Sample the Goodwin-oscillator posterior with elliptical slice sampling.

    Runs ``n_samps`` ESS updates starting from a prior draw, snapshotting the
    accumulated samples every 1000 iterations. When *path* is given, the
    (exponentiated) samples, iteration counts, likelihood-evaluation counts,
    and wall-clock timestamps are saved to ``<path>processed_data.npz``.

    NOTE(review): samples are stored as ``np.exp(...)`` — presumably the chain
    runs in log-parameter space; confirm against ``build_Goodwin``.
    """
    if path is not None:
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

    # Indices of the Goodwin parameters treated as unknown.
    unknown_params = [1, 2] + np.arange(4, 12).tolist()
    num_dimensions = len(unknown_params)
    target_lnpdf = build_Goodwin(unknown_params, seed=seed, sigma=np.sqrt(0.2),
                                 parameters=np.array([
                                     10., 1.97, 0.46, 0.53,
                                     0.02878028, 0.13585575, 1.57070286,
                                     0.75737477, 0.28929913, 1.52671658,
                                     1.26995194, 1.89562767]))
    prior = normal_pdf(np.zeros(num_dimensions), 1e1 * np.eye(num_dimensions))
    prior_chol = np.linalg.cholesky(prior.cov)

    iters = []
    nfevals = []
    target_lnpdf.counter = 0

    def target_lnpdf_no_prior(theta):
        # ESS handles the Gaussian prior itself, so strip it from the target.
        return target_lnpdf(theta) - prior.logpdf(theta)

    start = time()
    timestamps = []
    cur_theta = prior.rvs(1)
    cur_lnpdf = target_lnpdf_no_prior(cur_theta)
    chunks = []
    chunk = []
    for step in range(1, n_samps + 1):
        cur_theta, cur_lnpdf = ess_update(cur_theta, prior_chol,
                                          target_lnpdf_no_prior,
                                          pdf_params=(), cur_lnpdf=cur_lnpdf)
        chunk.append(cur_theta)
        # Snapshot progress every 1000 iterations (skipping iteration 0).
        if step > 1 and step % 1000 == 0:
            chunks.append(np.array(chunk))
            chunk = []
            iters.append(step)
            nfevals.append(target_lnpdf.counter)
            timestamps.append(time() - start)

    if path is not None:
        np.savez(path + "processed_data", iter=iters,
                 samples=np.exp(np.array(chunks)),
                 fevals=np.array(nfevals),
                 timestamps=np.array(timestamps))
    print("done")
def construct_initial_mixture(self, num_dimensions, num_initial_components, prior_variance):
    """Create an initial GMM with the requested number of components.

    Each component uses the prior covariance. Means are drawn from a
    zero-mean Gaussian with covariance ``prior_variance * I``, except in the
    single-component case, where the mean is pinned to the origin.
    """
    initial_mixture = GMM(num_dimensions)
    prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
    for _ in range(num_initial_components):
        component_mean = (np.zeros(num_dimensions)
                          if num_initial_components == 1
                          else prior.rvs())
        initial_mixture.add_component(component_mean, prior.cov)
    return initial_mixture
def build_frisk_lnpdf(prior_variance=1):
    """Build the Stop-and-Frisk log-density (precinct type 1).

    Returns ``[target_lnpdf, prior, prior_chol, num_dimensions]``.

    NOTE(review): ``without_prior=True`` subtracts ``prior.logpdf`` from the
    frisk model's lnpdf, which implies that lnpdf already includes the prior —
    confirm against ``frisk.make_model_funs``.
    """
    import experiments.lnpdfs.StopAndFrisk.frisk as frisk
    lnpdf, _, num_dimensions, _, _ = frisk.make_model_funs(precinct_type=1)
    prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
    prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)

    def target_lnpdf(theta, without_prior=False):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        if without_prior:
            return lnpdf(theta) - prior.logpdf(theta)
        return lnpdf(theta)

    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol, num_dimensions]
def build_GMM_lnpdf(num_dimensions, num_true_components, prior_variance=1e3):
    """Build a randomly generated GMM target density.

    Component covariances are random SPD matrices (A^T A + I for a random A);
    means are drawn uniformly from [-50, 50)^d; weights are uniform.

    Returns ``[target_lnpdf, prior, prior_chol, target_mixture]``.
    """
    prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
    prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)
    target_mixture = GMM(num_dimensions)
    for _ in range(num_true_components):
        # Draw order matters for reproducibility: covariance factor first,
        # then the mean — identical to the original stream of random numbers.
        rand_factor = 0.1 * np.random.normal(
            0, num_dimensions, (num_dimensions * num_dimensions)).reshape(
            (num_dimensions, num_dimensions))
        component_cov = rand_factor.transpose().dot(rand_factor)
        component_cov += 1 * np.eye(num_dimensions)
        component_mean = 100 * (np.random.random(num_dimensions) - 0.5)
        target_mixture.add_component(component_mean, component_cov)
    target_mixture.set_weights(np.ones(num_true_components) / num_true_components)

    def target_lnpdf(theta, without_prior=False):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        log_density = target_mixture.evaluate(theta, return_log=True)
        if without_prior:
            return np.squeeze(log_density - prior.logpdf(theta))
        return np.squeeze(log_density)

    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol, target_mixture]
def build_logist_regression(X, y, prior_variance):
    """Build a Bayesian logistic-regression log-posterior for data (X, y).

    Uses a zero-mean Gaussian prior with covariance ``prior_variance * I``.
    Returns ``[target_lnpdf, prior, prior_chol]``.

    NOTE(review): the function mixes the local alias ``anp`` (plain numpy)
    with the module-level ``np``; both usages are preserved as-is in case the
    module-level alias is bound to a different numpy flavor elsewhere.
    """
    import numpy as anp
    num_dimensions = X.shape[1]
    prior = normal_pdf(anp.zeros(num_dimensions), prior_variance * anp.eye(num_dimensions))
    prior_chol = anp.sqrt(prior_variance) * anp.eye(num_dimensions)

    def target_lnpdf(theta, without_prior=False):
        theta = anp.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        weighted_sum = theta.dot(X.transpose())
        # Numerically stable log-sigmoid via the log-sum-exp trick:
        # log(1 + exp(w)) = offset + log(exp(w - offset) + exp(-offset)).
        offset = anp.maximum(weighted_sum, np.zeros(weighted_sum.shape))
        denominator = offset + anp.log(anp.exp(weighted_sum - offset) + anp.exp(-offset))
        log_prediction = -denominator
        # For negative labels the log-likelihood gains the raw activation.
        negatives = np.where(y == 0)
        log_prediction[:, negatives] += weighted_sum[:, negatives]
        log_prediction[np.where(anp.isinf(log_prediction))] = 0
        if anp.any(anp.isnan(log_prediction)) or anp.any(anp.isinf(log_prediction)):
            print('nan')
        loglikelihood = anp.sum(log_prediction, 1)
        if without_prior:
            return np.squeeze(loglikelihood)
        return np.squeeze(prior.logpdf(theta) + loglikelihood)

    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol]
def add_component(self, mean, cov):
    """Append a Gaussian component and reset all weights to uniform."""
    self.mixture.append(normal_pdf(mean, cov))
    count = len(self.mixture)
    self.weights = np.ones(count) / count
    self.num_components += 1