def main(dimension=50, num_samples=10000):
    """Print empirical quantiles of samples drawn from a Ring and a Banana
    distribution -- a quick sanity check that both samplers cover the
    target mass in the given dimension.

    Parameters (new, with defaults reproducing the original hard-coded
    behaviour of dimension=50, num_samples=10000):
        dimension   -- dimensionality of both target distributions
        num_samples -- number of samples drawn from each distribution
    """
    ring = Ring(dimension=dimension)
    X = ring.sample(num_samples).samples
    # single-argument print(...) form behaves identically under Python 2
    # (parenthesized expression) and Python 3 (function call)
    print(ring.emp_quantiles(X))

    banana = Banana(dimension=dimension)
    X2 = banana.sample(num_samples).samples
    print(banana.emp_quantiles(X2))
def main(dimension=8, sigma=5, num_history=100, num_iterations=20000):
    """Run the Kameleon MCMC sampler on a Banana target and print statistics.

    All formerly hard-coded constants are exposed as keyword parameters
    whose defaults reproduce the original behaviour exactly.

    Parameters:
        dimension      -- dimensionality of the Banana target
        sigma          -- bandwidth of the Gaussian kernel
        num_history    -- size of the oracle sample seeding the sampler
        num_iterations -- number of MCMC iterations to run
    """
    distribution = Banana(dimension=dimension)
    # %s-formatting keeps this a single print argument, so the line is
    # valid (with identical output) on both Python 2 and Python 3
    print("using sigma %s" % sigma)
    kernel = GaussianKernel(sigma=sigma)
    # seed the Kameleon sampler's history with an oracle sample from the target
    history = distribution.sample(num_history).samples
    mcmc_sampler = Kameleon(distribution, kernel, history)
    start = zeros(distribution.dimension)
    mcmc_params = MCMCParams(start=start, num_iterations=num_iterations)
    chain = MCMCChain(mcmc_sampler, mcmc_params)
    # prints running statistics (and plots timing) while the chain runs
    chain.append_mcmc_output(StatisticsOutput(plot_times=True))
    chain.run()
def main(): distribution = Banana(dimension=8) sigma = 5 print "using sigma", sigma kernel = GaussianKernel(sigma=sigma) mcmc_sampler = Kameleon(distribution, kernel, distribution.sample(100).samples) start = zeros(distribution.dimension) mcmc_params = MCMCParams(start=start, num_iterations=20000) chain = MCMCChain(mcmc_sampler, mcmc_params) chain.append_mcmc_output(StatisticsOutput(plot_times=True)) chain.run()
from kameleon_mcmc.mcmc.MCMCParams import MCMCParams
from kameleon_mcmc.mcmc.output.StatisticsOutput import StatisticsOutput
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolis import AdaptiveMetropolis
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolisLearnScale import \
    AdaptiveMetropolisLearnScale
from kameleon_mcmc.mcmc.samplers.KameleonWindowLearnScale import \
    KameleonWindowLearnScale
from kameleon_mcmc.mcmc.samplers.StandardMetropolis import StandardMetropolis

if __name__ == '__main__':
    # experiment directory named after this script's file name (sans extension)
    experiment_dir = str(os.path.abspath(sys.argv[0])).split(os.sep)[-1].split(".")[0] + os.sep

    distribution = Banana(dimension=8, bananicity=0.03, V=100)

    # FIX: the median-heuristic sigma was computed here (drawing 1000 samples)
    # and then immediately overwritten by the fixed value below, wasting the
    # draw. Disabled like the other commented-out alternatives in this script.
    # (Note: this also removes the heuristic's consumption of the unseeded
    # random stream.)
    #sigma = GaussianKernel.get_sigma_median_heuristic(distribution.sample(1000).samples)
    sigma = 10
    # single-argument form: identical output on Python 2 and Python 3
    print("using sigma %s" % sigma)
    kernel = GaussianKernel(sigma=sigma)

    burnin = 20000
    num_iterations = 40000

    # kernel adaptation stops once burn-in is over
    mcmc_sampler = KameleonWindowLearnScale(distribution, kernel, stop_adapt=burnin)

    # moment estimates used by the (currently disabled) adaptive-Metropolis
    # variants below; first coordinate gets the Banana's V-scaled variance
    mean_est = zeros(distribution.dimension, dtype="float64")
    cov_est = 1.0 * eye(distribution.dimension)
    cov_est[0, 0] = distribution.V
    #mcmc_sampler = AdaptiveMetropolisLearnScale(distribution, mean_est=mean_est, cov_est=cov_est)
    #mcmc_sampler = AdaptiveMetropolis(distribution, mean_est=mean_est, cov_est=cov_est)
from kameleon_mcmc.mcmc.MCMCChain import MCMCChain
from kameleon_mcmc.mcmc.MCMCParams import MCMCParams
from kameleon_mcmc.mcmc.output.StatisticsOutput import StatisticsOutput
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolis import AdaptiveMetropolis
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolisLearnScale import \
    AdaptiveMetropolisLearnScale
from kameleon_mcmc.mcmc.samplers.KameleonWindowLearnScale import \
    KameleonWindowLearnScale
from kameleon_mcmc.mcmc.samplers.StandardMetropolis import StandardMetropolis

if __name__ == '__main__':
    # experiment directory named after this script's file name (sans extension)
    experiment_dir = str(os.path.abspath(sys.argv[0])).split(os.sep)[-1].split(".")[0] + os.sep

    distribution = Banana(dimension=8, bananicity=0.1, V=100)

    # FIX: the median-heuristic sigma was computed here (drawing 1000 samples)
    # and then immediately overwritten by the fixed value below, wasting the
    # draw. Disabled like the other commented-out alternatives in this script.
    # (Note: this also removes the heuristic's consumption of the unseeded
    # random stream.)
    #sigma = GaussianKernel.get_sigma_median_heuristic(distribution.sample(1000).samples)
    sigma = 10
    # single-argument form: identical output on Python 2 and Python 3
    print("using sigma %s" % sigma)
    kernel = GaussianKernel(sigma=sigma)

    burnin = 40000
    num_iterations = 80000

    #mcmc_sampler = KameleonWindowLearnScale(distribution, kernel, stop_adapt=burnin)

    # moment estimates used by the (currently disabled) adaptive-Metropolis
    # variants below; first coordinate gets the Banana's V-scaled variance
    mean_est = zeros(distribution.dimension, dtype="float64")
    cov_est = 1.0 * eye(distribution.dimension)
    cov_est[0, 0] = distribution.V
    #mcmc_sampler = AdaptiveMetropolisLearnScale(distribution, mean_est=mean_est, cov_est=cov_est)
    #mcmc_sampler = AdaptiveMetropolis(distribution, mean_est=mean_est, cov_est=cov_est)
    mcmc_sampler = StandardMetropolis(distribution)
from kameleon_mcmc.mcmc.MCMCChain import MCMCChain
from kameleon_mcmc.mcmc.MCMCParams import MCMCParams
from kameleon_mcmc.mcmc.output.StatisticsOutput import StatisticsOutput
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolis import AdaptiveMetropolis
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolisLearnScale import \
    AdaptiveMetropolisLearnScale
from kameleon_mcmc.mcmc.samplers.KameleonWindowLearnScale import \
    KameleonWindowLearnScale
from kameleon_mcmc.mcmc.samplers.StandardMetropolis import StandardMetropolis

if __name__ == '__main__':
    # experiment directory named after this script's file name (sans extension)
    experiment_dir = str(os.path.abspath(sys.argv[0])).split(os.sep)[-1].split(".")[0] + os.sep

    distribution = Banana(dimension=8, bananicity=0.03, V=100)

    # FIX: the median-heuristic sigma was computed here (drawing 1000 samples)
    # and then immediately overwritten by the fixed value below, wasting the
    # draw. Disabled like the other commented-out alternatives in this script.
    # (Note: this also removes the heuristic's consumption of the unseeded
    # random stream.)
    #sigma = GaussianKernel.get_sigma_median_heuristic(distribution.sample(1000).samples)
    sigma = 10
    # single-argument form: identical output on Python 2 and Python 3
    print("using sigma %s" % sigma)
    kernel = GaussianKernel(sigma=sigma)

    burnin = 20000
    num_iterations = 40000

    #mcmc_sampler = KameleonWindowLearnScale(distribution, kernel, stop_adapt=burnin)

    # moment estimates used by the (currently disabled) adaptive-Metropolis
    # variants below; first coordinate gets the Banana's V-scaled variance
    mean_est = zeros(distribution.dimension, dtype="float64")
    cov_est = 1.0 * eye(distribution.dimension)
    cov_est[0, 0] = distribution.V
    #mcmc_sampler = AdaptiveMetropolisLearnScale(distribution, mean_est=mean_est, cov_est=cov_est)
    #mcmc_sampler = AdaptiveMetropolis(distribution, mean_est=mean_est, cov_est=cov_est)
    mcmc_sampler = StandardMetropolis(distribution)
def gradient(self, x, Y):
    """Gradient of the Matern kernel w.r.t. its left argument x, evaluated
    against every row of Y.

    Implemented for nu in {1.5, 2.5} via the Matern kernel one smoothness
    order below, with a rescaled length-scale; raises NotImplementedError
    for any other nu.
    """
    assert len(shape(x)) == 1
    assert len(shape(Y)) == 2
    assert len(x) == shape(Y)[1]

    # guard clause: only the half-integer orders with a known closed form
    if self.nu not in (1.5, 2.5):
        raise NotImplementedError()

    # length-scale of the one-order-lower Matern kernel
    rho_lower = self.rho * sqrt(2 * (self.nu - 1)) / sqrt(2 * self.nu)
    kernel_lower = MaternKernel(rho_lower, self.nu - 1, self.sigma)
    k = kernel_lower.kernel(reshape(x, (1, len(x))), Y)
    diffs = Y - x
    return (1.0 / rho_lower ** 2) * (k.T * diffs)

if __name__ == '__main__':
    # visual smoke test: cross-kernel matrix between two Banana samples
    target = Banana()
    S1 = target.sample(50).samples
    S2 = target.sample(50).samples
    matern = MaternKernel(5.0, nu=1.5, sigma=2.0)
    imshow(matern.kernel(S1, S2), interpolation="nearest")
    show()
else: raise NotImplementedError() return K def gradient(self, x, Y): assert(len(shape(x))==1) assert(len(shape(Y))==2) assert(len(x)==shape(Y)[1]) if self.nu==1.5 or self.nu==2.5: x_2d=reshape(x, (1, len(x))) lower_order_rho = self.rho * sqrt(2*(self.nu-1)) / sqrt(2*self.nu) lower_order_kernel = MaternKernel(lower_order_rho,self.nu-1,self.sigma) k = lower_order_kernel.kernel(x_2d, Y) differences = Y - x G = ( 1.0 / lower_order_rho ** 2 ) * (k.T * differences) return G else: raise NotImplementedError() if __name__ == '__main__': distribution = Banana() Z = distribution.sample(50).samples Z2 = distribution.sample(50).samples kernel = MaternKernel(5.0, nu=1.5, sigma=2.0) K = kernel.kernel(Z, Z2) imshow(K, interpolation="nearest") #G = kernel.gradient(Z[0],Z2) #print G show()