Example #1
from numpy import array, eye, ones
from numpy.linalg import svd

def __init__(self, distribution, num_eigen=2,
             mean_est=array([-2.0, -2.0]), cov_est=0.05 * eye(2),
             sample_discard=500, sample_lag=10, accstar=0.234):
    AdaptiveMetropolis.__init__(self, distribution=distribution,
                                mean_est=mean_est, cov_est=cov_est,
                                sample_discard=sample_discard,
                                sample_lag=sample_lag, accstar=accstar)
    # cannot keep more eigen-directions than the target has dimensions
    assert num_eigen <= distribution.dimension
    self.num_eigen = num_eigen
    # one proposal scale per retained eigen-direction
    self.dwscale = self.globalscale * ones([self.num_eigen])
    # eigendecomposition of the covariance estimate via SVD;
    # keep only the leading num_eigen eigenvalues/eigenvectors
    u, s, _ = svd(self.cov_est)
    self.eigvalues = s[0:self.num_eigen]
    self.eigvectors = u[:, 0:self.num_eigen]
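The constructor above stores only the leading eigen-directions of the covariance estimate. As a standalone illustration of that decomposition step in plain NumPy (the function name leading_directions and the synthetic data are my own, not part of the kameleon-mcmc API):

import numpy as np

def leading_directions(samples, k=2):
    # empirical covariance of the samples, then its SVD; for a symmetric
    # positive-semidefinite matrix this is the eigendecomposition
    cov_est = np.cov(samples, rowvar=False)
    u, s, _ = np.linalg.svd(cov_est)
    # leading k eigenvalues and eigenvectors, as in the snippet above
    return s[:k], u[:, :k]

# usage on 1000 correlated 2D points
np.random.seed(0)
samples = np.random.multivariate_normal([-2.0, -2.0],
                                        [[1.0, 0.8], [0.8, 1.0]], size=1000)
eigvalues, eigvectors = leading_directions(samples, k=2)
print(eigvalues)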
Example #2
        burnin = 40000
        num_iterations = 80000

        mcmc_samplers.append(
            KameleonWindowLearnScale(distribution, kernel, stop_adapt=burnin))

        mean_est = zeros(distribution.dimension, dtype="float64")
        cov_est = 1.0 * eye(distribution.dimension)
        cov_est[0, 0] = distribution.V
        mcmc_samplers.append(
            AdaptiveMetropolisLearnScale(distribution,
                                         mean_est=mean_est,
                                         cov_est=cov_est))
        mcmc_samplers.append(
            AdaptiveMetropolis(distribution,
                               mean_est=mean_est,
                               cov_est=cov_est))
        #
        #        num_eigen = distribution.dimension
        #        mcmc_samplers.append(AdaptiveMetropolisPCA(distribution, num_eigen=num_eigen, mean_est=mean_est, cov_est=cov_est))
        #
        mcmc_samplers.append(StandardMetropolis(distribution))

        start = zeros(distribution.dimension, dtype="float64")
        mcmc_params = MCMCParams(start=start,
                                 num_iterations=num_iterations,
                                 burnin=burnin)

        mcmc_chains = [
            MCMCChain(mcmc_sampler, mcmc_params)
            for mcmc_sampler in mcmc_samplers
        ]
Example #3
def __init__(self, distribution,
             mean_est=None, cov_est=None,
             sample_discard=500, sample_lag=20, accstar=0.234):
    # forward all arguments unchanged to the AdaptiveMetropolis base class
    AdaptiveMetropolis.__init__(self, distribution, mean_est, cov_est,
                                sample_discard, sample_lag, accstar)
Example #5
    # 8-dimensional Banana target; V controls the variance of the first coordinate
    distribution = Banana(dimension=8, bananicity=0.1, V=100)

    # kernel bandwidth from the median heuristic on 1000 samples,
    # then overridden with a fixed value
    sigma = GaussianKernel.get_sigma_median_heuristic(
        distribution.sample(1000).samples)
    sigma = 10
    print "using sigma", sigma
    kernel = GaussianKernel(sigma=sigma)

    burnin = 40000
    num_iterations = 80000

    #mcmc_sampler = KameleonWindowLearnScale(distribution, kernel, stop_adapt=burnin)
    # initial guesses for the adaptive proposal: zero mean, identity covariance,
    # with the first coordinate matched to the target's variance parameter V
    mean_est = zeros(distribution.dimension, dtype="float64")
    cov_est = 1.0 * eye(distribution.dimension)
    cov_est[0, 0] = distribution.V
    #mcmc_sampler = AdaptiveMetropolisLearnScale(distribution, mean_est=mean_est, cov_est=cov_est)
    mcmc_sampler = AdaptiveMetropolis(distribution,
                                      mean_est=mean_est,
                                      cov_est=cov_est)
    #mcmc_sampler = StandardMetropolis(distribution)

    start = zeros(distribution.dimension, dtype="float64")
    mcmc_params = MCMCParams(start=start,
                             num_iterations=num_iterations,
                             burnin=burnin)

    mcmc_chain = MCMCChain(mcmc_sampler, mcmc_params)
    mcmc_chain.append_mcmc_output(StatisticsOutput())

    experiment = SingleChainExperiment(mcmc_chain, experiment_dir)
    experiment.run()
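For reference, the median heuristic that Example #5 calls (before overriding sigma) can be sketched in a few lines of NumPy/SciPy. This is only an illustration of the general idea, taking the bandwidth as the median pairwise Euclidean distance between samples; GaussianKernel.get_sigma_median_heuristic may scale or define it slightly differently, and the function name below is my own:

import numpy as np
from scipy.spatial.distance import pdist

def median_heuristic_sigma(samples):
    # median of pairwise Euclidean distances between sample rows
    # (illustrative sketch of the median heuristic, not the library routine)
    return np.median(pdist(samples))

# usage on synthetic 8-dimensional data
samples = np.random.randn(1000, 8)
print("median-heuristic sigma: %.3f" % median_heuristic_sigma(samples))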