def all_tests(gaussian1, gaussian2, n=200):
    oracle_samples1 = gaussian1.sample(n=n).samples
    oracle_samples2 = gaussian2.sample(n=n).samples

    distribution1 = GaussianFullConditionals(gaussian1, list(gaussian1.mu))
    distribution2 = GaussianFullConditionals(gaussian2, list(gaussian2.mu))

    mcmc_sampler1 = Gibbs(distribution1)
    mcmc_sampler2 = Gibbs(distribution2)

    start = zeros(2)
    mcmc_params = MCMCParams(start=start, num_iterations=2000 + n, burnin=2000)

    chain1 = MCMCChain(mcmc_sampler1, mcmc_params)
    chain1.run()
    gibbs_samples1 = chain1.get_samples_after_burnin()

    chain2 = MCMCChain(mcmc_sampler2, mcmc_params)
    chain2.run()
    gibbs_samples2 = chain2.get_samples_after_burnin()

    sigma = GaussianKernel.get_sigma_median_heuristic(concatenate((oracle_samples1, oracle_samples2), axis=0))
    kernel = GaussianKernel(sigma=sigma)

    vanillap = empty((2, 2))
    blockp = empty((2, 2))
    wildp = empty((2, 2))

    vanillap[0, 0] = kernel.TwoSampleTest(oracle_samples1, oracle_samples2, method="vanilla")
    vanillap[0, 1] = kernel.TwoSampleTest(oracle_samples1, gibbs_samples2, method="vanilla")
    vanillap[1, 0] = kernel.TwoSampleTest(gibbs_samples1, oracle_samples2, method="vanilla")
    vanillap[1, 1] = kernel.TwoSampleTest(gibbs_samples1, gibbs_samples2, method="vanilla")

    blockp[0, 0] = kernel.TwoSampleTest(oracle_samples1, oracle_samples2, method="block")
    blockp[0, 1] = kernel.TwoSampleTest(oracle_samples1, gibbs_samples2, method="block")
    blockp[1, 0] = kernel.TwoSampleTest(gibbs_samples1, oracle_samples2, method="block")
    blockp[1, 1] = kernel.TwoSampleTest(gibbs_samples1, gibbs_samples2, method="block")

    wildp[0, 0] = kernel.TwoSampleTest(oracle_samples1, oracle_samples2, method="wild")
    wildp[0, 1] = kernel.TwoSampleTest(oracle_samples1, gibbs_samples2, method="wild")
    wildp[1, 0] = kernel.TwoSampleTest(gibbs_samples1, oracle_samples2, method="wild")
    wildp[1, 1] = kernel.TwoSampleTest(gibbs_samples1, gibbs_samples2, method="wild")

    return vanillap, blockp, wildp
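For orientation, a minimal sketch of how all_tests might be driven; the two Gaussians are built exactly as in the main() example further down this page, Gaussian and MatrixTools are assumed to be in scope as in the other snippets, and the returned 2x2 arrays hold p-values with rows indexing the source of the first sample (0 = oracle, 1 = Gibbs) and columns the source of the second.

from numpy import array, eye, pi

# Two rotated, anisotropic Gaussians differing only in mean
# (hypothetical driver, mirroring the main() example below).
Sigma = eye(2)
Sigma[0, 0] = 30.0
U = MatrixTools.rotation_matrix(-pi / 4)
Sigma = U.T.dot(Sigma).dot(U)
gaussian1 = Gaussian(Sigma=Sigma)
gaussian2 = Gaussian(mu=array([1.0, 0.0]), Sigma=Sigma)

vanillap, blockp, wildp = all_tests(gaussian1, gaussian2, n=200)
print(vanillap)  # entry [0, 0] is oracle-vs-oracle, [1, 1] is Gibbs-vs-Gibbs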
Example 3
def kameleon_generator(num_warmup, thin_step):
    start = np.random.randn(9) * 0  # i.e. a 9-dimensional zero start vector

    # bandwidth from the median heuristic on ground-truth samples ...
    Z = np.load("../ground_truth/benchmark_samples.arr")[:1000]
    sigma = GaussianKernel.get_sigma_median_heuristic(Z)
    sigma = 23.  # ... but overridden with the hand-tuned value from the kameleon-mcmc code
    logger.info("Using sigma=%.6f" % sigma)

    gamma2 = 0.2
    nu2 = .02

    target = GlassPosterior()
    # num_iterations and statistics are module-level names in the source file
    job = KameleonJob(Z, sigma, nu2, gamma2, target, num_iterations, start,
                      statistics, num_warmup, thin_step)

    job.walltime = 60 * 60

    # store results in home dir straight away
    d = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-1]) + os.sep
    job.aggregator = MCMCJobResultAggregatorStoreHome(d)

    return job
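Several snippets on this page call GaussianKernel.get_sigma_median_heuristic and then immediately override the result by hand. As a point of reference, a self-contained numpy sketch of the median-distance idea behind that helper; the library's exact scaling convention may differ, so treat this as illustrative only.

import numpy as np

def median_heuristic_sigma(Z):
    # Median of the pairwise Euclidean distances between sample rows,
    # a common default bandwidth for a Gaussian kernel. Scaling
    # conventions vary, so this may not match the library's value exactly.
    diffs = Z[:, None, :] - Z[None, :, :]
    dists = np.sqrt((diffs ** 2).sum(-1))
    return np.median(dists[np.triu_indices_from(dists, k=1)])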
Example 4
from kameleon_mcmc.mcmc.MCMCChain import MCMCChain
from kameleon_mcmc.mcmc.MCMCParams import MCMCParams
from kameleon_mcmc.mcmc.output.StatisticsOutput import StatisticsOutput
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolis import AdaptiveMetropolis
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolisLearnScale import \
    AdaptiveMetropolisLearnScale
from kameleon_mcmc.mcmc.samplers.KameleonWindowLearnScale import \
    KameleonWindowLearnScale
from kameleon_mcmc.mcmc.samplers.StandardMetropolis import StandardMetropolis

if __name__ == '__main__':
    experiment_dir = str(os.path.abspath(sys.argv[0])).split(
        os.sep)[-1].split(".")[0] + os.sep

    distribution = Banana(dimension=8, bananicity=0.03, V=100)
    sigma = GaussianKernel.get_sigma_median_heuristic(
        distribution.sample(1000).samples)
    sigma = 10  # override the heuristic with a hand-picked value
    print("using sigma", sigma)
    kernel = GaussianKernel(sigma=sigma)

    burnin = 20000
    num_iterations = 40000

    mcmc_sampler = KameleonWindowLearnScale(distribution,
                                            kernel,
                                            stop_adapt=burnin)
    mean_est = zeros(distribution.dimension, dtype="float64")
    cov_est = 1.0 * eye(distribution.dimension)
    cov_est[0, 0] = distribution.V
    #mcmc_sampler = AdaptiveMetropolisLearnScale(distribution, mean_est=mean_est, cov_est=cov_est)
    #mcmc_sampler = AdaptiveMetropolis(distribution, mean_est=mean_est, cov_est=cov_est)
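The snippet stops before a chain is actually constructed. Based on the SingleChainExperiment pattern used later on this page, the continuation would look roughly like the sketch below; the argument values are assumptions, and SingleChainExperiment would need an import not shown in this snippet.

    # Hypothetical continuation, mirroring the experiment pattern further down.
    start = zeros(distribution.dimension)
    params = MCMCParams(start=start, num_iterations=num_iterations, burnin=burnin)
    chain = MCMCChain(mcmc_sampler, params)
    chain.append_mcmc_output(StatisticsOutput(print_from=0, lag=100))
    experiment = SingleChainExperiment(chain, experiment_dir)
    experiment.run()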
Example 5
from kameleon_mcmc.mcmc.MCMCChain import MCMCChain
from kameleon_mcmc.mcmc.MCMCParams import MCMCParams
from kameleon_mcmc.mcmc.output.StatisticsOutput import StatisticsOutput
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolis import AdaptiveMetropolis
from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolisLearnScale import \
    AdaptiveMetropolisLearnScale
from kameleon_mcmc.mcmc.samplers.KameleonWindowLearnScale import \
    KameleonWindowLearnScale
from kameleon_mcmc.mcmc.samplers.StandardMetropolis import StandardMetropolis


if __name__ == '__main__':
    experiment_dir = str(os.path.abspath(sys.argv[0])).split(os.sep)[-1].split(".")[0] + os.sep
    
    distribution = Banana(dimension=8, bananicity=0.1, V=100)
    sigma = GaussianKernel.get_sigma_median_heuristic(distribution.sample(1000).samples)
    sigma = 10  # override the heuristic with a hand-picked value
    print("using sigma", sigma)
    kernel = GaussianKernel(sigma=sigma)
    
    
    burnin = 40000
    num_iterations = 80000
    
    #mcmc_sampler = KameleonWindowLearnScale(distribution, kernel, stop_adapt=burnin)
    mean_est = zeros(distribution.dimension, dtype="float64")
    cov_est = 1.0 * eye(distribution.dimension)
    cov_est[0, 0] = distribution.V
    #mcmc_sampler = AdaptiveMetropolisLearnScale(distribution, mean_est=mean_est, cov_est=cov_est)
    #mcmc_sampler = AdaptiveMetropolis(distribution, mean_est=mean_est, cov_est=cov_est)
    mcmc_sampler = StandardMetropolis(distribution)
Example 8
    # prior on theta and posterior target estimate
    # (dim, data and labels are defined earlier in the source file)
    theta_prior = Gaussian(mu=0 * ones(dim), Sigma=eye(dim) * 5)
    target = PseudoMarginalHyperparameterDistribution(data, labels,
                                                      n_importance=100,
                                                      prior=theta_prior,
                                                      ridge=1e-3)

    # create sampler
    burnin = 10000
    num_iterations = burnin + 300000
    kernel = GaussianKernel(sigma=23.0)
    sampler = KameleonWindowLearnScale(target, kernel, stop_adapt=burnin)
    #    sampler=AdaptiveMetropolisLearnScale(target)
    #    sampler=StandardMetropolis(target)

    # posterior mode derived by initial tests
    start = zeros(target.dimension)
    params = MCMCParams(start=start,
                        num_iterations=num_iterations,
                        burnin=burnin)

    # create MCMC chain
    chain = MCMCChain(sampler, params)
    chain.append_mcmc_output(StatisticsOutput(print_from=0, lag=100))
    #chain.append_mcmc_output(PlottingOutput(plot_from=0, lag=500))

    # create experiment instance to store results
    experiment_dir = str(os.path.abspath(sys.argv[0])).split(
        os.sep)[-1].split(".")[0] + os.sep
    experiment = SingleChainExperiment(chain, experiment_dir)

    experiment.run()
    sigma = GaussianKernel.get_sigma_median_heuristic(
        experiment.mcmc_chain.samples.T)
    print "median kernel width", sigma
Example 9
def main():
    numTrials = 500
    n = 200
    Sigma1 = eye(2)
    Sigma1[0, 0] = 30.0
    Sigma1[1, 1] = 1.0
    theta = -pi / 4
    U = MatrixTools.rotation_matrix(theta)
    Sigma1 = U.T.dot(Sigma1).dot(U)
    print(Sigma1)
    gaussian1 = Gaussian(Sigma=Sigma1)
    gaussian2 = Gaussian(mu=array([1., 0.]), Sigma=Sigma1)

    oracle_samples1 = gaussian1.sample(n=n).samples
    oracle_samples2 = gaussian2.sample(n=n).samples

    print('mean1:', mean(oracle_samples1, 0))
    print('mean2:', mean(oracle_samples2, 0))
    plot(oracle_samples1[:, 0], oracle_samples1[:, 1], 'b*')
    plot(oracle_samples2[:, 0], oracle_samples2[:, 1], 'r*')
    show()
    distribution1 = GaussianFullConditionals(gaussian1, list(gaussian1.mu))
    distribution2 = GaussianFullConditionals(gaussian2, list(gaussian2.mu))

    H0_samples = zeros(numTrials)
    HA_samples = zeros(numTrials)
    mcmc_sampler1 = Gibbs(distribution1)
    mcmc_sampler2 = Gibbs(distribution2)
    burnin = 9000
    thin = 5
    start = zeros(2)
    mcmc_params = MCMCParams(start=start,
                             num_iterations=burnin + thin * n,
                             burnin=burnin)
    sigma = GaussianKernel.get_sigma_median_heuristic(
        concatenate((oracle_samples1, oracle_samples2), axis=0))
    print('using bandwidth:', sigma)
    kernel = GaussianKernel(sigma=sigma)

    for ii in arange(numTrials):
        start_time = time.time()  # renamed so it does not shadow the MCMC start vector
        print('trial:', ii)

        oracle_samples1 = gaussian1.sample(n=n).samples
        oracle_samples1a = gaussian1.sample(n=n).samples
        oracle_samples2 = gaussian2.sample(n=n).samples

        #         chain1 = MCMCChain(mcmc_sampler1, mcmc_params)
        #         chain1.run()
        #         gibbs_samples1 = chain1.get_samples_after_burnin()
        #         gibbs_samples1 = gibbs_samples1[thin*arange(n)]
        #
        #         chain1a = MCMCChain(mcmc_sampler1, mcmc_params)
        #         chain1a.run()
        #         gibbs_samples1a = chain1a.get_samples_after_burnin()
        #         gibbs_samples1a = gibbs_samples1a[thin*arange(n)]
        #
        #         chain2 = MCMCChain(mcmc_sampler2, mcmc_params)
        #         chain2.run()
        #         gibbs_samples2 = chain2.get_samples_after_burnin()
        #         gibbs_samples2 = gibbs_samples2[thin*arange(n)]

        #         H0_samples[ii]=kernel.estimateMMD(gibbs_samples1,gibbs_samples1a)
        #         HA_samples[ii]=kernel.estimateMMD(gibbs_samples1,gibbs_samples2)
        #
        H0_samples[ii] = kernel.estimateMMD(oracle_samples1, oracle_samples1a)
        HA_samples[ii] = kernel.estimateMMD(oracle_samples1, oracle_samples2)
        end_time = time.time()
        print('time elapsed:', end_time - start_time)

    f = open("/home/dino/git/mmdIIDTrueSamples.dat", "wb")  # pickle requires binary mode
    dump(H0_samples, f)
    dump(HA_samples, f)
    dump(gaussian1, f)
    dump(gaussian2, f)
    f.close()
    return None
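A minimal sketch of reading the results back, assuming dump above is pickle.dump; pickle.load returns the objects in the order they were written.

# Hypothetical reader for the file written above.
from pickle import load

with open("/home/dino/git/mmdIIDTrueSamples.dat", "rb") as f:
    H0_samples = load(f)
    HA_samples = load(f)
    gaussian1 = load(f)
    gaussian2 = load(f)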