def main_simu(d, nb_exps=10000, nb_iters=40000, sigma_max=2.0, seed=1):
    """Compare ULA (two accuracies), MALA and RWMH on a non-isotropic
    Gaussian target in dimension d, then save the error traces to disk.

    The error metric is the relative error of the empirical 75th
    percentile on the last (widest) coordinate.
    """
    np.random.seed(123456 + seed)

    # Target: zero-mean Gaussian; std devs interpolate linearly 1.0 .. sigma_max.
    mean = np.zeros(d)
    sigma = np.array([1.0 + (sigma_max - 1.0) / (d - 1) * i for i in range(d)])
    L = 1. / sigma[0]**2    # largest curvature (smoothness constant)
    m = 1. / sigma[-1]**2   # smallest curvature (strong-convexity constant)
    kappa = L / m           # condition number

    print("d = %d, m = %0.2f, L = %0.2f, kappa = %0.2f" % (d, m, L, kappa))

    def error_quantile(x_curr):
        # Relative error of the sample 75th percentile on the last coordinate
        # against the exact Gaussian quantile sigma[-1] * Phi^{-1}(0.75).
        q3 = sigma[-1] * scipy.stats.norm.ppf(0.75)
        return np.array([np.abs(np.percentile(x_curr[:, -1], 75) - q3) / q3])

    # Warm-ish start: isotropic Gaussian matched to the stiffest direction.
    init_distr = 1. / np.sqrt(L) * np.random.randn(nb_exps, d)

    def grad_f_local(x):
        return grad_f(x, mean=mean, sigma=sigma)

    def f_local(x):
        return density_f(x, mean=mean, sigma=sigma)

    # Keyword arguments shared by every sampler call below.
    shared = dict(kappa=kappa, L=L, nb_iters=nb_iters, nb_exps=nb_exps)

    error_ula_all, x_ula = mcmc.ula(
        init_distr, grad_f_local, error_quantile, epsilon=1.0, **shared)
    error_ula_02_all, x_ula_02 = mcmc.ula(
        init_distr, grad_f_local, error_quantile, epsilon=0.2, **shared)
    error_mala_all, x_mala = mcmc.mala(
        init_distr, grad_f_local, f_local, error_quantile, **shared)
    error_rwmh_all, x_rwmh = mcmc.rwmh(
        init_distr, f_local, error_quantile, **shared)

    result = {
        'd': d,
        'nb_iters': nb_iters,
        'nb_exps': nb_exps,
        'sigma_max': sigma_max,
        'ula': error_ula_all,
        'ula_02': error_ula_02_all,
        'mala': error_mala_all,
        'rwmh': error_rwmh_all,
    }

    save_path = "PLEASE UPDATE BEFORE USE"
    np.save(
        '%s/gaussian_nonisotropic_d%d_iters%d_exps%d_seed%d.npy' %
        (save_path, d, nb_iters, nb_exps, seed), result)
# --- Example 2 ---
def main_simu(nb_exps=10000, nb_iters=40000, sigma_max=2.0, seed=1):
    """Sweep target accuracies eps over {0.1, ..., 1.0} for a d = 3 Gaussian
    whose last coordinate is nearly flat (close to non-strongly convex).

    For each eps the objective is regularized to be strongly convex, the
    samplers (MALA, RWMH, ULA at two step sizes) are run, and all error
    traces are saved to one .npy file.
    """
    d = 3
    epsilons = np.arange(10) * 0.1 + 0.1
    np.random.seed(123456 + seed)

    # One (nb_iters, 1) error trace per sampler per eps value.
    trace_shape = (epsilons.shape[0], nb_iters, 1)
    error_ula_all = np.zeros(trace_shape)
    error_ula_02_all = np.zeros(trace_shape)
    error_mala_all = np.zeros(trace_shape)
    error_rwmh_all = np.zeros(trace_shape)

    mean = np.zeros(d)
    sigma = np.array([1.0 + (sigma_max - 1.0) / (d - 1) * i for i in range(d)])
    # make the last sigma large, so that it is close to nonstrongly convex
    sigma[-1] = 1000.
    L = 1. / sigma[0]**2
    m = 1. / sigma[-1]**2
    kappa = L / m

    print("d = %d, m = %0.2f, L = %0.2f, kappa = %0.2f" % (d, m, L, kappa))

    for j, eps in enumerate(epsilons):
        # Modify the objective to make it gamma/2 strongly convex;
        # nu is a fourth-moment bound used to size gamma.
        nu = 1.0
        gamma = 2. * eps / d / nu
        L_mod = L + gamma / 2.
        m_mod = m + gamma / 2.
        kappa_mod = L_mod / m_mod

        print("d = %d, m_mod = %0.2f, L_mod = %0.2f, kappa_mod = %0.2f, eps = %0.2f" % (d, m_mod, L_mod, kappa_mod, eps))

        def error_quantile(x_curr):
            # Compare the 75th percentile on the before-last dimension
            # against the exact quantile sigma[-2] * Phi^{-1}(0.75).
            q3 = sigma[-2] * scipy.stats.norm.ppf(0.75)
            return np.array([np.abs(np.percentile(x_curr[:, -2], 75) - q3) / q3])

        init_distr = 1. / np.sqrt(L_mod) * np.random.randn(nb_exps, d)

        def grad_f_local(x):
            return grad_f_mod(x, mean=mean, sigma=sigma, gamma=gamma)

        def f_local(x):
            return density_f_mod(x, mean=mean, sigma=sigma, gamma=gamma)

        # Sampler arguments common to all four runs for this eps.
        shared = dict(kappa=kappa_mod, L=L_mod,
                      nb_iters=nb_iters, nb_exps=nb_exps)

        error_mala_all[j], x_mala = mcmc.mala(
            init_distr, grad_f_local, f_local, error_quantile, **shared)
        error_rwmh_all[j], x_rwmh = mcmc.rwmh(
            init_distr, f_local, error_quantile, **shared)
        error_ula_all[j], x_ula = mcmc.ula(
            init_distr, grad_f_local, error_quantile, epsilon=10. * eps, **shared)
        error_ula_02_all[j], x_ula = mcmc.ula(
            init_distr, grad_f_local, error_quantile, epsilon=eps, **shared)

    result = {
        'epsilons': epsilons,
        'd': d,
        'nb_iters': nb_iters,
        'nb_exps': nb_exps,
        'sigma_max': sigma_max,
        'ula': error_ula_all,
        'ula_02': error_ula_02_all,
        'mala': error_mala_all,
        'rwmh': error_rwmh_all,
    }

    save_path = "PLEASE UPDATE BEFORE USE"
    np.save('%s/gaussian_nonstrongly_nonisotropic_eps_iters%d_exps%d_seed%d.npy' % (save_path, nb_iters, nb_exps, seed), result)
# --- Example 3 ---
def main_simu(d, nb_exps=100, nb_iters=40000, seed=1):
    """Compare HMC (two step-size choices), MALA and RWMH from a warm start
    on a non-isotropic Gaussian target in dimension d, then save the error
    traces and acceptance statistics to disk.

    HMC is given nb_iters total gradient evaluations: with K leapfrog steps
    per iteration it runs ~nb_iters/K outer iterations, so all methods get a
    comparable gradient budget.
    """
    np.random.seed(123456 + seed)
    # Pre-allocate full-length traces; HMC only fills its first
    # nb_hmc_iters rows (remaining rows stay zero).
    error_hmc_all = np.zeros((nb_iters, 1))
    error_hmcagg_all = np.zeros((nb_iters, 1))
    error_mala_all = np.zeros((nb_iters, 1))
    error_rwmh_all = np.zeros((nb_iters, 1))

    mean = np.zeros(d)
    #sigma_max = d**(5.0/12)
    sigma_max = d**(1.0 / 3)
    sigma = np.array([1.0 + (sigma_max - 1.0) / (d - 1) * i for i in range(d)])
    L = 1. / sigma[0]**2    # smoothness constant
    m = 1. / sigma[-1]**2   # strong-convexity constant
    kappa = L / m           # condition number

    print("d = %d, m = %0.2f, L = %0.2f, kappa = %0.2f" % (d, m, L, kappa))

    def error_quantile(x_curr):
        # Relative error of the empirical 75th percentile on the last
        # (widest) coordinate vs. the exact Gaussian quantile.
        q3 = sigma[-1] * scipy.stats.norm.ppf(0.75)
        e1 = np.abs(np.percentile(x_curr[:, -1], 75) - q3) / q3
        return np.array([e1])

    init_distr = 1. / np.sqrt(L) * np.random.randn(nb_exps, d)

    def grad_f_local(x):
        return grad_f(x, mean=mean, sigma=sigma)

    def f_local(x):
        return f(x, mean=mean, sigma=sigma)

    # cK is a multiplier on K
    cK = 1
    # Number of leapfrog updates per HMC iteration.
    # NOTE: np.int was removed in NumPy 1.24; use the builtin int instead.
    K_hmc = int(np.ceil(d**0.75 / kappa**0.75 * cK))
    nb_hmc_iters = int(nb_iters / K_hmc) + 1

    error_hmc_all[:nb_hmc_iters, :], x_hmc, ac_hmc, _ = mcmc.hmc(
        init_distr,
        grad_f_local,
        f_local,
        error_quantile,
        stepchoice=3,
        kappa=kappa,
        L=L,
        L3=L,
        cK=cK,
        nb_iters=nb_hmc_iters,
        nb_exps=nb_exps)
    # aggressive HMC step-size choice by assuming L3 small
    K_hmcagg = int(np.ceil(d**0.125 * kappa**0.25 * cK))
    nb_hmcagg_iters = int(nb_iters / K_hmcagg) + 1
    error_hmcagg_all[:nb_hmcagg_iters, :], x_hmcagg, ac_hmcagg, _ = mcmc.hmc(
        init_distr,
        grad_f_local,
        f_local,
        error_quantile,
        stepchoice=5,
        kappa=kappa,
        L=L,
        L3=L,
        cK=cK,
        nb_iters=nb_hmcagg_iters,
        nb_exps=nb_exps)

    error_mala_all, x_mala, ac_mala = mcmc.mala(init_distr,
                                                grad_f_local,
                                                f_local,
                                                error_quantile,
                                                kappa=kappa,
                                                L=L,
                                                nb_iters=nb_iters,
                                                nb_exps=nb_exps)
    error_rwmh_all, x_rwmh, ac_rwmh = mcmc.rwmh(init_distr,
                                                f_local,
                                                error_quantile,
                                                kappa=kappa,
                                                L=L,
                                                nb_iters=nb_iters,
                                                nb_exps=nb_exps)

    result = {}
    result['d'] = d
    result['nb_iters'] = nb_iters
    result['nb_exps'] = nb_exps
    result['sigma_max'] = sigma_max
    result['cK'] = cK
    result['K_hmc'] = K_hmc
    result['nb_hmc_iters'] = nb_hmc_iters
    result['K_hmcagg'] = K_hmcagg
    result['nb_hmcagg_iters'] = nb_hmcagg_iters
    result['hmc'] = error_hmc_all
    result['hmcagg'] = error_hmcagg_all
    result['mala'] = error_mala_all
    result['rwmh'] = error_rwmh_all
    result['ac_hmc'] = ac_hmc
    result['ac_hmcagg'] = ac_hmcagg
    result['ac_mala'] = ac_mala
    result['ac_rwmh'] = ac_rwmh

    save_path = "/cluster/scratch/chenyua/HMC/results"
    np.save(
        '%s/warm_gaussian_nonisotropic_d%d_kappa23_cK%d_iters%d_exps%d_seed%d.npy'
        % (save_path, d, int(cK), nb_iters, nb_exps, seed), result)