# Example #1
    functools.partial(loggaussian, mu=mu[i], sigma2=sigma2[i])
    for i in range(num_mixtures)
]
# Pin the mixture parameters and weights so logjoint_ needs only the sample.
logjoint_ = functools.partial(logjointmix, logps=logps, alpha=alpha)


def logjoint(sample):
    """Evaluate the mixture log-joint density at *sample*.

    Thin wrapper over the partially-applied ``logjoint_`` so the callable
    passed downstream has a plain single-argument signature.
    """
    value = logjoint_(sample)
    return value


# Closed-form moments of the target mixture, used as ground truth below.
true_mean, true_cov = tndmeancov(mu, sigma2, alpha)


def sampler(nsamples):
    """Draw *nsamples* exact samples from the target mixture.

    Named function instead of a lambda assignment (PEP 8 / E731); same
    call signature and behavior as before.
    """
    return samplergaussian(mu, sigma2, alpha, nsamples)
#%%
nsamples = 10
ninducing = 50
samples = sampling.sampling1(nsamples, dim, scale=10.0)
mu0 = torch.zeros(dim)  # initial mean of the variational approximation
cov0 = (20.0 / 3)**2 * torch.ones(dim)  # initial (diagonal) covariance
acquisition = "prospective"
folder_name = "example3"
# Create the output folder if it does not already exist. Unlike the previous
# bare `except: pass`, exist_ok=True only tolerates "already exists" and does
# not mask unrelated OSErrors (e.g. permission denied, invalid path).
os.makedirs(folder_name, exist_ok=True)
#%% The main training class is initiated, it's gp model optimized,
#   and initial component set
vb = VariationalBoosting(dim,
                         logjoint,
                         samples,
                         mu0,
                         cov0,
# Example #2
    # Degrees of freedom drawn uniformly in [2.5, 0.5*dim + 2); keeping nu > 2
    # makes the Student-t covariance used later (nu / (nu - 2)) finite.
    # NOTE(review): (2 + 0.5 * dim - 2.5) simplifies to 0.5 * dim - 0.5 —
    # confirm the range was intended.
    nu = torch.rand(dim) * (2 + 0.5 * dim - 2.5) + 2.5
    # Identity rotation, so the rotated density coincides with the base one.
    A = torch.eye(dim)
    # Pin the rotated Student-t log-density; logjoint_ then needs only a sample.
    logjoint_ = functools.partial(logrotated,
                                  f=functools.partial(logtnd, nu=nu),
                                  A=A)
    device = "cpu"

    def logjoint(sample):
        """Evaluate the log-density on CPU and return the result on *device*."""
        cpu_value = logjoint_(sample.to("cpu"))
        return cpu_value.to(device)

    # Ground-truth moments of the rotated Student-t target: zero mean and
    # covariance A @ diag(nu / (nu - 2)) @ A^T (finite since nu > 2).
    true_mean = torch.zeros(dim)
    true_cov = A @ torch.diag(nu / (nu - 2) * torch.ones(dim)) @ A.t()

    def sampler(nsamples):
        """Draw *nsamples* exact samples from the rotated Student-t target.

        Named function instead of a lambda assignment (PEP 8 / E731); same
        call signature and behavior as before.
        """
        return samplertndrot(nsamples, nu, A, dim)
    #%%
    nsamples = 10 * dim
    samples = sampling.sampling1(nsamples, dim, scale=5.0, device=device)
    mu0 = torch.zeros(dim).to(device)  # initial variational mean
    cov0 = (20.0 / 3)**2 * torch.ones(dim).to(device)  # initial diagonal covariance
    #%%
    training_interval = 20
    acquisitions = ["prospective", "mmlt"]
    # Build the boosting object around logjoint, fit its internal model, and
    # set the initial component. NOTE(review): kernel_function/matern_coef/
    # degree_hermite are presumably surrogate-model hyperparameters — confirm
    # against VariationalBoosting's documentation.
    vb = VariationalBoosting(dim,
                             logjoint,
                             samples,
                             mu0,
                             cov0,
                             kernel_function="PMat",
                             matern_coef=2.5,
                             degree_hermite=60)
    vb.optimize_bmc_model(maxiter=200)
    vb.update_full()