# ============ Example #1 ============
 samples = sampling.sampling1(nsamples, dim, scale=5.0, device=device)
 mu0 = torch.zeros(dim).to(device)
 cov0 = (20.0 / 3)**2 * torch.ones(dim).to(device)
 #%%
 training_interval = 20
 acquisitions = ["prospective", "mmlt"]
 vb = VariationalBoosting(dim,
                          logjoint,
                          samples,
                          mu0,
                          cov0,
                          kernel_function="PMat",
                          matern_coef=2.5,
                          degree_hermite=60)
 vb.optimize_bmc_model(maxiter=200)
 vb.update_full()
 #%%
 dmeans = [torch.norm(vb.currentq_mean.to("cpu") - true_mean).numpy()]
 dcovs = [torch.norm(vb.currentq_mean.to("cpu") - true_cov, 2).numpy()]
 mmds = [mmd_vb_sampler(vb, sampler, 100000)]
 weights = [vb.weights.cpu().numpy()]
 elbo_list = [vb.evidence_lower_bound(nsamples=10000).cpu().numpy()]
 step_list = [0]
 time_list = [0.0]
 # Boosting loop: each iteration proposes and fits one new mixture component.
 # NOTE(review): the loop body is truncated in this excerpt — the tracking /
 # printing code presumably follows, as in the other examples. Do not run as-is.
 for i in range(100):
     tictoc.tic()
     # maxiter_nc/lr_nc: optimizer budget and step size for the new component;
     # n_samples_*: Monte Carlo sample counts for the two inner estimators.
     _ = vb.update(maxiter_nc=300,
                   lr_nc=0.01,
                   n_samples_nc=500,
                   n_samples_sn=300,
                   n_iter_sn=300)
# ============ Example #2 ============
# Tracking containers for the run below.
elbo_list = [vb.evidence_lower_bound(nsamples=10000).cpu().numpy()]
step_list = [0]
time_list = [0.0]  # CONSISTENCY FIX: float, matching the elapsed times appended later
vbp_list = []
# Log-density of the current variational approximation on the plotting grid.
vbp = vb.current_logq(delta_x.reshape(-1,1)).cpu().flatten().numpy().astype(float)
vbp_list.append(vbp)

plt.plot(delta_x_np,np.exp(prediction_np))
plt.plot(delta_x_np,np.exp(tp_np))
# BUG FIX: removed a leftover debug `raise KeyError` that unconditionally
# aborted the script here and made the main loop below unreachable.
#%% Main loop
# Each iteration adds one boosting component, periodically re-optimizes all
# components jointly, and records convergence diagnostics.
# NOTE(review): dmeans/dcovs are appended to but initialized outside this
# excerpt — confirm they exist before running.
for i in range(50):
    tictoc.tic()
    _ = vb.update()  # propose and fit one new mixture component
    if ((i+1)%10) == 0:
        vb.update_full()  # joint re-optimization of all component parameters
    #%% Save trackings
    elapsed = tictoc.toc(printing=False)
    # L2 distances of the current approximate moments to the ground truth.
    dmeans.append(np.linalg.norm(vb.currentq_mean.cpu()-true_mean,2))
    dcovs.append(np.linalg.norm(vb.currentq_cov.cpu()-true_cov,2))
    elbo_list.append(vb.evidence_lower_bound(nsamples=10000).cpu().numpy())
    step_list.append(i+1)
    time_list.append(elapsed)
    # Log-density on the plotting grid, for later visualization.
    vbp = vb.current_logq(delta_x.reshape(-1,1)).cpu().flatten().numpy().astype(float)
    vbp_list.append(vbp)
    vb.save_distribution("%s/distrib%i"%(folder_name,i+1))  # checkpoint to disk
    # Progress report: KL estimate, moment errors, ELBO, wall-clock time.
    print(vb_utils.kl_vb_bmc(vb,1000))
    print(dmeans[-1])
    print(dcovs[-1])
    print(elbo_list[-1])
    print(time_list[-1])
# ============ Example #3 ============
#Approximating unnormalized 2-d Cauchy
def logjoint(theta):
    """Log of an unnormalized product-Cauchy density.

    Args:
        theta: tensor whose last dimension indexes the coordinates.

    Returns:
        Tensor with the last dimension summed out: sum_i -log(1 + theta_i**2).
    """
    # log1p(x) is more accurate than log(1 + x) for small |theta|.
    return torch.sum(-torch.log1p(theta**2), dim=-1)

#Set up parameters
dim=2 #Dimension of problem
samples = torch.randn(20,dim) #Initial samples (20 standard-normal draws)
mu0 = torch.zeros(dim) #Initial mean
cov0 = 20.0*torch.ones(dim) #Initial (diagonal) covariance
acquisition = "prospective" #Acquisition function

#Initialize algorithm
vb = VariationalBoosting(dim,logjoint,samples,mu0,cov0)
vb.optimize_bmc_model() #Optimize GP model
vb.update_full() #Fit first component

#Training loop: each iteration adds one boosting component and one evaluation.
for i in range(100):
    _ = vb.update() #Choose new boosting component
    vb.update_bmcmodel(acquisition=acquisition) #Choose new evaluation point
    vb.cutweights(1e-3) #Weight pruning: drop components below threshold
    if ((i+1)%20) == 0:
        vb.update_full(cutoff=1e-3) #Joint parameter updating every 20 steps

vb.save_distribution("finaldistrib") #Save distribution to disk
#%%
# Reload the saved distribution and prepare a plotting grid.
# NOTE(review): torch.load unpickles arbitrary objects — only load files you
# produced yourself (here: the distribution saved just above).
import math
distrib = torch.load("finaldistrib")
nplot = 21  # grid resolution per axis
x,y = torch.linspace(-6,6,nplot),torch.linspace(-6,6,nplot)
# (excerpt truncated here — grid evaluation/plotting presumably follows)