Example #1
#Ground-truth sampler; samplergaussian, mu, sigma2 and alpha are defined elsewhere in the full script
sampler = lambda nsamples: samplergaussian(mu, sigma2, alpha, nsamples)
#%%
folder_name = "example1"
try:
    os.mkdir(folder_name)
except FileExistsError: #folder already exists
    pass
nsamples = 5
samples = torch.linspace(-20,20,nsamples).reshape(-1,1)
#Just initialize with large mean and covariance
mu0 = torch.zeros(dim)
cov0 = (20.0/3)**2*torch.ones(dim)
#%% The main training class is initialized, its GP model optimized,
#   and the initial component set
vb = VariationalBoosting(dim,logjoint,samples,mu0,cov0,
                         kernel_function="PMat",
                         matern_coef=2.5,
                         degree_hermite=60)
vb.optimize_bmc_model(maxiter=100,verbose=False)
#%% Tracking devices
vb.save_distribution("%s/distrib%i"%(folder_name,0))
nplot = 201
delta_x = torch.linspace(-20,20,nplot)
delta_x_np = delta_x.flatten().numpy()
tp_np = (logjoint(delta_x.reshape(-1,1)).cpu()).flatten().numpy()
prediction_np = (vb.bmcmodel.prediction(delta_x.reshape(-1,1),cov="none")*vb.evals_std
                 + vb.evals_mean).numpy().astype(float)
dmeans = [torch.norm(vb.currentq_mean.to("cpu")-true_mean).numpy()]
dcovs = [torch.norm(vb.currentq_cov.to("cpu")-true_cov,2).numpy()] #distance to true covariance (currentq_cov assumed to mirror currentq_mean)
elbo_list = [vb.evidence_lower_bound(nsamples=10000).cpu().numpy()]
step_list = [0]
time_list = [0]
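
#The snippet stops at the tracking lists; below is a minimal sketch of how the
#active-sampling loop might continue, reusing the update/update_bmcmodel/
#cutweights/update_full API shown in Example #2. The step count, acquisition
#choice, and timing code here are assumptions, not part of the original.
import time
acquisition = "prospective" #assumed acquisition function
tic = time.time()
for i in range(100): #assumed number of boosting steps
    _ = vb.update() #Choose new boosting component
    vb.update_bmcmodel(acquisition=acquisition) #Choose new evaluation
    vb.cutweights(1e-3) #Weight pruning
    if ((i+1)%20) == 0:
        vb.update_full(cutoff=1e-3) #Joint parameter updating
    #Track convergence diagnostics, mirroring the lists initialized above
    dmeans.append(torch.norm(vb.currentq_mean.to("cpu")-true_mean).numpy())
    dcovs.append(torch.norm(vb.currentq_cov.to("cpu")-true_cov,2).numpy())
    elbo_list.append(vb.evidence_lower_bound(nsamples=10000).cpu().numpy())
    step_list.append(i+1)
    time_list.append(time.time()-tic)
    vb.save_distribution("%s/distrib%i"%(folder_name,i+1))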
Example #2
torch.manual_seed(100) #For reproducibility

#Approximating an unnormalized 2-d Cauchy
def logjoint(theta):
    return torch.sum(-torch.log(1+theta**2),dim=-1)

#Set up parameters
dim=2 #Dimension of problem
samples = torch.randn(20,dim) #Initial samples
mu0 = torch.zeros(dim) #Initial mean
cov0 = 20.0*torch.ones(dim) #Initial covariance
acquisition = "prospective" #Acquisition function

#Initialize algorithm
vb = VariationalBoosting(dim,logjoint,samples,mu0,cov0)
vb.optimize_bmc_model() #Optimize GP model
vb.update_full() #Fit first component

#Training loop
for i in range(100):
    _ = vb.update() #Choose new boosting component
    vb.update_bmcmodel(acquisition=acquisition) #Choose new evaluation
    vb.cutweights(1e-3) #Weight pruning
    if ((i+1)%20) == 0:
        vb.update_full(cutoff=1e-3) #Joint parameter updating

vb.save_distribution("finaldistrib") #Save distribution
#%%
import math
distrib = torch.load("finaldistrib")
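
#The excerpt does not show what the loaded object exposes. As a purely
#hypothetical usage sketch (sample() and logprob() are assumed method names,
#not confirmed by this snippet), one could estimate the log evidence by
#averaging log p - log q over draws from the fitted mixture:
theta = distrib.sample(1000)        #hypothetical: draw from the fitted mixture
logq = distrib.logprob(theta)       #hypothetical: mixture log-density
logp = logjoint(theta)              #unnormalized target log-density
elbo = (logp - logq).mean()         #Monte Carlo ELBO, a lower bound on log Z
print(elbo.item() / math.log(10))   #converted to log10 using the math import above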
Example #3
samples = sampling1(nsamples) #initial samples; sampling1 and nsamples are defined elsewhere in the script
mu0 = torch.zeros(dim)
cov0 = 20.0 * torch.ones(dim)

#%%
#samples = vb.samples
training_interval = 20
acquisitions = ["prospective", "mmlt"]
vb = VariationalBoosting(dim,
                         logjoint,
                         samples,
                         mu0,
                         cov0,
                         bmc_type="FM",
                         normalization_mode="normalize",
                         training_space="gspace",
                         noise=1e-4,
                         kernel_function="PMat",
                         matern_coef=1.5,
                         numstab=-50.0,
                         degree_hermite=50)
vb.optimize_bmc_model(maxiter=500, verbose=1, lr=0.05)

#%%
elbo_list = [vb.evidence_lower_bound(nsamples=10000).cpu().numpy()]
kl_list = [vb.kullback_proposal_bmc(10000).item()]
step_list = [0]
time_list = [0.0]
#%%
print("Active sampling...")
Example #4
try:
    os.mkdir(folder_name)
except FileExistsError: #folder already exists
    pass
nsamples = 51
ninducing = 50
samples = torch.linspace(-20, 20, nsamples).reshape(-1, 1)
#Just initialize with large mean and covariance
mu0 = torch.zeros(dim)
cov0 = (20.0 / 3)**2 * torch.ones(dim)
#%% The main training class is initialized, its GP model optimized,
#   and the initial component set
vb = VariationalBoosting(dim,
                         logjoint,
                         samples,
                         mu0,
                         cov0,
                         kernel_function=kernels[0], #kernel (name, smoothness) pair defined elsewhere
                         matern_coef=kernels[1],
                         degree_hermite=60)
vb.optimize_bmc_model(maxiter=100, verbose=False)
#%% Tracking devices
vb.save_distribution("%s/distrib%i" % (folder_name, 0))
nplot = 201
delta_x = torch.linspace(-20, 20, nplot)
delta_x_np = delta_x.flatten().numpy()
tp_np = (logjoint(delta_x.reshape(-1, 1)).cpu()).flatten().numpy()
prediction_np = (vb.bmcmodel.prediction(delta_x.reshape(-1, 1), cov="none")*vb.evals_std
                 + vb.evals_mean).numpy().astype(float)
dmeans = [torch.norm(vb.currentq_mean.to("cpu") - true_mean).numpy()]
dcovs = [torch.norm(vb.currentq_cov.to("cpu") - true_cov, 2).numpy()] #distance to true covariance (currentq_cov assumed to mirror currentq_mean)
elbo_list = [vb.evidence_lower_bound(nsamples=10000).cpu().numpy()]
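
#The arrays delta_x_np, tp_np and prediction_np set up above are natural
#inputs for a diagnostic plot of the GP surrogate against the true log-joint.
#A minimal sketch, assuming matplotlib is available (it is not imported in the
#original excerpt, and the output filename is hypothetical):
import matplotlib.pyplot as plt
plt.plot(delta_x_np, tp_np, label="true log-joint")
plt.plot(delta_x_np, prediction_np, "--", label="GP prediction")
plt.xlabel("theta")
plt.ylabel("log density")
plt.legend()
plt.savefig("%s/gp_fit.png" % folder_name) #hypothetical output path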