closure_basic = gprh.gp_closure(model_basic, meastype, buildPhi_basic, lossfu_basic, n, dom_points, train_y)
else:
    closure_basic = gprh.gp_closure(model_basic, meastype, buildPhi_basic, lossfu_basic, n, dom_points, train_y, train_x=train_x)
# Evaluate the closure once so the first backward() call has a graph to traverse.
loss_basic = closure_basic()

# The line-search options never change between iterations, so build the dict
# once up front instead of re-creating it inside the loop.
options = {'line_search': True, 'closure': closure_basic, 'max_ls': 3, 'ls_debug': False,
           'inplace': False, 'interpolate': False, 'eta': 3, 'c1': 1e-4,
           'decrease_lr_on_max_ls': 0.1, 'increase_lr_on_min_ls': 5}

for i in range(training_iterations_basic):
    optimiser_basic.zero_grad()  # zero gradients
    loss_basic.backward()  # backprop derivatives
    loss_basic, lr, ls_step = optimiser_basic.step(options=options)  # compute new loss

    # progress printout: iteration, loss, learning rate, line-search steps
    gprh.optiprint(i, training_iterations_basic, loss_basic.item(), lr, ls_step, model_basic, buildPhi_basic.L)

# Save results; integral vs point measurements require different keyword arguments.
if integral:
    test_f = gprh.compute_and_save(model_basic, meastype, dataname, train_y, n, X, Y, Z,
            ntx, nty, test_x, dom_points, m, dim, mt, noise_std, lossfu_basic, buildPhi_basic, optimiser_basic, training_iterations_basic,
             joint=True, x0=x0, unitvecs=unitvecs, Rlim=Rlim, rec_fbp=rec_fbp, err_fbp=err_fbp, basic=True)
if point:
    test_f = gprh.compute_and_save(model_basic, meastype, dataname, train_y, n, X, Y, Z,
        ntx, nty, test_x, dom_points, m, dim, mt, noise_std, lossfu_basic, buildPhi_basic, optimiser_basic, training_iterations_basic,
         joint=True, train_x=train_x, basic=True)
try:  # since plotting might produce an error on remote machines
    vmin = 0
    vmax = Z.max()
    gprh.makeplot2D_new('mymodel_basic_'+meastype+'_'+dataname+'_'+str(training_iterations_basic),vmin=vmin,vmax=vmax,data=True)
except:
def closure3():
    """Loss closure for the joint model: rebuild the basis functions and
    return data loss plus a regularisation penalty on the network."""
    global L  # expose L so the training loop's progress printout can read it
    phi, sq_lambda, L = buildPhi2.getphi(mybestnet, n, train_x=train_x, dom_points=dom_points)
    data_loss = lossfu3(mybestnet.gp.log_sigma_f, mybestnet.gp.log_lengthscale,
                        mybestnet.gp.log_sigma_n, phi, train_y, sq_lambda)
    return data_loss + gprh.regulariser(mybestnet, weight=regweight2)

# Initial loss evaluation so the first backward() call is valid.
loss3 = closure3()

# Optimiser options are loop-invariant; create the dict once instead of
# rebuilding it on every iteration.
options = {'closure': closure3, 'max_ls': 3, 'ls_debug': False, 'inplace': False,
           'interpolate': False, 'eta': 3, 'c1': 1e-4,
           'decrease_lr_on_max_ls': 0.1, 'increase_lr_on_min_ls': 5}

for i in range(training_iterations2):
    optimiser3.zero_grad()  # zero gradients
    loss3.backward()  # propagate derivatives
    loss3, lr, ls_iters = optimiser3.step(options=options)  # compute new loss

    # L is refreshed as a global by closure3 on every evaluation
    gprh.optiprint(i, training_iterations2, loss3.item(), lr, ls_iters, mybestnet, L)


# update phi with the final trained parameters
phi, sq_lambda, L = buildPhi2.getphi(mybestnet, n, train_x=train_x, dom_points=dom_points)

# now make predictions at the test inputs
test_f, cov_f = mybestnet(y_train=train_y, phi=phi, sq_lambda=sq_lambda, L=L, x_test=test_x)

# torch.save((model,train_x,train_y,test_x,test_f,cov_f,truefunc,omega,diml,meastype),'mymodel')
# model,train_x,train_y,test_x,test_f,cov_f,truefunc,omega,diml,meastype = torch.load('mymodel')

# plot
gprh.makeplot(mybestnet,train_x,train_y,test_x,test_f,cov_f,truefunc,diml,meastype=meastype)
Exemple #3
0
        'max_ls': 5,
        'ls_debug': False,
        'inplace': False,
        'interpolate': False,
        'eta': 3,
        'c1': 1e-4,
        'decrease_lr_on_max_ls': 0.1,
        'increase_lr_on_min_ls': 5
    }

    optimiser.zero_grad()  # zero gradients
    loss.backward()  # propagate derivatives
    loss, lr, ls_iters = optimiser.step(options=options)  # compute new loss

    # print
    gprh.optiprint(i, training_iterations, loss.item(), lr, ls_iters, model, L)

# Rebuild the basis functions phi with the final (trained) model parameters
# before predicting, so predictions reflect the optimised hyperparameters.
phi, sq_lambda, L = buildPhi.getphi(model,
                                    m,
                                    n,
                                    mt,
                                    train_x=train_x,
                                    dom_points=dom_points)

# Predictive mean and covariance at the test inputs.
test_f, cov_f = model(y_train=train_y,
                      phi=phi,
                      sq_lambda=sq_lambda,
                      L=L,
                      x_test=test_x)
Exemple #4
0
        'max_ls': 3,
        'ls_debug': False,
        'inplace': False,
        'interpolate': False,
        'eta': 3,
        'c1': 1e-4,
        'decrease_lr_on_max_ls': 0.1,
        'increase_lr_on_min_ls': 5
    }

    # NOTE(review): the enclosing loop header is outside this view; this body
    # assumes `i`, `loss2`, `optimiser_pt` and `options` are defined above it.
    optimiser_pt.zero_grad()  # zero gradients
    loss2.backward()  # propagate derivatives
    loss2, lr, ls_step = optimiser_pt.step(options=options)  # compute new loss

    # print progress (pretraining stage passes no model/L arguments)
    gprh.optiprint(i, training_iterations_pt, loss2.item(), lr, ls_step)

#########################################################
# joint training
#########################################################
print('\n=========Training the joint model=========')

# set approximation parameters for the reduced-rank GP
dim = len(m)  # nr of latent outputs (one entry of m per output)
mt = np.prod(m)  # total nr of basis functions across all dimensions
# buildPhi object
if integral:
    buildPhi = gprh.buildPhi(m,
                             type=meastype,
                             ni=ni,