if integral:
    buildPhi_basic = gprh.buildPhi(m, type=meastype, tun=tun1, x0=x0, unitvecs=unitvecs, Rlim=Rlim)
else:
    buildPhi_basic = gprh.buildPhi(m, type=meastype, tun=tun1)

if integral:
    closure_basic = gprh.gp_closure(model_basic, meastype, buildPhi_basic, lossfu_basic, n, dom_points, train_y)
else:
    closure_basic = gprh.gp_closure(model_basic, meastype, buildPhi_basic, lossfu_basic, n, dom_points, train_y, train_x=train_x)
loss_basic = closure_basic()
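# closure_basic() re-evaluates the GP training objective (built from
# lossfu_basic) from scratch; it is passed to the optimiser's line search
# below, which may call it repeatedly to probe candidate step lengths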

for i in range(training_iterations_basic):

    options = {'line_search': True, 'closure': closure_basic, 'max_ls': 3, 'ls_debug': False, 'inplace': False, 'interpolate': False,
               'eta': 3, 'c1': 1e-4, 'decrease_lr_on_max_ls': 0.1, 'increase_lr_on_min_ls': 5}
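    # (assumed semantics of the custom optimiser's options, following common
    # line-search conventions: max_ls caps line-search trials per step, c1 is
    # the Armijo sufficient-decrease constant, and decrease/increase_lr_on_*_ls
    # rescale the learning rate when the search hits its cap or succeeds
    # immediately)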

    optimiser_basic.zero_grad() # zero gradients
    loss_basic.backward()  # Backprop derivatives
    loss_basic, lr, ls_step = optimiser_basic.step(options=options) # compute new loss

    # print
    gprh.optiprint(i, training_iterations_basic, loss_basic.item(), lr, ls_step, model_basic, buildPhi_basic.L)

if integral:
    test_f = gprh.compute_and_save(model_basic, meastype, dataname, train_y, n, X, Y, Z,
                                   ntx, nty, test_x, dom_points, m, dim, mt, noise_std, lossfu_basic,
                                   buildPhi_basic, optimiser_basic, training_iterations_basic,
                                   joint=True, x0=x0, unitvecs=unitvecs, Rlim=Rlim,
                                   rec_fbp=rec_fbp, err_fbp=err_fbp, basic=True)
if point:
    test_f = gprh.compute_and_save(model_basic, meastype, dataname, train_y, n, X, Y, Z,
                                   ntx, nty, test_x, dom_points, m, dim, mt, noise_std, lossfu_basic,
                                   buildPhi_basic, optimiser_basic, training_iterations_basic,
                                   joint=True, train_x=train_x, basic=True)

# (reconstructed head) objective closure for training mybestnet on integral
# measurements; ni, buildPhi.sc (quadrature weights) and buildPhi.fact are
# assumed to be defined by the surrounding script
def closure2():
    ints = torch.zeros(n)
    for q in range(n):
        a = train_x[q, 0]  # interval start
        b = train_x[q, 1]  # interval end
        h = (b - a) / ni   # quadrature step length

        # evaluate the network on a uniform grid over [a, b]
        zz = mybestnet(torch.linspace(a, b, ni + 1).view(ni + 1, 1))[:, 0].unsqueeze(-1)

        # weighted sum approximating the integral of the network over [a, b]
        ints[q] = torch.sum(zz * buildPhi.sc.t()).mul(buildPhi.fact * h)

    # mean squared misfit against the integral measurements, plus regularisation
    return (ints.sub(train_y)).pow(2).sum().div(n) + gprh.regulariser(mybestnet, weight=regweight)

loss2 = closure2()

for i in range(training_iterations):
    options = {'closure': closure2, 'max_ls': 5, 'ls_debug': False, 'inplace': False, 'interpolate': False,
               'eta': 3, 'c1': 1e-4, 'decrease_lr_on_max_ls': 0.1, 'increase_lr_on_min_ls': 5}

    optimiser2.zero_grad() # zero gradients
    loss2.backward() # propagate derivatives
    loss2, lr, ls_iters = optimiser2.step(options=options) # compute new loss

    # print
    print(i, loss2.item())


with torch.no_grad():
    fplot, ax = plt.subplots(1, 1, figsize=(4, 3))

    # plot predictions
    ax.plot(test_x.numpy(), mybestnet(test_x).detach().numpy(), 'r')

    plt.show()
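# For reference, a minimal self-contained sketch of the same closure-based
# training pattern using the stock torch.optim.LBFGS optimiser with a strong
# Wolfe line search (the custom optimiser and its `options` dict above are
# project-specific); the toy data and network here are illustrative only:
import torch

xs = torch.linspace(0, 1, 50).unsqueeze(-1)
ys = torch.sin(2 * torch.pi * xs) + 0.1 * torch.randn_like(xs)

net = torch.nn.Sequential(torch.nn.Linear(1, 32), torch.nn.Tanh(), torch.nn.Linear(32, 1))
opt = torch.optim.LBFGS(net.parameters(), line_search_fn='strong_wolfe')

def lbfgs_closure():
    opt.zero_grad()                      # zero gradients before each re-evaluation
    loss = (net(xs) - ys).pow(2).mean()  # mean squared error
    loss.backward()                      # the line search needs fresh gradients
    return loss

for it in range(20):
    loss = opt.step(lbfgs_closure)       # LBFGS may call the closure several times per step
    print(it, loss.item())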
# Example #3
for i in range(training_iterations):
    # model.scale+=1.01
    # model.scale2*=1.2
    options = {
        'closure': closure,
        'max_ls': 5,
        'ls_debug': False,
        'inplace': False,
        'interpolate': False,
        'eta': 3,
        'c1': 1e-4,
        'decrease_lr_on_max_ls': 0.1,
        'increase_lr_on_min_ls': 5
    }

    optimiser.zero_grad()  # zero gradients
    loss.backward()  # propagate derivatives
    loss, lr, ls_iters = optimiser.step(options=options)  # compute new loss

    # print
    gprh.optiprint(i, training_iterations, loss.item(), lr, ls_iters, model, L)

# update phi
phi, sq_lambda, L = buildPhi.getphi(model,
                                    m,
                                    n,
                                    mt,
                                    train_x=train_x,
                                    dom_points=dom_points)
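# Hedged sketch of the kind of basis that a getphi-style routine computes in a
# reduced-rank (Hilbert-space) GP approximation on [-L, L]; the exact formulas
# inside gprh.buildPhi may differ:
import math
import torch

def basis_1d(x, m, L):
    j = torch.arange(1, m + 1, dtype=x.dtype)            # basis indices 1..m
    sq_lambda = math.pi * j / (2.0 * L)                  # sqrt of Laplacian eigenvalues
    phi = torch.sin(sq_lambda * (x + L)) / math.sqrt(L)  # eigenfunctions, shape (n, m)
    return phi, sq_lambda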

# now make predictions
        print('Solving ' + problemName)
        print('===================================================================================')
        print('    Iter:    |     F       |    ||g||    | |x - y|/|x| |   F Evals   |    alpha    ')
        print('-----------------------------------------------------------------------------------')
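        # column meanings (as suggested by the header): F is the objective
        # value, ||g|| the gradient norm, |x - y|/|x| the relative step size,
        # F Evals the cumulative function evaluations, alpha the step length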

    func_evals = 0

    t = time.process_time()

    optimizer.zero_grad()
    obj = model()
    obj.backward()
    grad = model.grad()
    func_evals += 1

    x_old = model.x().clone()
    x_new = x_old.clone()
    f_old = obj

    # main loop
    for n_iter in range(max_iter):

        # define closure for line search
        def closure():
            optimizer.zero_grad()