# --- Fit a Lagrange interpolation module to samples on a dense mesh. ---
# NOTE(review): relies on names defined earlier in the file (I, device, m, d,
# mesh_bound, mesh_size, meshgen, forward, compare, testfunc, ...) — this chunk
# is a script cell, not a self-contained function.
I.cuda(device)

# Widen the upper mesh bound by one grid step while generating the evaluation
# dataset, then restore it (presumably to include the right boundary — confirm).
mesh_bound[1] += 1/1000
dataset = meshgen(mesh_bound, [1001, 1001])
dataset = torch.from_numpy(dataset)
# Re-allocate on the same device/dtype as the interpolation coefficients.
dataset = I.interp_coe.data.new(dataset.size()).copy_(dataset)
dataset = Variable(dataset)
mesh_bound[1] -= 1/1000

# Companion module whose inputs are frozen to `dataset`.
IFixInputs = LagrangeInterpFixInputs(dataset, m, d, mesh_bound, mesh_size)
IFixInputs.double()
if device >= 0:
    IFixInputs.cuda(device)

# Visualize the current interpolant on the dense mesh.
ax = plt.figure().add_subplot(1, 1, 1)
ax.imshow(I(dataset).data.cpu().numpy())
#%%
# Optimize the interpolation coefficients with L-BFGS-B from a random start.
nfi = NumpyFunctionInterface([I.interp_coe, ], forward=lambda: forward(I, dataset))
nfi.flat_param = random.randn(nfi.numel())
x, f, d = lbfgsb(nfi.f, nfi.flat_param, nfi.fprime,
                 m=1000, factr=1, pgtol=1e-14, iprint=10)

# Compare the fitted interpolant against ground truth and plot both the
# inferred field and the pointwise error.
infe, infe_true = compare(I, dataset)
ax = plt.figure().add_subplot(1, 1, 1)
ax.imshow(infe)
errs = infe - infe_true
ax = plt.figure().add_subplot(1, 1, 1)
ax.imshow(errs)
#%%
# Build the target outputs for the fixed-inputs module from the test function,
# matching device/dtype/shape of the module's own outputs.
outputs = IFixInputs()
outputs_true = torch.from_numpy(testfunc(IFixInputs.inputs.cpu().numpy()))
outputs_true = outputs_true.view(outputs.size())
outputs_true = outputs.data.new(outputs_true.size()).copy_(outputs_true)
outputs_true = Variable(outputs_true)
nfi = NumpyFunctionInterface(IFixInputs.parameters(),
                             forward=lambda: forwardFixInputs(IFixInputs, outputs_true))
nfi.flat_param = random.randn(nfi.numel())
grad_proj=grad_proj), dict(params=linpdelearner.coe_params(), isfrozen=False, x_proj=None, grad_proj=None) ], forward=forward, always_refresh=False) callback.nfi = nfi try: # optimize xopt, f, d = lbfgsb(nfi.f, nfi.flat_param, nfi.fprime, m=500, callback=callback, factr=1e0, pgtol=1e-16, maxiter=maxiter, iprint=50) except RuntimeError as Argument: with callback.open() as output: print(Argument, file=output) # if overflow then just print and continue finally: # save parameters nfi.flat_param = xopt callback.save(xopt, 'final') #%%
# --- Exercise NumpyFunctionInterface option handling step by step. ---
# Each printed difference should be (near) zero; `test()` re-checks the
# interface invariants after every mutation. Names a, f, g, a0, test,
# x_proj, grad_proj, param_group2, forward_gen come from earlier in the file.
nfi.fprime(a + 1)
print(nfi.f(a) - f)
print(nfi.flat_param - a)
nfi.flat_param = a
print(nfi.fprime(a) - g)

print('no frozen, no x_proj, no grad_proj, 2 param_groups')
test()

# Attach projections to the second parameter group.
nfi.set_options(1, x_proj=x_proj, grad_proj=grad_proj)
print('set x_proj and grad_proj in param_group[1], 2 param_groups')
test()

# Grow to three parameter groups.
nfi.add_param_group(param_group2)
print('add param_group')
test()

# Freeze the middle group after randomizing the flat parameter vector.
nfi.flat_param = random.randn(nfi.numel())
nfi.set_options(1, isfrozen=True)
print('set frozen in param_group[1], 3 param_groups')
test()

# Remove the projections from the middle group again.
nfi.set_options(1, x_proj=None, grad_proj=None)
print('delete x_proj,grad_proj in param_group[1], 3 param_groups')
test()

# And add projections to the first group instead.
nfi.set_options(0, x_proj=x_proj, grad_proj=grad_proj)
print('add x_proj and grad_proj in param_group[0], 3 param_groups')
test()

# Swap in a freshly generated forward function.
nfi.forward = forward_gen()
print('change forward function')
test()

# Finally check that assigning flat_param and calling f agree.
nfi.flat_param = a0[:nfi.numel()] + 10
print('change flat_param')
print(nfi.forward() - nfi.f(a0[:nfi.numel()] + 10))
""" def x_proj(*args, **kw): penalty.x2.data[0] = 1e-5 def grad_proj(*args, **kw): penalty.x2.grad.data[0] = 0 nfi = NumpyFunctionInterface(penalty.parameters(), forward=penalty.forward, x_proj=x_proj, grad_proj=grad_proj) # x0 = torch.cat([penalty.x1.cpu(),penalty.x2.cpu()],0).data.clone().numpy() x0 = np.random.randn(nfi.numel()) print("\n\n\n\n ***************** penalty *****************") x, f, d = lbfgsb(nfi.f, x0, nfi.fprime, m=100, factr=1, pgtol=1e-14, iprint=10) out, fx, its, imode, smode = slsqp(nfi.f, x0, fprime=nfi.fprime, acc=1e-16, iter=15000, iprint=1, full_output=True) # the following two assignments will inforce 'out' to satisfy the constraint nfi.flat_param = out out = nfi.flat_param print('\noptimial solution\n', out)