Example 1
import torch
from numpy import array, int32, random, zeros

# Interp, meshgen and NumpyFunctionInterface are assumed to come from the
# aTEAM library; m and d are defined earlier in the truncated snippet.
device = -1  # CPU; set a GPU index >= 0 to use CUDA
mesh_bound = zeros((2, m))
# mesh_bound[0] = arange(m)-1
# mesh_bound[1] = arange(m)+1
mesh_bound[0] = 0
mesh_bound[1] = 1
mesh_size = array([40] * m)
I = Interp(m, d, mesh_bound, mesh_size, device=device)
mesh_bound[1] += 1 / 200  # enlarge the upper bound slightly before sampling
dataset = meshgen(mesh_bound, [201, 201, 201])
dataset = torch.from_numpy(dataset).clone()
# match the dtype/device of the interpolation coefficients
dataset = I.interp_coe.data.new(dataset.size()).copy_(dataset)
nfi = NumpyFunctionInterface(
    [I.interp_coe],
    forward=lambda: I.forward(dataset))
nfi.flat_param = random.randn(nfi.numel())
x0 = nfi.flat_param
#%%
inputs_shape = [50, 50, 50]
# number of blocks along each axis of the 200^3 grid
IN = 200 // inputs_shape[0]
JN = 200 // inputs_shape[1]
KN = 200 // inputs_shape[2]
indx = zeros((IN * JN * KN, 3), dtype=int32)  # origin of each sub-block
idx = 0
for i in range(IN):
    for j in range(JN):
        for k in range(KN):
            indx[idx] = array([i, j, k]) * array(inputs_shape)
            idx += 1
for i in range(64):
    ...  # (loop body truncated in the source)
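# A minimal sketch (not from the source): how the block origins collected in
# indx could be used to cut 50x50x50 sub-blocks out of the 200^3 evaluation
# grid; the axis layout of dataset is an assumption.
for i0, j0, k0 in indx:
    block = dataset[..., i0:i0 + inputs_shape[0],
                    j0:j0 + inputs_shape[1],
                    k0:k0 + inputs_shape[2]]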
Example 2
# (this snippet assumes the imports from Example 1, plus matplotlib.pyplot
# as plt, torch.autograd.Variable, and scipy.optimize.fmin_l_bfgs_b as lbfgsb)
if device >= 0:
    I.cuda(device)
mesh_bound[1] += 1 / 1000  # temporarily widen the upper bound for the 1001-point grid
dataset = meshgen(mesh_bound, [1001, 1001])
dataset = torch.from_numpy(dataset)
dataset = I.interp_coe.data.new(dataset.size()).copy_(dataset)
dataset = Variable(dataset)  # legacy autograd wrapper; plain tensors suffice in modern PyTorch
mesh_bound[1] -= 1 / 1000
IFixInputs = LagrangeInterpFixInputs(dataset, m, d, mesh_bound, mesh_size)
IFixInputs.double()
if device >= 0:
    IFixInputs.cuda(device)
ax = plt.figure().add_subplot(1, 1, 1)
ax.imshow(I(dataset).data.cpu().numpy())
#%%
nfi = NumpyFunctionInterface([I.interp_coe], forward=lambda: forward(I, dataset))
nfi.flat_param = random.randn(nfi.numel())
x, f, d = lbfgsb(nfi.f, nfi.flat_param, nfi.fprime,
                 m=1000, factr=1, pgtol=1e-14, iprint=10)
infe, infe_true = compare(I, dataset)
ax = plt.figure().add_subplot(1, 1, 1)
ax.imshow(infe)
errs = infe - infe_true
ax = plt.figure().add_subplot(1, 1, 1)
ax.imshow(errs)
#%%
outputs = IFixInputs()
outputs_true = torch.from_numpy(testfunc(IFixInputs.inputs.cpu().numpy()))
outputs_true = outputs_true.view(outputs.size())
outputs_true = outputs.data.new(outputs_true.size()).copy_(outputs_true)
outputs_true = Variable(outputs_true)
nfi = NumpyFunctionInterface(IFixInputs.parameters(),
                             forward=lambda: forwardFixInputs(IFixInputs, outputs_true))
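# The snippet ends here; a plausible continuation (a sketch, reusing the
# lbfgsb call from the first half of this example) would fit the
# fixed-inputs interpolation the same way:
nfi.flat_param = random.randn(nfi.numel())
x, f, d = lbfgsb(nfi.f, nfi.flat_param, nfi.fprime,
                 m=1000, factr=1, pgtol=1e-14, iprint=10)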
Example 3
def x_proj(param_group):
    # (the def line is reconstructed; the snippet is truncated above)
    param_group['w'].data[0, 1] = 1
    param_group['b'].data[0] = 1


def grad_proj(param_group):
    # zero the gradients of the entries pinned by x_proj above
    if param_group['w'].grad is not None:
        param_group['w'].grad.data[0, 1] = 0
    if param_group['b'].grad is not None:
        param_group['b'].grad.data[0] = 0


forward = forward_gen()
param_group0 = {'params': {'w': net.weight0, 'b': net.bias0}}
param_group1 = {'params': {'w': net.weight1, 'b': net.bias1}}
param_group2 = {'params': iter([net.weight2, net.bias2])}  # built but not passed to nfi below
nfi = NumpyFunctionInterface([param_group0, param_group1], forward=forward)
a0 = random.randn(1000).astype(dtype=np.float32)


def test():
    # sanity checks on the consistency of nfi.f / nfi.fprime / nfi.flat_param
    a = a0[:nfi.numel()].copy()
    f = nfi.f(a)
    g = nfi.fprime(a)
    print(g)
    print(a - nfi.flat_param)
    print(nfi.f(a) - f)
    print(np.linalg.norm(nfi.fprime(a) - g))
    nfi.fprime(a + 1)  # evaluate at a different point, then re-check
    print(nfi.f(a) - f)
    print(nfi.flat_param - a)
    nfi.flat_param = a
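# A sketch (not from the source) of how the projection hooks above could be
# attached to a parameter group, following the dict-style groups used in the
# next example; that the hooks receive the group dict is an inference from
# their signatures:
param_group0 = {'params': {'w': net.weight0, 'b': net.bias0},
                'x_proj': x_proj, 'grad_proj': grad_proj}
nfi = NumpyFunctionInterface([param_group0, param_group1], forward=forward)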
Example 4
    # (the head of this snippet, including the enclosing forward definition,
    # is truncated in the source)
        (linpdelearner(sample['u0'], step) - sample['uT'])**2) / var

    def x_proj(*args, **kw):
        linpdelearner.id.MomentBank.x_proj()
        linpdelearner.fd2d.MomentBank.x_proj()

    def grad_proj(*args, **kw):
        linpdelearner.id.MomentBank.grad_proj()
        linpdelearner.fd2d.MomentBank.grad_proj()

    nfi = NumpyFunctionInterface(
        [dict(params=linpdelearner.diff_params(),
              isfrozen=isfrozen,
              x_proj=x_proj,
              grad_proj=grad_proj),
         dict(params=linpdelearner.coe_params(),
              isfrozen=False,
              x_proj=None,
              grad_proj=None)],
        forward=forward,
        always_refresh=False)
    callback.nfi = nfi
    try:
        # optimize
        xopt, f, d = lbfgsb(nfi.f,
                            nfi.flat_param,
                            nfi.fprime,
                            m=500,
                            callback=callback,
                            factr=1e0,
                            pgtol=1e-16)
        # (the remaining arguments and the matching except clause are
        # truncated in the source)
Example 5
"""
a interface "forward" for NumpyFunctionInterface is needed
"""


def forward():
    # evaluate Powell's badly scaled function at the tensor nfix
    return powell_bs(nfix)


"""
Finally, construct the NumpyFunctionInterface for the PyTorch tensor function.
"""
listofparameters = [nfix]
nfi = NumpyFunctionInterface(listofparameters, forward=forward)
"""
Now the interfaces provided by "nfi" are ready to use: "nfi.flat_param", "nfi.f", "nfi.fprime".
What these interfaces do is something like
```
class NumpyFunctionInterface:
    # recall that nfi = NumpyFunctionInterface(listofparameters, forward)
    @property
    def params(self):
        for p in listofparameters:
            yield p

    @property
    def flat_param(self):
        views = []
        for p in self.params:
            views.append(p.view(-1))
        # (truncated in the source: the flattened views are then
        # concatenated into a single 1-D numpy array)
```
"""
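# For example (a sketch; lbfgsb stands for scipy.optimize.fmin_l_bfgs_b,
# matching its use in the other examples):
from scipy.optimize import fmin_l_bfgs_b as lbfgsb
x, f, d = lbfgsb(nfi.f, nfi.flat_param, nfi.fprime, m=100, factr=1, pgtol=1e-14)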
Example 6
            stableloss = 0
            sparseloss = 0
            momentloss = 0
        if constraint == 'frozen':
            momentloss = 0
        loss = (stablize * stableloss + dataloss
                + stepnum * sparsity * sparseloss
                + stepnum * momentsparsity * momentloss)
        if torch.isnan(loss):
            # replace a NaN loss with +inf so the optimizer rejects this point
            loss = (torch.ones(1, requires_grad=True) /
                    torch.zeros(1)).to(loss)
        return loss

    nfi = NumpyFunctionInterface(
        [dict(params=model.diff_params(),
              isfrozen=isfrozen,
              x_proj=model.diff_x_proj,
              grad_proj=model.diff_grad_proj),
         dict(params=model.expr_params(), isfrozen=False)],
        forward=forward,
        always_refresh=False)
    callback.nfi = nfi

    def callbackhook(_callback, *args):
        # global model, block, u0_obs, T, stable_loss, data_loss, sparse_loss
        stableloss, dataloss, sparseloss, momentloss = \
                setenv.loss(model, u_obs, globalnames, block, layerweight)
        stableloss, dataloss, sparseloss, momentloss = \
                stableloss.item(), dataloss.item(), sparseloss.item(), momentloss.item()
        with _callback.open() as output:
            print("stableloss: {:.2e}".format(stableloss),
                  "  dataloss: {:.2e}".format(dataloss),
                  "  sparseloss: {:.2e}".format(sparseloss),
                  "  momentloss: {:.2e}".format(momentloss))
            # (the last two fields are reconstructed; the source snippet is
            # truncated at this point)
Example 7
    # (the head of this residual function is truncated in the source)
    r = x[2] * te1 - x[3] * te2 + x[5] * te3 - y
    return r.dot(r)


def powell_bs(x):
    # Powell's badly scaled function
    return ((1e4 * x[0] * x[1] - 1)**2
            + ((-x[0]).exp() + (-x[1]).exp() - 1.0001)**2)


#%% Penalty
penalty = Penalty(100, 1e-5)
nfi = NumpyFunctionInterface(
    [{'params': [penalty.x1]},
     {'params': [penalty.x2]}],
    penalty.forward)
x0 = torch.cat([penalty.x1.cpu(), penalty.x2.cpu()], 0).data.clone().numpy()
x, f, d = lbfgsb(nfi.f, x0, nfi.fprime, m=100, factr=1, pgtol=1e-14, iprint=10)
# slsqp is assumed to be scipy.optimize.fmin_slsqp; with full_output=True it
# returns the solution, final objective, iteration count, exit mode and message
out, fx, its, imode, smode = slsqp(nfi.f,
                                   x0,
                                   fprime=nfi.fprime,
                                   acc=1e-16,
                                   iter=15000,
                                   iprint=1,
                                   full_output=True)
#%% Trignometric
trig = Trignometric(100)  # (the snippet is truncated here in the source)