Example #1
b11 = 0.9    # Adam-style hyperparameter: decay rate for the first-moment estimates (beta_1)
b21 = 0.999  # decay rate for the second-moment estimates (beta_2)
e = 1e-8     # small constant to avoid division by zero (epsilon)

mw1 = 0      # first-moment (m) and second-moment (v) accumulators,
vw1 = 0      # one pair per parameter group of the network
mw_end = 0
vw_end = 0
mb1 = 0
vb1 = 0
mb_end = 0
vb_end = 0

batchsize = 32

# build the network; layers, indim, neurons and outdim are defined earlier in the example (not shown)
net = nn(batchsize, layers, indim, neurons, outdim)
w1, b1, w_end, b_end = net.gen_params_firstlast()   # first- and last-layer weights and biases

gw1 = grad(bceloss, 0)       # gradient of the BCE loss w.r.t. w1 (argument 0)
gw_end = grad(bceloss, 1)    # ... w.r.t. w_end
gb1 = grad(bceloss, 2)       # ... w.r.t. b1
gb_end = grad(bceloss, 3)    # ... w.r.t. b_end

lossfunction = np.array([0])

for i in range(1, it + 1):   # `it` (number of iterations) is defined elsewhere in the example
    # evaluate the loss gradients for each parameter group
    gradw1 = gw1(w1, w_end, b1, b_end)
    gradw_end = gw_end(w1, w_end, b1, b_end)
    gradb1 = gb1(w1, w_end, b1, b_end)
    gradb_end = gb_end(w1, w_end, b1, b_end)
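
The example is cut off before the parameter update, but the moment accumulators and the b11/b21/e constants above suggest a standard Adam step. A minimal, self-contained sketch of that step is shown below; the helper name adam_step and the learning rate lr are assumptions and are not part of the original snippet.

# Sketch of a standard Adam update (assumed to be what the accumulators above are for)
import numpy as np

def adam_step(param, grad_param, m, v, i, lr=1e-3, b1=0.9, b2=0.999, eps=1e-8):
    m = b1 * m + (1 - b1) * grad_param          # biased first-moment estimate
    v = b2 * v + (1 - b2) * grad_param ** 2     # biased second-moment estimate
    mhat = m / (1 - b1 ** i)                    # bias correction (i is the 1-based iteration)
    vhat = v / (1 - b2 ** i)
    param = param - lr * mhat / (np.sqrt(vhat) + eps)
    return param, m, v

# inside the training loop, each parameter group would be updated like:
# w1, mw1, vw1 = adam_step(w1, gradw1, mw1, vw1, i)
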
Example #2
    #    gu = grad(u,x)

    return u


def loss(w1, w2, w_end, b1, b2, b_end):
    # placeholder: returns a constant rather than an actual loss value
    loss = 1
    return loss


N = 500       # total number of sample points
Nu = 2        # points reserved for the data/boundary term
Nf = N - Nu   # remaining collocation points
lam = 1       # weighting factor

# network architecture passed to nn(): depth, input dimension, output dimension, width
layers = 2
indim = 1
outdim = 1
neurons = 50

x, fx, Nu_out = gen_data()                          # data-generation helper defined elsewhere in the example

net = nn(N, layers, indim, neurons, outdim)
w1, b1, w_end, b_end = net.gen_params_firstlast()   # first- and last-layer weights and biases
w2, b2 = net.gen_params_hidden()                    # hidden-layer weights and biases

gu = grad(gen_u, 0)   # first derivative of gen_u with respect to its first argument (x)
g2u = grad(gu)        # second derivative, obtained by differentiating gu again

gx = gu(x)            # first derivative evaluated at x
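
For reference, a minimal, self-contained sketch of the nested-differentiation pattern used above, assuming `grad` comes from the autograd package (the imports are not shown in the snippet); the function f here is only an illustration, not part of the example.

import autograd.numpy as np
from autograd import grad

def f(x):
    return np.sin(x)

df = grad(f)      # first derivative, df/dx
d2f = grad(df)    # second derivative, d2f/dx2, by differentiating df again
print(df(1.0), d2f(1.0))   # approximately cos(1) and -sin(1)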