Example #1
# Loss as a function of beta only; x, gamma, and fake_y are fixed
# free variables from the enclosing (test) scope.
def check_beta(b):
    y, _, _ = layers.batchnorm(x, gamma, b)
    return layers.l2_loss(y, fake_y)
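Closures like check_beta are typically handed to a gradient checker that compares an analytic gradient against a finite-difference estimate. The sketch below shows such an estimate in plain NumPy, assuming the parameter is an ordinary NumPy array and the closure returns a scalar; numerical_gradient and eps are illustrative names, not part of minpy.

import numpy as np

def numerical_gradient(fn, param, eps=1e-5):
    # Central-difference estimate of d fn(param) / d param, entry by entry.
    grad = np.zeros_like(param)
    it = np.nditer(param, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = param[idx]
        param[idx] = orig + eps
        loss_plus = fn(param)
        param[idx] = orig - eps
        loss_minus = fn(param)
        param[idx] = orig                      # restore the entry
        grad[idx] = (loss_plus - loss_minus) / (2 * eps)
        it.iternext()
    return grad

# e.g. num_d_beta = numerical_gradient(check_beta, beta)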
Example #2
# Loss as a function of the input x; f, weights, and fake_y come from the enclosing scope.
def check_fn(x):
    return layers.l2_loss(f(x=x, fc_weight=weights), fake_y)
Example #3
# Loss as a function of x passed through a ReLU; fake_y is a fixed target.
def check_fn(x):
    return layers.l2_loss(layers.relu(x), fake_y)
Example #4
# Loss as a function of gamma only; x, beta, and fake_y are fixed
# free variables from the enclosing (test) scope.
def check_gamma(g):
    y, _, _ = layers.batchnorm(x, g, beta)
    return layers.l2_loss(y, fake_y)
Example #5
# Loss as a function of the affine weight w; x, b, and fake_y are fixed.
def check_fn(w):
    return layers.l2_loss(layers.affine(x, w, b), fake_y)
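In minpy, a closure like check_fn above can also be differentiated directly with the functional grad transform and the result compared against a numerical estimate. A minimal sketch, assuming grad is importable from minpy.core and layers from minpy.nn as in the project's tutorials; the array shapes are made up for illustration.

import numpy as np
from minpy.core import grad
from minpy.nn import layers

x = np.random.randn(4, 3)        # batch of inputs (hypothetical shape)
w = np.random.randn(3, 5)        # affine weight under test
b = np.zeros(5)                  # affine bias
fake_y = np.random.randn(4, 5)   # arbitrary regression targets

def check_fn(w):
    return layers.l2_loss(layers.affine(x, w, b), fake_y)

dw = grad(check_fn)(w)           # analytic gradient of the loss w.r.t. w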
Example #6
# Model loss: L2 loss between the predictions predict and the targets y.
def loss(self, predict, y):
    return layers.l2_loss(predict, y)
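A loss method written this way normally lives inside a model class whose forward pass produces predict. The class below is only an illustration of that pattern; the class name, constructor, and forward method are hypothetical.

from minpy.nn import layers

class TinyRegressor(object):
    # Illustrative model: a single affine layer scored with an L2 loss.
    def __init__(self, w, b):
        self.w = w
        self.b = b

    def forward(self, x):
        # Produce predictions for a batch of inputs x.
        return layers.affine(x, self.w, self.b)

    def loss(self, predict, y):
        # Same two lines as the example above.
        return layers.l2_loss(predict, y)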
Example #7
# Loss as a function of x_0 only; x_1 and fake_y are held fixed.
def check_fn(x_0):
    return layers.l2_loss(f(x_0=x_0, x_1=x_1), fake_y)
Example #8
File: rnn.py  Project: sxjscience/minpy
# L2 loss between the model's predictions and the targets y.
def loss(self, predict, y):
    return layers.l2_loss(predict, y)