Example #1
 def l2(weights, name=None):
   """Applies l2 regularization to weights."""
   # `scale` is a free variable captured from the enclosing
   # l2_regularizer(scale) factory.
   with ops.op_scope([weights], name, 'l2_regularizer') as scope:
     my_scale = ops.convert_to_tensor(scale,
                                      dtype=weights.dtype.base_dtype,
                                      name='scale')
     return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)
Example #2
 def l2(weights):
   """Applies l2 regularization to weights."""
   # `scale` and `scope` are free variables captured from the enclosing
   # l2_regularizer factory.
   with ops.name_scope(scope, 'l2_regularizer', [weights]) as name:
     my_scale = ops.convert_to_tensor(scale,
                                      dtype=weights.dtype.base_dtype,
                                      name='scale')
     return standard_ops.mul(my_scale, nn.l2_loss(weights), name=name)
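Note: in both snippets above, `scale` (and `scope` in the second variant) come from an enclosing factory such as l2_regularizer(scale), which returns the `l2` closure. A minimal sketch of such a factory, assuming the public tf.* API rather than the internal ops/standard_ops modules used above:

 import tensorflow as tf

 def l2_regularizer(scale, scope=None):
   """Returns a closure l2(weights); `scale` and `scope` are captured here."""
   def l2(weights):
     with tf.name_scope(scope or 'l2_regularizer'):
       my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype,
                                       name='scale')
       return tf.multiply(my_scale, tf.nn.l2_loss(weights))
   return l2

 penalty = l2_regularizer(0.01)(tf.ones([3, 4]))  # 0.01 * 12 / 2 = 0.06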
Example #3
 def loop_fn(i):
   with g:
     x1 = array_ops.gather(x, i)
     outputs = nn.fused_batch_norm(
         x1,
         scale,
         offset,
         mean=mean,
         variance=variance,
         epsilon=0.01,
         data_format=data_format,
         is_training=is_training)
     outputs = list(outputs)
     # We only test the first value of outputs when is_training is False.
     # It looks like CPU and GPU have different outputs for batch_mean
     # and batch_variance for this case.
     if not is_training:
       outputs[1] = constant_op.constant(0.)
       outputs[2] = constant_op.constant(0.)
     loss = nn.l2_loss(outputs[0])
   if is_training:
     gradients = g.gradient(loss, [x1, scale, offset])
   else:
     gradients = [constant_op.constant(0.)] * 3
   return outputs + gradients
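Note: in these loop_fn examples, `g` behaves like a tf.GradientTape and loop_fn is meant to be vectorized over the batch index i (TF's pfor machinery). A minimal sketch of the same per-example-gradient pattern using the public tf.vectorized_map, with hypothetical shapes:

 import tensorflow as tf

 x = tf.random.normal([8, 4, 4, 3])  # hypothetical batch of examples

 def per_example_grad(x1):
   # One loop body: forward pass, l2 loss, gradient w.r.t. the example.
   with tf.GradientTape() as g:
     g.watch(x1)
     loss = tf.nn.l2_loss(x1)
   return g.gradient(loss, x1)  # for l2_loss this is just x1

 grads = tf.vectorized_map(per_example_grad, x)  # one gradient per example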
Example #4
 def loop_fn(i):
   with g:
     x1 = array_ops.gather(x, i)
     output = nn.avg_pool(
         x1, ksize, strides=[1, 2, 2, 1], padding="VALID",
         data_format="NHWC")
     loss = nn.l2_loss(output)
   return output, g.gradient(loss, x1)
Example #5
 def loop_fn(i):
   with g:
     x1 = array_ops.gather(x, i)
     output = nn.max_pool3d(
         x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
     loss = nn.l2_loss(output)
     ones = array_ops.ones_like(output)
     g.watch(ones)
     grad = g.gradient(loss, x1, output_gradients=ones)
   grad_grad = g.gradient(grad, ones)
   return output, grad, grad_grad
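Note: watching `ones` and passing it via output_gradients is a common way to exercise second-order gradients: `grad` is linear in `ones`, so the outer g.gradient(grad, ones) call differentiates through the first gradient computation (this requires a persistent tape, since `g` produces two gradients).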
Example #6
 def testGradient(self):
   x_shape = [20, 7, 3]
   np.random.seed(1)  # Make it reproducible.
   x_val = np.random.random_sample(x_shape).astype(np.float64)
   with self.test_session():
     x = constant_op.constant(x_val, name="x")
     output = nn.l2_loss(x)
     err = gc.ComputeGradientError(x, x_shape, output, [1])
   print "L2Loss gradient err = %g " % err
   err_tolerance = 1e-11
   self.assertLess(err, err_tolerance)
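Note: the tight tolerance is expected here. Since l2_loss(x) = sum(x**2) / 2, its analytic gradient is x itself, so the numerical check mostly measures finite-difference noise. A quick eager-mode sketch of that identity (not the test harness above):

 import tensorflow as tf

 x = tf.constant([1.0, -2.0, 3.0])
 with tf.GradientTape() as tape:
   tape.watch(x)
   y = tf.nn.l2_loss(x)               # sum(x**2) / 2
 print(tape.gradient(y, x).numpy())   # [ 1. -2.  3.] == x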
Example #7
 def model_fn(inps, init_state):
   state = init_state
   for inp in inps:
     _, state = cell(inp, state)
   output = nn.l2_loss(state.c)
   return gradient_ops.gradients(output, variables.trainable_variables())
Example #8
 def model_fn(activation):
   for layer in layers:
     activation = layer(activation)
   activation = projection(activation)
   activation = nn.l2_loss(activation)
   return gradient_ops.gradients(activation, variables.trainable_variables())
Example #9
 def loop_fn(i):
   with g:
     x_i = array_ops.gather(x, i)
     y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
     loss = nn.l2_loss(y)
   return y, g.gradient(loss, x_i)
Example #10
 def loop_fn(i):
   with g:
     x1 = array_ops.gather(x, i)
     y = op(x1) + x1
     loss = nn.l2_loss(y)
   return op(x), y, g.gradient(loss, x1)
Example #11
 def testL2Loss(self):
   with self.test_session():
     x = constant_op.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x")
     l2loss = nn.l2_loss(x)
     value = l2loss.eval()
   self.assertAllClose(7.0, value)
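Note: the expected value follows directly from the definition: l2_loss(x) = sum(x**2) / 2 = (1.0 + 0.0 + 9.0 + 4.0) / 2 = 7.0.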
Example #12
 def loop_fn(i):
   x1 = array_ops.gather(x, i)
   output = nn.max_pool(
       x1, ksize, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
   loss = nn.l2_loss(output)
   return output, gradient_ops.gradients(loss, x1)