Example #1
def _ReluGradGrad(op, grad):
    # op is ReluGrad(gradients, features), so inputs[1] is the features
    # tensor; the second-order gradient w.r.t. features is zero.
    x = op.inputs[1]
    return (gen_nn_ops._relu_grad(grad, x),
            array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
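For context, _ReluGradGrad is the gradient registered for the ReluGrad op
itself, so it runs when second-order gradients flow through a ReLU. A minimal
sanity check, assuming a TF 1.x-style graph session via tensorflow.compat.v1:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.constant([-1.0, 2.0])
y = tf.nn.relu(x)
g = tf.gradients(y, x)[0]    # first order: 0 where x <= 0, 1 where x > 0
gg = tf.gradients(g, x)[0]   # second order: ReLU is piecewise linear, so 0
with tf.Session() as sess:
    print(sess.run([g, gg]))  # [array([0., 1.]), array([0., 0.])]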
Example #2
def _GuidedReluGrad(op, grad):
    return tf.where(0. < grad,
                    gen_nn_ops._relu_grad(grad, op.outputs[0]),
                    tf.zeros_like(grad))
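Guided-backprop gradients like the one above are typically swapped in for the
stock ReLU gradient using TF 1.x's graph-mode override machinery. A sketch,
assuming graph mode; the registration name "GuidedRelu" is arbitrary, and
newer TF releases expose the helper as gen_nn_ops.relu_grad (no underscore):

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import gen_nn_ops
tf.disable_eager_execution()

@tf.RegisterGradient("GuidedRelu")  # arbitrary name chosen here
def _guided_relu_grad(op, grad):
    return tf.where(0. < grad,
                    gen_nn_ops.relu_grad(grad, op.outputs[0]),
                    tf.zeros_like(grad))

g = tf.Graph()
with g.as_default():
    # Every Relu created inside this scope backprops through the override.
    with g.gradient_override_map({"Relu": "GuidedRelu"}):
        x = tf.constant([-1.0, 2.0])
        y = tf.nn.relu(x)
    dx = tf.gradients(y, x)[0]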
Example #3
def _ReluGradGrad(op, grad):
  x = op.inputs[1]
  return (gen_nn_ops._relu_grad(grad, x), array_ops.zeros(
      shape=array_ops.shape(x), dtype=x.dtype))
Example #4
def _ReluGrad(op, grad):
    return gen_nn_ops._relu_grad(grad, op.outputs[0])
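Underneath all of these, relu_grad(gradients, features) simply passes each
incoming gradient through where the reference tensor is positive. A NumPy
sketch of that behaviour, inferred from how the examples use the op:

import numpy as np

def relu_grad_reference(gradients, features):
    # Keep the gradient where features > 0, zero it elsewhere.
    return np.where(features > 0, gradients, 0.0)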
Example #5
def _guided_grad_relu(op, grad):
    return guided_grad(gen_nn_ops._relu_grad(grad, op.outputs[0]))
Example #6
def _ReluGrad(op, grad):
  return gen_nn_ops._relu_grad(grad, op.outputs[0])
Example #7
def _ReluGrad(op, grad):
    t = _VerifyTensor(op.inputs[0], op.name, "ReluGrad input is not finite.")
    return gen_nn_ops._relu_grad(grad, t)
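_VerifyTensor is an internal helper; a rough public-API equivalent in TF 1.x
is tf.verify_tensor_all_finite (later renamed tf.debugging.assert_all_finite).
A sketch, assuming that mapping:

import tensorflow.compat.v1 as tf

def _verify_tensor_sketch(t, name, msg):
    # Pass t through unchanged, but fail at run time if it contains
    # NaN or Inf values.
    with tf.name_scope(name + "/verify"):
        return tf.verify_tensor_all_finite(t, msg)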
Example #8
def _guided_relu_grad(op, grad):
    # tf.select was renamed tf.where in TensorFlow 1.0.
    return tf.where(0. < grad,
                    gen_nn_ops._relu_grad(grad, op.outputs[0]),
                    tf.zeros(grad.get_shape()))
Example #9
def _leaky_relu_grad(op, grad):
    x = op.inputs[0]
    a = op.inputs[1]
    # tf.select was renamed tf.where in TensorFlow 1.0. One gradient is
    # returned per op input: the masked gradient for x and zeros for a.
    return (tf.where(x < 0.,
                     gen_nn_ops._relu_grad(grad, x * a),
                     gen_nn_ops._relu_grad(grad, x)),
            tf.zeros(tf.shape(a)))
Example #10
def _GuidedReluGrad(op, grad):
    return tf.where(0. < grad, gen_nn_ops._relu_grad(grad, op.outputs[0]), tf.zeros_like(grad))
Example #11
def relu_mask(a, dtype=default_dtype):
  """Produces mask of 1s for positive values and 0s for negative values."""
  from tensorflow.python.ops import gen_nn_ops
  ones = tf.ones(a.get_shape(), dtype=dtype)
  return gen_nn_ops._relu_grad(ones, a)
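A small usage sketch for relu_mask; default_dtype comes from the surrounding
module, so tf.float32 is assumed here. Note that relu_grad(ones, a) keeps the
1 only where a > 0, so zeros in a are masked to 0 as well:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

a = tf.constant([-2.0, 0.0, 3.0])
mask = relu_mask(a, dtype=tf.float32)
with tf.Session() as sess:
    print(sess.run(mask))  # [0. 0. 1.]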
Example #12
def GuidedReluGrad(op, grad):
    return tf.where(0. < grad,
                    gen_nn_ops._relu_grad(grad, op.outputs[0]),
                    tf.zeros(grad.get_shape()))
Example #13
def _GuidedReluGrad(op, grad):
    return tf.where(0. < grad, gen_nn_ops._relu_grad(grad, op.outputs[0]),
                    tf.zeros(grad.get_shape()))
Example #14
def _GuidedReluGrad(op, grad):
    # tf.select was renamed tf.where in TensorFlow 1.0.
    return tf.where(
        grad > 0.,
        gen_nn_ops._relu_grad(grad, op.outputs[0]),
        tf.zeros(grad.get_shape()))