def _ReluGradGrad(op, grad):
  # Gradient of the ReluGrad op: mask the incoming grad by where x > 0,
  # with no gradient flowing back to x itself.
  x = op.inputs[1]
  return (gen_nn_ops._relu_grad(grad, x),
          array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _GuidedReluGrad(op, grad):
  # Guided backprop: keep the gradient only where it is positive and where
  # the relu output is positive (via _relu_grad); zero elsewhere.
  return tf.where(0. < grad,
                  gen_nn_ops._relu_grad(grad, op.outputs[0]),
                  tf.zeros_like(grad))
def _ReluGrad(op, grad):
  return gen_nn_ops._relu_grad(grad, op.outputs[0])
def _guided_grad_relu(op, grad):
  return guided_grad(gen_nn_ops._relu_grad(grad, op.outputs[0]))
def _ReluGrad(op, grad):
  # Check the forward input for NaN/Inf before computing the relu gradient.
  t = _VerifyTensor(op.inputs[0], op.name, "ReluGrad input is not finite.")
  return gen_nn_ops._relu_grad(grad, t)
def _guided_relu_grad(op, grad):
  # tf.select is the pre-TF-1.0 name for tf.where.
  return tf.select(0. < grad,
                   gen_nn_ops._relu_grad(grad, op.outputs[0]),
                   tf.zeros(grad.get_shape()))
def _leaky_relu_grad(op, grad):
  x = op.inputs[0]
  a = op.inputs[1]
  # Gradient w.r.t. x, plus a zero gradient for the slope tensor `a`.
  return (tf.select(x < 0.,
                    gen_nn_ops._relu_grad(grad, x * a),
                    gen_nn_ops._relu_grad(grad, x)),
          tf.zeros(tf.shape(a)))
def relu_mask(a, dtype=default_dtype):
  """Produces a mask of 1s for positive values and 0s for negative values."""
  from tensorflow.python.ops import gen_nn_ops
  ones = tf.ones(a.get_shape(), dtype=dtype)
  return gen_nn_ops._relu_grad(ones, a)
def GuidedReluGrad(op, grad):
  return tf.where(0. < grad,
                  gen_nn_ops._relu_grad(grad, op.outputs[0]),
                  tf.zeros(grad.get_shape()))
def _GuidedReluGrad(op, grad):
  return tf.where(0. < grad,
                  gen_nn_ops._relu_grad(grad, op.outputs[0]),
                  tf.zeros(grad.get_shape()))
def _GuidedReluGrad(op, grad):
  return tf.select(grad > 0.,
                   gen_nn_ops._relu_grad(grad, op.outputs[0]),
                   tf.zeros(grad.get_shape()))
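All of these variants are meant to be registered as custom gradients in graph-mode TensorFlow 1.x. The following is a minimal sketch of that wiring, assuming TF 1.x APIs; the "GuidedRelu" registration name and the placeholder model are illustrative assumptions, not taken from the snippets above.

import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

@tf.RegisterGradient("GuidedRelu")  # assumed registration name
def _GuidedReluGrad(op, grad):
  # Propagate gradient only where both the incoming gradient and the
  # relu output are positive (guided backpropagation).
  return tf.where(0. < grad,
                  gen_nn_ops._relu_grad(grad, op.outputs[0]),
                  tf.zeros_like(grad))

g = tf.Graph()
with g.as_default():
  # Substitute the registered gradient for the stock Relu gradient on every
  # Relu op constructed inside this block.
  with g.gradient_override_map({"Relu": "GuidedRelu"}):
    x = tf.placeholder(tf.float32, [None, 4])
    y = tf.nn.relu(x)
    dy_dx = tf.gradients(y, x)[0]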