def _TanhGrad(op, grad): """Returns grad * (1 - tanh(x) * tanh(x)).""" y = op.outputs[0] # y = tanh(x) with ops.control_dependencies([grad.op]): if y.dtype.is_complex: y = math_ops.conj(y) return gen_math_ops._tanh_grad(y, grad)
def _TanhGrad(op, grad): """Returns grad * (1 - tanh(x) * tanh(x)).""" y = op.outputs[0] # y = tanh(x) with ops.control_dependencies([grad.op]): y = math_ops.conj(y) # pylint: disable=protected-access return gen_math_ops._tanh_grad(y, grad)
def _TanhGrad(op, grad): """Returns grad * (1 - tanh(x) * tanh(x)).""" y = op.outputs[0] # y = tanh(x) with ops.control_dependencies([grad]): y = math_ops.conj(y) # pylint: disable=protected-access return gen_math_ops._tanh_grad(y, grad)
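# Hedged sketch, not taken from any of the snippets above: the three _TanhGrad variants
# all implement dL/dx = grad * (1 - tanh(x)^2), reusing the forward output y so tanh is
# not recomputed. The check below uses only public TF 1.x graph-mode APIs (tf.gradients,
# tf.Session); the input values are arbitrary.
import tensorflow as tf

x = tf.constant([0.5, -1.0, 2.0])
y = tf.tanh(x)
auto_grad, = tf.gradients(y, x)      # dispatches to the registered Tanh gradient
manual_grad = 1.0 - tf.square(y)     # the same formula written out by hand
with tf.Session() as sess:
  print(sess.run([auto_grad, manual_grad]))  # the two arrays should match
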
def atanh_grad(op, grad):
  x = op.inputs[0]
  y = op.outputs[0]
  org_grad = gen_math_ops._tanh_grad(y, grad)  # the stock tanh gradient, grad * (1 - y * y)
  # Passes grad through where y and grad share a sign; elsewhere falls back to org_grad
  # (the active return drops the / 2 that the commented-out variant applies to the first term).
  # return [gen_math_ops._abs(gen_math_ops.sign(y) - gen_math_ops.sign(grad)) / 2 * org_grad +
  #         gen_math_ops._abs(gen_math_ops.sign(y) + gen_math_ops.sign(grad)) / 2 * grad]
  return [gen_math_ops._abs(gen_math_ops.sign(y) - gen_math_ops.sign(grad)) * org_grad +
          gen_math_ops._abs(gen_math_ops.sign(y) + gen_math_ops.sign(grad)) / 2 * grad]

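# Hedged sketch of how a hand-written gradient such as atanh_grad is usually attached in
# TF 1.x: register it under a name and override the op's gradient inside a
# gradient_override_map scope. The registration name "CustomTanhGrad" and the choice of
# overriding "Tanh" are assumptions for illustration, not taken from the snippet above.
import tensorflow as tf
from tensorflow.python.ops import gen_math_ops

@tf.RegisterGradient("CustomTanhGrad")
def _custom_tanh_grad(op, grad):
  y = op.outputs[0]                          # y = tanh(x)
  return [gen_math_ops._tanh_grad(y, grad)]  # substitute atanh_grad's body here if desired

x = tf.constant([0.3, -0.7])
g = tf.get_default_graph()
with g.gradient_override_map({"Tanh": "CustomTanhGrad"}):
  y = tf.tanh(x)                             # this Tanh op now uses CustomTanhGrad
dy_dx, = tf.gradients(y, x)
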
def _TanhGradGrad(op, grad):
  with ops.control_dependencies([grad.op]):
    a = math_ops.conj(op.inputs[0])  # a = y = tanh(x)
    b = math_ops.conj(op.inputs[1])  # b = dy, the incoming first-order gradient
    # pylint: disable=protected-access
    return grad * -2.0 * b * a, gen_math_ops._tanh_grad(a, grad)

def _TanhGradGrad(op, grad):
  with ops.control_dependencies([grad]):
    a = math_ops.conj(op.inputs[0])
    b = math_ops.conj(op.inputs[1])
    # pylint: disable=protected-access
    return grad * -2.0 * b * a, gen_math_ops._tanh_grad(a, grad)

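# Hedged sketch (only the example values are assumptions): _TanhGradGrad differentiates
# the TanhGrad op, whose inputs are (y, dy) and whose output is dy * (1 - y^2), so the
# returned pair is (grad * -2 * y * dy, grad * (1 - y^2)). Calling tf.gradients twice
# exercises this path; the closed-form second derivative of tanh is
# -2 * tanh(x) * (1 - tanh(x)^2).
import tensorflow as tf

x = tf.constant([0.25, 1.5])
y = tf.tanh(x)
dy_dx, = tf.gradients(y, x)
d2y_dx2, = tf.gradients(dy_dx, x)         # routes through the TanhGrad gradient above
manual = -2.0 * y * (1.0 - tf.square(y))  # closed-form second derivative
with tf.Session() as sess:
  print(sess.run([d2y_dx2, manual]))      # should agree
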
def _penalized_tanh_grad(op, grad):
  x = op.inputs[0]
  a = op.inputs[1]  # penalty/slope applied on the negative side
  # No gradient flows to the penalty parameter a; it receives zeros of matching shape.
  return tf.select(x < 0., gen_math_ops._tanh_grad(grad, x * a),
                   gen_math_ops._tanh_grad(grad, x)), tf.zeros(tf.shape(a))

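# Hedged sketch of the forward op this gradient presumably pairs with: "penalized tanh"
# is commonly defined as tanh(x) for x >= 0 and a * tanh(x) for x < 0 (a = 0.25 is a
# common default; both the definition and the default are assumptions, not taken from
# the snippet above). Written with public ops, tf.where (which replaced tf.select in
# TF 1.0) lets autodiff produce the piecewise gradient without a hand-written override.
import tensorflow as tf

def penalized_tanh(x, a=0.25):
  t = tf.tanh(x)
  return tf.where(x >= 0.0, t, a * t)  # scale the negative side by the penalty a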