Example #1
def _TanhGrad(op, grad):
    """Returns grad * (1 - tanh(x) * tanh(x))."""
    y = op.outputs[0]  # y = tanh(x)
    with ops.control_dependencies([grad.op]):
        if y.dtype.is_complex:
            y = math_ops.conj(y)
        return gen_math_ops._tanh_grad(y, grad)
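A quick sanity check (not part of the scraped listing above): the docstring states the result is grad * (1 - tanh(x) * tanh(x)), and in TF 2.x the same kernel is exposed publicly as tf.raw_ops.TanhGrad, so the claim can be verified numerically as sketched below.

import tensorflow as tf

x = tf.constant([-1.5, -0.1, 0.0, 0.7, 2.0])
grad = tf.ones_like(x)                        # upstream gradient dy
y = tf.tanh(x)                                # the op output reused by _TanhGrad

kernel = tf.raw_ops.TanhGrad(y=y, dy=grad)    # public wrapper over the same kernel
closed_form = grad * (1.0 - y * y)
print(tf.reduce_max(tf.abs(kernel - closed_form)).numpy())  # ~0.0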
Example #2
def _TanhGrad(op, grad):
  """Returns grad * (1 - tanh(x) * tanh(x))."""
  y = op.outputs[0]  # y = tanh(x)
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    # pylint: disable=protected-access
    return gen_math_ops._tanh_grad(y, grad)
Example #3
def _TanhGrad(op, grad):
    """Returns grad * (1 - tanh(x) * tanh(x))."""
    y = op.outputs[0]  # y = tanh(x)
    with ops.control_dependencies([grad]):
        y = math_ops.conj(y)
        # pylint: disable=protected-access
        return gen_math_ops._tanh_grad(y, grad)
Example #4
def _TanhGrad(op, grad):
  """Returns grad * (1 - tanh(x) * tanh(x))."""
  y = op.outputs[0]  # y = tanh(x)
  with ops.control_dependencies([grad.op]):
    if y.dtype.is_complex:
      y = math_ops.conj(y)
    return gen_math_ops._tanh_grad(y, grad)
Example #5
File: ops.py  Project: hobotrl/hobotrl
def atanh_grad(op, grad):
    x = op.inputs[0]
    y = op.outputs[0]
    org_grad = gen_math_ops._tanh_grad(y, grad)
    # return [gen_math_ops._abs(gen_math_ops.sign(y) - gen_math_ops.sign(grad)) / 2 * org_grad +
    #         gen_math_ops._abs(gen_math_ops.sign(y) + gen_math_ops.sign(grad)) / 2 * grad]
    return [gen_math_ops._abs(gen_math_ops.sign(y) - gen_math_ops.sign(grad)) * org_grad +
            gen_math_ops._abs(gen_math_ops.sign(y) + gen_math_ops.sign(grad)) / 2 * grad]
Example #6
def _TanhGradGrad(op, grad):
  with ops.control_dependencies([grad.op]):
    a = math_ops.conj(op.inputs[0])
    b = math_ops.conj(op.inputs[1])
    # pylint: disable=protected-access
    return grad * -2.0 * b * a, gen_math_ops._tanh_grad(a, grad)
Example #7
def _TanhGradGrad(op, grad):
    with ops.control_dependencies([grad]):
        a = math_ops.conj(op.inputs[0])
        b = math_ops.conj(op.inputs[1])
        # pylint: disable=protected-access
        return grad * -2.0 * b * a, gen_math_ops._tanh_grad(a, grad)
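The two values returned by _TanhGradGrad are the partial derivatives of f(y, dy) = dy * (1 - y*y) with respect to y and dy, scaled by the incoming grad: -2.0 * y * dy * grad and (1 - y*y) * grad, the latter being exactly _tanh_grad(y, grad). A minimal TF 2.x sketch (not from the listing; it assumes autodiff via tf.GradientTape) checks this numerically:

import tensorflow as tf

y = tf.constant([0.1, -0.4, 0.8])      # plays the role of op.inputs[0]
dy = tf.constant([1.0, 2.0, -1.0])     # plays the role of op.inputs[1]
grad = tf.constant([0.5, 0.5, 0.5])    # upstream gradient on TanhGrad's output

with tf.GradientTape() as tape:
    tape.watch([y, dy])
    out = dy * (1.0 - y * y)           # what the TanhGrad kernel computes
d_y, d_dy = tape.gradient(out, [y, dy], output_gradients=grad)

print(d_y.numpy(), (grad * -2.0 * dy * y).numpy())              # matches first return value
print(d_dy.numpy(), tf.raw_ops.TanhGrad(y=y, dy=grad).numpy())  # matches second return value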
Example #8
def _penalized_tanh_grad(op, grad):
    x = op.inputs[0]
    a = op.inputs[1]

    # tf.select is the pre-1.0 API; it was renamed tf.where in TensorFlow 1.0.
    return (tf.select(x < 0.,
                      gen_math_ops._tanh_grad(grad, x * a),
                      gen_math_ops._tanh_grad(grad, x)),
            tf.zeros(tf.shape(a)))