Example #1
def _wave_sparse_softmax_cross_entropy_with_logits_grad_int_cc(op, grad_0, _):
    sparse_softmax_grad_without_gradient = array_ops.prevent_gradient(
        op.outputs[1],
        message="Currently there is no way to take the second "
        "derivative of sparse_softmax_cross_entropy_with_logits due to the fused "
        "implementation's interaction with tf.gradients()")
    grad_0 = array_ops.expand_dims(grad_0, -1)
    return wavecomp_ops_module.wave_mul_int(
        grad_0, sparse_softmax_grad_without_gradient), None
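In Example #1, grad_0 carries one gradient value per example while op.outputs[1] carries a per-class gradient, so expand_dims(grad_0, -1) is what makes the elementwise multiply broadcast correctly. A minimal sketch of that broadcast, using hypothetical shapes (32 examples, 10 classes) that are not part of the original snippet:

import tensorflow as tf

# Hypothetical shapes chosen only to illustrate the broadcast above.
per_example_grad = tf.ones([32])         # plays the role of grad_0
fused_grad = tf.ones([32, 10])           # plays the role of op.outputs[1]
expanded = tf.expand_dims(per_example_grad, -1)  # shape (32, 1)
result = expanded * fused_grad                   # broadcasts to (32, 10)
print(result.shape)                              # (32, 10)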
Example #2
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient function for SoftmaxCrossEntropyWithLogits."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # There is no gradient for the labels
  #
  # Currently there is no way to take the second derivative of this op
  # due to the fused implementation's interaction with tf.gradients(),
  # so we make sure we prevent silently incorrect results by raising
  # an error if the second derivative is requested via prevent_gradient.
  softmax_grad_without_gradient = array_ops.prevent_gradient(op.outputs[1])
  return _BroadcastMul(grad_0, softmax_grad_without_gradient), None
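Gradient functions like the one above are not called directly; TensorFlow looks them up through ops.RegisterGradient when tf.gradients() is built. A hedged sketch of that mechanism, registered under a hypothetical op type name ("MyFusedLoss") so it stays runnable; TensorFlow's own nn_grad.py attaches the real function to "SoftmaxCrossEntropyWithLogits" in the same way:

from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops

@ops.RegisterGradient("MyFusedLoss")
def _MyFusedLossGrad(op, grad_0, _):
  # Block any second derivative through the fused op, as in the examples above.
  grad_no_second = array_ops.prevent_gradient(
      op.outputs[1], message="Second derivative is not supported.")
  return array_ops.expand_dims(grad_0, -1) * grad_no_second, None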
Example #3
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
    """Gradient function for SoftmaxCrossEntropyWithLogits."""
    # grad_0 is the backprop for cost, and we multiply it with the gradients
    # (which is output[1])
    # There is no gradient for the labels
    #
    # Currently there is no way to take the second derivative of this op
    # due to the fused implementation's interaction with tf.gradients(),
    # so we make sure we prevent silently incorrect results by raising
    # an error if the second derivative is requested via prevent_gradient.
    softmax_grad_without_gradient = array_ops.prevent_gradient(op.outputs[1])
    return _BroadcastMul(grad_0, softmax_grad_without_gradient), None
Example #4
def _CTCLossGrad(op, grad_loss, _):
    """The derivative provided by CTC Loss.

    Args:
       op: the CTCLoss op.
       grad_loss: The backprop for cost.

    Returns:
       The CTC Loss gradient.
    """
    # Outputs are: loss, grad
    #
    # Currently there is no way to take the second derivative of this op
    # due to the fused implementation's interaction with tf.gradients(),
    # so we make sure we prevent silently incorrect results by raising
    # an error if the second derivative is requested via prevent_gradient.
    grad_without_gradient = array_ops.prevent_gradient(op.outputs[1])
    # Return gradient for inputs and None for
    # labels_indices, labels_values and sequence_length
    return [_BroadcastMul(grad_loss, grad_without_gradient), None, None, None]
Example #5
def _CTCLossGrad(op, grad_loss, _):
  """The derivative provided by CTC Loss.

  Args:
     op: the CTCLoss op.
     grad_loss: The backprop for cost.

  Returns:
     The CTC Loss gradient.
  """
  # Outputs are: loss, grad
  #
  # Currently there is no way to take the second derivative of this op
  # due to the fused implementation's interaction with tf.gradients(),
  # so we make sure we prevent silently incorrect results by raising
  # an error if the second derivative is requested via prevent_gradient.
  grad_without_gradient = array_ops.prevent_gradient(op.outputs[1])
  # Return gradient for inputs and None for
  # labels_indices, labels_values and sequence_length
  return [_BroadcastMul(grad_loss, grad_without_gradient), None, None, None]
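Examples #2 through #5 all call a helper named _BroadcastMul that the listing does not show. In TensorFlow's nn_grad.py it is essentially an expand-then-multiply; a minimal sketch of that behavior (the exact in-tree definition may differ slightly):

from tensorflow.python.ops import array_ops

def _BroadcastMul(vec, mat):
  """Multiply vec (shape [batch]) with mat (shape [batch, n]) by broadcasting."""
  vec = array_ops.expand_dims(vec, -1)  # [batch] -> [batch, 1]
  return vec * mat                      # broadcasts to [batch, n]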
Example #6
def grad(dy):
    return array_ops.prevent_gradient(
        dy, message="Second derivative is not implemented.")

def testPreventGradient(self):
    with ops.Graph().as_default():
        inp = constant(1.0, shape=[100, 32], name="in")
        out = array_ops.prevent_gradient(inp)
        with self.assertRaisesRegexp(LookupError, "explicitly disabled"):
            _ = gradients.gradients(out, inp)
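The grad(dy) closure at the top of Example #6 reads like the backward function of a tf.custom_gradient; a hedged, self-contained sketch of how prevent_gradient is typically wired into such a closure to block second derivatives (the wrapped op, tf.identity, is illustrative only):

import tensorflow as tf
from tensorflow.python.ops import array_ops

@tf.custom_gradient
def first_order_only(x):
  y = tf.identity(x)  # forward pass, chosen purely for illustration

  def grad(dy):
    # Differentiating through dy a second time raises LookupError.
    return array_ops.prevent_gradient(
        dy, message="Second derivative is not implemented.")

  return y, grad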
Example #8
 def testPreventGradient(self):
   with ops.Graph().as_default():
     inp = constant(1.0, shape=[100, 32], name="in")
     out = array_ops.prevent_gradient(inp)
     with self.assertRaisesRegexp(LookupError, "explicitly disabled"):
       _ = gradients.gradients(out, inp)
def _FuzzyCTCLossGrad(op, grad_loss, _):
    grad_without_gradient = array_ops.prevent_gradient(
        op.outputs[1],
        message="Currently there is no way to take the second "
        "derivative of ctc_loss due to the fused implementation's interaction "
        "with tf.gradients()")
    return [_BroadcastMul(tf.expand_dims(grad_loss, -1), grad_without_gradient),
            None, None, None]
Example #10
def prevent_gradient(x, message='', name=None):
    return array_ops.prevent_gradient(x, message=message, name=name)
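Finally, a short end-to-end sketch of the behavior the tests in Examples #6 and #8 assert: building gradients through prevent_gradient raises LookupError. It uses the same graph-mode modules those tests rely on; the surrounding setup is illustrative:

from tensorflow.python.framework import constant_op, ops
from tensorflow.python.ops import array_ops, gradients

with ops.Graph().as_default():
  inp = constant_op.constant(1.0, shape=[100, 32], name="in")
  out = array_ops.prevent_gradient(
      inp, message="second derivative is not needed here")
  try:
    gradients.gradients(out, inp)
  except LookupError as e:
    print("Gradient blocked:", e)  # error text contains "explicitly disabled"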