Example no. 1
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops


@ops.RegisterGradient("RealDiv")
def _RealDivGrad(op, grad):
  """RealDiv op gradient."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  # conj is an identity for real dtypes.
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  # d(x/y)/dx = 1/y and d(x/y)/dy = -x/y**2, each summed over the
  # broadcast axes and reshaped back to the input's shape.
  return (array_ops.reshape(
      math_ops.reduce_sum(math_ops.realdiv(grad, y), rx), sx),
          array_ops.reshape(
              math_ops.reduce_sum(
                  grad * math_ops.realdiv(math_ops.realdiv(-x, y), y), ry), sy))
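The two returned values implement d(x/y)/dx = 1/y and d(x/y)/dy = -x/y**2. A minimal sketch checking those formulas through the public TF 2.x API, assuming tf.math.truediv, which dispatches to the RealDiv op for float inputs:

import tensorflow as tf

x = tf.constant([2.0, 6.0])
y = tf.constant([4.0, 3.0])
with tf.GradientTape() as tape:
  tape.watch([x, y])
  z = tf.math.truediv(x, y)  # RealDiv under the hood for float dtypes
dx, dy = tape.gradient(z, [x, y])
print(dx.numpy())  # [0.25       0.33333334]  == 1 / y
print(dy.numpy())  # [-0.125     -0.6666667]  == -x / y**2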
Example no. 2
def _RealDivGrad(op, grad):
  """RealDiv op gradient."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  # Older formulation: the y-gradient is written as -x / y**2 rather than
  # the nested (-x / y) / y above; the two are algebraically equal.
  return (array_ops.reshape(math_ops.reduce_sum(
      math_ops.realdiv(grad, y), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(
              grad * math_ops.realdiv(-x, math_ops.square(y)), ry), sy))
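Although -x / y**2 and (-x / y) / y are algebraically equal, the nested division keeps the intermediate value in range when y is large, which is plausibly why later versions switched to the form in Example no. 1. A numpy sketch of the difference in float32:

import numpy as np

x = np.float32(1.0)
y = np.float32(3e20)
print(-x / np.square(y))  # y**2 overflows float32 to inf, so this is -0.0
print((-x / y) / y)       # stays finite: about -1.1e-41 (subnormal)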
Example no. 3
    def _get_aggregated_dense_grad(self, graph_item, grad_name,
                                   reduce_to_device):
        """Sum one dense gradient across replicas and divide by the
        replica count, returning the averaged gradient tensor."""
        grad_op_name = strip_replica_prefix(get_op_name(grad_name))
        output_idx = get_index_from_tensor_name(grad_name)
        # Look up the gradient op inside every replica's name scope.
        grad_ops = [
            graph_item.graph.get_operation_by_name(
                ops.prepend_name_scope(grad_op_name, replica_prefix(i)))
            for i in range(self.num_replicas)
        ]

        # Aggregate gradients on `reduce_to_device` (usually CPU)
        with ops.device(reduce_to_device):
            grad_sum_op_name = ops.prepend_name_scope(
                grad_op_name, u"%sAdd" % AUTODIST_PREFIX)
            grad_sum = math_ops.add_n(
                [grad_op.outputs[output_idx] for grad_op in grad_ops],
                name=grad_sum_op_name)
            grad_avg_op_name = ops.prepend_name_scope(
                grad_op_name, u"%sDiv" % AUTODIST_PREFIX)
            # realdiv gives the float average of the summed gradients.
            grad_avg = math_ops.realdiv(grad_sum,
                                        self.num_replicas,
                                        name=grad_avg_op_name)
        return grad_avg
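The core of this snippet is the add_n-then-realdiv average; the name-scope calls just give the new ops deterministic names. A standalone sketch of that arithmetic with hypothetical per-replica gradients (the AutoDist helpers are omitted):

import tensorflow as tf

num_replicas = 2
# Hypothetical stand-ins for grad_op.outputs[output_idx] above.
replica_grads = [tf.constant([1.0, 2.0]), tf.constant([3.0, 4.0])]
grad_sum = tf.math.add_n(replica_grads)
grad_avg = tf.math.truediv(grad_sum, num_replicas)  # RealDiv for floats
print(grad_avg.numpy())  # [2. 3.]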
Example no. 4
def testRealDiv(self):
  nums, divs = self.floatTestData()
  # test_session() is deprecated in newer TF in favor of cached_session().
  with self.test_session():
    tf_result = math_ops.realdiv(nums, divs).eval()
    np_result = np.divide(nums, divs)
    self.assertAllEqual(tf_result, np_result)
Example no. 5
def testRealDiv(self):
    nums, divs = self.floatTestData()
    with self.cached_session():
        tf_result = math_ops.realdiv(nums, divs).eval()
        np_result = np.divide(nums, divs)
        self.assertAllEqual(tf_result, np_result)
Example no. 6
def testRealDiv(self):
    nums, divs = self.floatTestData()
    # No explicit session: assertAllClose evaluates the tensor itself.
    tf_result = math_ops.realdiv(nums, divs)
    np_result = np.divide(nums, divs)
    self.assertAllClose(tf_result, np_result)
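The three tests all rely on a floatTestData helper that is not shown. A plausible stand-in (the exact ranges are an assumption) that makes the last test runnable end to end:

import numpy as np
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class RealDivTest(test.TestCase):

  def floatTestData(self):
    # Assumed helper: broadcastable float arrays, no zeros in the divisor.
    nums = np.arange(-10, 10, .25).reshape(80, 1)
    divs = np.arange(-3, 0, .25).reshape(1, 12)
    return nums, divs

  def testRealDiv(self):
    nums, divs = self.floatTestData()
    tf_result = math_ops.realdiv(nums, divs)
    np_result = np.divide(nums, divs)
    self.assertAllClose(tf_result, np_result)


if __name__ == "__main__":
  test.main()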