Example #1
 def testExecuteMultipleNonListOutput(self):
   x = tensor.Tensor([1, 2, 3, 4, 5, 6])
   y = tensor.Tensor([1, 3, 5])
   result = array_ops.listdiff(x, y)
   out, idx = result
   self.assertTrue(out is result.out)
   self.assertTrue(idx is result.idx)
   self.assertAllEqual([2, 4, 6], out.numpy())
   self.assertAllEqual([1, 3, 5], idx.numpy())
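For reference, the op under test returns both the difference values and their positions in x. A minimal standalone sketch of the same behavior, assuming a TF 2.x eager environment and using the public tf.raw_ops.ListDiff endpoint (the inputs below are illustrative):

import tensorflow as tf

x = tf.constant([1, 2, 3, 4, 5, 6])
y = tf.constant([1, 3, 5])
# ListDiff returns a namedtuple: `out` holds the values of x that are not
# present in y, and `idx` holds their positions within x.
result = tf.raw_ops.ListDiff(x=x, y=y)
print(result.out.numpy())  # [2 4 6]
print(result.idx.numpy())  # [1 3 5]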
Example #2
 def testExecuteMultipleNonListOutput(self):
   x = constant_op.constant([1, 2, 3, 4, 5, 6])
   y = constant_op.constant([1, 3, 5])
   result = array_ops.listdiff(x, y)
   out, idx = result
   self.assertTrue(out is result.out)
   self.assertTrue(idx is result.idx)
   self.assertAllEqual([2, 4, 6], out)
   self.assertAllEqual([1, 3, 5], idx)
Example #3
 def _testListDiff(self, x, y, out, idx):
   for dtype in [dtypes.int32, dtypes.int64]:
     for index_dtype in [dtypes.int32, dtypes.int64]:
       with self.cached_session():
         x_tensor = ops.convert_to_tensor(x, dtype=dtype)
         y_tensor = ops.convert_to_tensor(y, dtype=dtype)
         with self.test_scope():
           out_tensor, idx_tensor = array_ops.listdiff(
               x_tensor, y_tensor, out_idx=index_dtype)
           tf_out, tf_idx = self.evaluate([out_tensor, idx_tensor])
       self.assertAllEqual(out, tf_out)
       self.assertAllEqual(idx, tf_idx)
       self.assertEqual(1, out_tensor.get_shape().ndims)
       self.assertEqual(1, idx_tensor.get_shape().ndims)
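The loop over index_dtype above exercises the op's out_idx attribute, which selects the dtype of the returned index tensor. A short sketch of that parameter in isolation, again assuming TF 2.x eager mode with illustrative inputs:

import tensorflow as tf

x = tf.constant([10, 20, 30], dtype=tf.int64)
y = tf.constant([20], dtype=tf.int64)
# out_idx picks int32 or int64 for the `idx` output; the default is int32.
out, idx = tf.raw_ops.ListDiff(x=x, y=y, out_idx=tf.int64)
print(out.numpy())  # [10 30]
print(idx.dtype)    # <dtype: 'int64'>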
Example #4
def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.

  input_shape = array_ops.shape(op.inputs[0])
  # Reshape reduction indices for the case where the parameter is a scalar
  reduction_indices = array_ops.reshape(op.inputs[1], [-1])

  # Expand grad to full input shape
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)

  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, its dtype defaults to
  # float32, so we need to cast it here. We put all the shape-related ops on
  # the CPU to avoid copying back and forth, and because listdiff is CPU-only.
  with ops.device("/cpu:0"):
    reduced = math_ops.cast(reduction_indices, dtypes.int32)
    idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
    other, _ = array_ops.listdiff(idx, reduced)
    perm = array_ops.concat(0, [reduced, other])
    reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
    other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))

  # Calculate product, leaving out the current entry
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  y = array_ops.reshape(left * right, permuted_shape)

  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None
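The comments above describe building the gradient from two exclusive cumprod passes instead of dividing the product by each entry, which would fail on zeros. A small NumPy sketch (with made-up values) verifying that identity, i.e. that left[i] * right[i] equals the product of all entries except x[i]:

import numpy as np

x = np.array([2., 0., 5.])

# Exclusive cumprod from the left: [1, x0, x0*x1]
left = np.cumprod(np.concatenate([[1.], x[:-1]]))
# Exclusive cumprod from the right, reversed back: [x1*x2, x2, 1]
right = np.cumprod(np.concatenate([[1.], x[:0:-1]]))[::-1]

# Product of all entries except x[i]; the zero entry is handled cleanly,
# and the result matches d(prod(x))/dx = [0., 10., 0.].
print(left * right)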