Example 1
def default_partition_fn(keys, shard_num):
    """The default partition function.
      partition keys by "mod" strategy.

      keys: a tensor presents the keys to be partitioned.
      shard_num: the num of partitions
    Returns:
      a tensor with same shape as keys with type of `tf.int32`,
        represents the corresponding partition-ids of keys.
    """
    keys_op = ops.convert_to_tensor(keys, name="keys")
    gpu_mode = _pywrap_util_port.IsGoogleCudaEnabled()

    with ops.colocate_with(keys_op):
        if keys_op.dtype == dtypes.int64 and gpu_mode:
            # This branch performs poorly in some multi-CPU scenarios,
            # so we fall back to the default branch when no GPU is available.
            mask = constant_op.constant(0x7fffffff, dtypes.int64)
            keys_int32 = math_ops.cast(bitwise_ops.bitwise_and(keys_op, mask),
                                       dtypes.int32)
            mod = math_ops.mod(keys_int32,
                               constant_op.constant(shard_num, dtypes.int32))
            ids = math_ops.cast(mod, dtype=dtypes.int32)
        elif keys_op.dtype == dtypes.string:
            ids = string_ops.string_to_hash_bucket_fast(keys_op, shard_num)
            mask = constant_op.constant(0x7fffffff, dtypes.int64)
            ids = math_ops.cast(bitwise_ops.bitwise_and(ids, mask),
                                dtypes.int32)
        else:
            ids = math_ops.cast(math_ops.mod(keys_op, shard_num),
                                dtype=dtypes.int32)
    return ids
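A minimal usage sketch of the mod strategy above, written against the public tf.bitwise API rather than the internal modules the snippet imports; the key values are illustrative:

import tensorflow as tf

keys = tf.constant([0, 5, 3, 14], dtype=tf.int64)
shard_num = 3

# Mask to the low 31 bits so the value stays non-negative, then mod.
masked = tf.bitwise.bitwise_and(keys, tf.constant(0x7FFFFFFF, tf.int64))
ids = tf.cast(masked, tf.int32) % shard_num
print(ids.numpy())  # [0 2 0 2]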
Example 2
  def testInvertOp(self):
    dtype_list = [
        dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
        dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64
    ]
    inputs = [0, 5, 3, 14]
    with self.test_session(use_gpu=True) as sess:
      for dtype in dtype_list:
        # Because of issues with negative numbers, let's test this indirectly.
        # 1. invert(a) and a = 0
        # 2. invert(a) or a = invert(0)
        input_tensor = constant_op.constant(inputs, dtype=dtype)
        not_a_and_a, not_a_or_a, not_0 = sess.run([
            bitwise_ops.bitwise_and(input_tensor,
                                    bitwise_ops.invert(input_tensor)),
            bitwise_ops.bitwise_or(input_tensor,
                                   bitwise_ops.invert(input_tensor)),
            bitwise_ops.invert(constant_op.constant(0, dtype=dtype))
        ])
        self.assertAllEqual(not_a_and_a, [0, 0, 0, 0])
        self.assertAllEqual(not_a_or_a, [not_0] * 4)
        # For unsigned dtypes let's also check the result directly.
        if dtype.is_unsigned:
          inverted = sess.run(bitwise_ops.invert(input_tensor))
          expected = [dtype.max - x for x in inputs]
          self.assertAllEqual(inverted, expected)
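The two identities in the comments can be checked directly in eager mode with the public tf.bitwise API; a minimal sketch using the same input values:

import tensorflow as tf

x = tf.constant([0, 5, 3, 14], dtype=tf.uint8)
inv = tf.bitwise.invert(x)

# invert(a) & a == 0 and invert(a) | a == invert(0), elementwise.
print(tf.bitwise.bitwise_and(x, inv).numpy())  # [0 0 0 0]
print(tf.bitwise.bitwise_or(x, inv).numpy())   # [255 255 255 255]
# For unsigned dtypes, invert(x) == dtype.max - x.
print(inv.numpy())                             # [255 250 252 241]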
Example 3
  def testShapeInference(self):
    dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
                  dtypes.uint8, dtypes.uint16]

    with self.test_session(use_gpu=True) as sess:
      for dtype in dtype_list:
        lhs = constant_op.constant([[0], [3], [5]], dtype=dtype)
        rhs = constant_op.constant([[1, 2, 4]], dtype=dtype)

        and_tensor = bitwise_ops.bitwise_and(lhs, rhs)
        or_tensor = bitwise_ops.bitwise_or(lhs, rhs)
        xor_tensor = bitwise_ops.bitwise_xor(lhs, rhs)
        ls_tensor = bitwise_ops.left_shift(lhs, rhs)
        rs_tensor = bitwise_ops.right_shift(lhs, rhs)

        and_result, or_result, xor_result, ls_result, rs_result = sess.run(
            [and_tensor, or_tensor, xor_tensor, ls_tensor, rs_tensor])

        # Compare shape inference with result
        self.assertAllEqual(and_tensor.get_shape().as_list(), and_result.shape)
        self.assertAllEqual(and_tensor.get_shape().as_list(), [3, 3])
        self.assertAllEqual(or_tensor.get_shape().as_list(), or_result.shape)
        self.assertAllEqual(or_tensor.get_shape().as_list(), [3, 3])
        self.assertAllEqual(xor_tensor.get_shape().as_list(), xor_result.shape)
        self.assertAllEqual(xor_tensor.get_shape().as_list(), [3, 3])
        self.assertAllEqual(ls_tensor.get_shape().as_list(), ls_result.shape)
        self.assertAllEqual(ls_tensor.get_shape().as_list(), [3, 3])
        self.assertAllEqual(rs_tensor.get_shape().as_list(), rs_result.shape)
        self.assertAllEqual(rs_tensor.get_shape().as_list(), [3, 3])
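The expected [3, 3] shape comes from NumPy-style broadcasting of a [3, 1] operand against a [1, 3] operand. A minimal eager sketch with the public API, reusing the test's operands:

import tensorflow as tf

lhs = tf.constant([[0], [3], [5]], dtype=tf.int32)  # shape [3, 1]
rhs = tf.constant([[1, 2, 4]], dtype=tf.int32)      # shape [1, 3]

out = tf.bitwise.bitwise_and(lhs, rhs)              # broadcasts to [3, 3]
print(out.shape)    # (3, 3)
print(out.numpy())  # [[0 0 0] [1 2 0] [1 0 4]]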
Example 4
  def testShapeInference(self):
    dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
                  dtypes.uint8, dtypes.uint16]

    with self.session(use_gpu=True) as sess:
      for dtype in dtype_list:
        lhs = constant_op.constant([[0], [3], [5]], dtype=dtype)
        rhs = constant_op.constant([[1, 2, 4]], dtype=dtype)

        and_tensor = bitwise_ops.bitwise_and(lhs, rhs)
        or_tensor = bitwise_ops.bitwise_or(lhs, rhs)
        xor_tensor = bitwise_ops.bitwise_xor(lhs, rhs)
        ls_tensor = bitwise_ops.left_shift(lhs, rhs)
        rs_tensor = bitwise_ops.right_shift(lhs, rhs)

        and_result, or_result, xor_result, ls_result, rs_result = sess.run(
            [and_tensor, or_tensor, xor_tensor, ls_tensor, rs_tensor])

        # Compare shape inference with result
        self.assertAllEqual(and_tensor.get_shape().as_list(), and_result.shape)
        self.assertAllEqual(and_tensor.get_shape().as_list(), [3, 3])
        self.assertAllEqual(or_tensor.get_shape().as_list(), or_result.shape)
        self.assertAllEqual(or_tensor.get_shape().as_list(), [3, 3])
        self.assertAllEqual(xor_tensor.get_shape().as_list(), xor_result.shape)
        self.assertAllEqual(xor_tensor.get_shape().as_list(), [3, 3])
        self.assertAllEqual(ls_tensor.get_shape().as_list(), ls_result.shape)
        self.assertAllEqual(ls_tensor.get_shape().as_list(), [3, 3])
        self.assertAllEqual(rs_tensor.get_shape().as_list(), rs_result.shape)
        self.assertAllEqual(rs_tensor.get_shape().as_list(), [3, 3])
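The shift ops broadcast the same way; for the shifted values themselves, a short sketch with the same operands as the test:

import tensorflow as tf

lhs = tf.constant([[0], [3], [5]], dtype=tf.int32)
rhs = tf.constant([[1, 2, 4]], dtype=tf.int32)

print(tf.bitwise.left_shift(lhs, rhs).numpy())
# [[ 0  0  0]
#  [ 6 12 48]
#  [10 20 80]]
print(tf.bitwise.right_shift(lhs, rhs).numpy())
# [[0 0 0]
#  [1 0 0]
#  [2 1 0]]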
Example 5
  def _benchmark_bitwise_and(self, mat, device):
    if device == GPU and not context.num_gpus():
      return
    with context.device(device):
      if device == GPU:
        mat = mat.gpu()
      func = lambda: bitwise_ops.bitwise_and(mat, mat)
      self._run(func, num_iters=5000)
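Outside the benchmark harness (GPU, context, and self._run come from the surrounding test class), a rough eager-mode timing can be taken with timeit; the matrix size and iteration count below are arbitrary:

import timeit
import tensorflow as tf

mat = tf.ones([1000, 1000], dtype=tf.int32)
n = 100
secs = timeit.timeit(lambda: tf.bitwise.bitwise_and(mat, mat), number=n)
print(f"{secs / n * 1e6:.1f} us per call")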
Example 6
  def testBinaryOps(self):
    dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
                  dtypes.uint8, dtypes.uint16]

    with self.test_session(use_gpu=True) as sess:
      for dtype in dtype_list:
        lhs = constant_op.constant([0, 5, 3, 14], dtype=dtype)
        rhs = constant_op.constant([5, 0, 7, 11], dtype=dtype)
        and_result, or_result, xor_result = sess.run(
            [bitwise_ops.bitwise_and(lhs, rhs),
             bitwise_ops.bitwise_or(lhs, rhs),
             bitwise_ops.bitwise_xor(lhs, rhs)])
        self.assertAllEqual(and_result, [0, 0, 3, 10])
        self.assertAllEqual(or_result, [5, 5, 7, 15])
        self.assertAllEqual(xor_result, [5, 5, 4, 5])
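The expected triples follow from the bit patterns; for instance, the last element pair can be checked with plain Python integers:

# 14 = 0b1110, 11 = 0b1011
print(14 & 11, 14 | 11, 14 ^ 11)  # 10 15 5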
Example 7
  def testBinaryOps(self):
    dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
                  dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]

    with self.session(use_gpu=True) as sess:
      for dtype in dtype_list:
        lhs = constant_op.constant([0, 5, 3, 14], dtype=dtype)
        rhs = constant_op.constant([5, 0, 7, 11], dtype=dtype)
        and_result, or_result, xor_result = sess.run(
            [bitwise_ops.bitwise_and(lhs, rhs),
             bitwise_ops.bitwise_or(lhs, rhs),
             bitwise_ops.bitwise_xor(lhs, rhs)])
        self.assertAllEqual(and_result, [0, 0, 3, 10])
        self.assertAllEqual(or_result, [5, 5, 7, 15])
        self.assertAllEqual(xor_result, [5, 5, 4, 5])
Example 8
def default_partition_fn(keys, shard_num):
  """The default partition function.

  Partitions keys by the "mod" strategy.

  Args:
    keys: a tensor containing the keys to be partitioned.
    shard_num: the number of partitions.

  Returns:
    a tensor with the same shape as `keys` and dtype `tf.int32`,
      representing the partition ids of the keys.
  """
  keys_op = ops.convert_to_tensor(keys, name="keys")
  with ops.colocate_with(keys_op):
    if keys_op.dtype == dtypes.int64:
      mask = constant_op.constant(0x7FFFFFFF, dtypes.int64)
      keys_int32 = math_ops.cast(bitwise_ops.bitwise_and(keys_op, mask),
                                 dtypes.int32)
      mod = math_ops.mod(keys_int32,
                         constant_op.constant(shard_num, dtypes.int32))
      ids = math_ops.cast(mod, dtype=dtypes.int32)
    else:
      ids = math_ops.cast(math_ops.mod(keys_op, shard_num), dtype=dtypes.int32)
  return ids
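This variant always masks int64 keys with 0x7FFFFFFF, which keeps only the low 31 bits, so even negative keys yield a non-negative value before the mod. A minimal sketch with assumed key values:

import tensorflow as tf

keys = tf.constant([-7, 7], dtype=tf.int64)
masked = tf.bitwise.bitwise_and(keys, tf.constant(0x7FFFFFFF, tf.int64))
print(masked.numpy())                           # [2147483641 7], both non-negative
print((tf.cast(masked, tf.int32) % 3).numpy())  # [1 1]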
Example 9
  def testInvertOp(self):
    dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
                  dtypes.uint8, dtypes.uint16]
    inputs = [0, 5, 3, 14]
    with self.test_session(use_gpu=True) as sess:
      for dtype in dtype_list:
        # Because of issues with negative numbers, let's test this indirectly.
        # 1. invert(a) and a = 0
        # 2. invert(a) or a = invert(0)
        input_tensor = constant_op.constant(inputs, dtype=dtype)
        not_a_and_a, not_a_or_a, not_0 = sess.run(
            [bitwise_ops.bitwise_and(
                input_tensor, bitwise_ops.invert(input_tensor)),
             bitwise_ops.bitwise_or(
                 input_tensor, bitwise_ops.invert(input_tensor)),
             bitwise_ops.invert(constant_op.constant(0, dtype=dtype))])
        self.assertAllEqual(not_a_and_a, [0, 0, 0, 0])
        self.assertAllEqual(not_a_or_a, [not_0] * 4)
        # For unsigned dtypes let's also check the result directly.
        if dtype.is_unsigned:
          inverted = sess.run(bitwise_ops.invert(input_tensor))
          expected = [dtype.max - x for x in inputs]
          self.assertAllEqual(inverted, expected)