Example #1
# Imports required by this snippet (TensorFlow 1.x internal modules).
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops


def default_partition_fn(keys, shard_num):
    """The default partition function: partitions keys by the "mod" strategy.

    Args:
      keys: a tensor holding the keys to be partitioned.
      shard_num: the number of partitions.

    Returns:
      a tensor of type `tf.int32` with the same shape as `keys`,
      holding the corresponding partition ids of the keys.
    """
    keys_op = ops.convert_to_tensor(keys, name="keys")
    gpu_mode = pywrap_tensorflow.IsGoogleCudaEnabled()

    with ops.colocate_with(keys_op):
        if keys_op.dtype == dtypes.int64 and gpu_mode:
            # This branch performs poorly in some multi-CPU scenarios, so we
            # fall back to the default branch when GPUs are not available.
            mask = constant_op.constant(0x7fffffff, dtypes.int64)
            keys_int32 = math_ops.cast(bitwise_ops.bitwise_and(keys_op, mask),
                                       dtypes.int32)
            # math_ops.mod of two int32 tensors is already int32, so no
            # further cast is needed.
            ids = math_ops.mod(keys_int32,
                               constant_op.constant(shard_num, dtypes.int32))
        elif keys_op.dtype == dtypes.string:
            ids = string_ops.string_to_hash_bucket_fast(keys_op, shard_num)
            # The hash-bucket ids are int64; mask off the high bits before
            # casting down to int32.
            mask = constant_op.constant(0x7fffffff, dtypes.int64)
            ids = math_ops.cast(bitwise_ops.bitwise_and(ids, mask),
                                dtypes.int32)
        else:
            ids = math_ops.cast(math_ops.mod(keys_op, shard_num),
                                dtype=dtypes.int32)
    return ids
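
A minimal usage sketch of the function above (the key values and shard count are illustrative, and it relies on the imports listed with the snippet):

keys = constant_op.constant([3, 7, 10, 2 ** 40], dtypes.int64)
partition_ids = default_partition_fn(keys, shard_num=3)
# On a CUDA-enabled build this computes (key & 0x7fffffff) % 3; otherwise
# it computes key % 3 directly. Either way the result dtype is tf.int32.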
Example #2
from tensorflow.python import pywrap_tensorflow


def IsGoogleCudaEnabled():
    # Reports whether this TensorFlow build was compiled with CUDA
    # (GPU) support.
    return pywrap_tensorflow.IsGoogleCudaEnabled()
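
A hedged sketch of how a caller might branch on this flag (the messages are illustrative):

if IsGoogleCudaEnabled():
    print("CUDA-enabled build: GPU kernels are available.")
else:
    print("CPU-only build: falling back to CPU kernels.")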
Example #3
from tensorflow.python import pywrap_tensorflow


def _get_default_devices():
    # Prefer a GPU when this build has CUDA support; otherwise use the CPU.
    return ["/GPU:0"] if pywrap_tensorflow.IsGoogleCudaEnabled() else ["/CPU:0"]
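
A minimal sketch of consuming the device list, reusing the `ops`, `constant_op`, and `math_ops` imports from Example #1 (the graph below is illustrative):

devices = _get_default_devices()
with ops.device(devices[0]):
    x = constant_op.constant([1.0, 2.0, 3.0])
    y = math_ops.reduce_sum(x)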