Example #1
def xla_launch_eager_fallback(constants, args, resources, Tresults, function, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function xla_launch
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(resources, (list, tuple)):
    raise TypeError(
        "Expected list for 'resources' argument to "
        "'xla_launch' Op, not %r." % resources)
  _attr_Nresources = len(resources)
  if not isinstance(Tresults, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tresults' argument to "
        "'xla_launch' Op, not %r." % Tresults)
  Tresults = [_execute.make_type(_t, "Tresults") for _t in Tresults]
  _attr_Tconstants, constants = _execute.convert_to_mixed_eager_tensors(constants, _ctx)
  _attr_Targs, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
  resources = _ops.convert_n_to_tensor(resources, _dtypes.resource)
  _inputs_flat = list(constants) + list(args) + list(resources)
  _attrs = ("Tconstants", _attr_Tconstants, "Targs", _attr_Targs,
  "Nresources", _attr_Nresources, "Tresults", Tresults, "function", function)
  _result = _execute.execute(b"XlaLaunch", len(Tresults), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "XlaLaunch", _inputs_flat, _attrs, _result, name)
  return _result
Example #2
def xla_launch(constants, args, resources, Tresults, function, name=None):
  r"""XLA Launch Op. For use by the XLA JIT only.

  Args:
    constants: A list of `Tensor` objects.
    args: A list of `Tensor` objects.
    resources: A list of `Tensor` objects with type `resource`.
    Tresults: A list of `tf.DTypes`.
    function: A function decorated with @Defun.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tresults`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(resources, (list, tuple)):
      raise TypeError(
          "Expected list for 'resources' argument to "
          "'xla_launch' Op, not %r." % resources)
    _attr_Nresources = len(resources)
    if not isinstance(Tresults, (list, tuple)):
      raise TypeError(
          "Expected list for 'Tresults' argument to "
          "'xla_launch' Op, not %r." % Tresults)
    Tresults = [_execute.make_type(_t, "Tresults") for _t in Tresults]
    _, _, _op = _op_def_lib._apply_op_helper(
        "XlaLaunch", constants=constants, args=args, resources=resources,
        Tresults=Tresults, function=function, name=name)
    _result = _op.outputs[:]
    if not _result:
      return _op
    _inputs_flat = _op.inputs
    _attrs = ("Tconstants", _op.get_attr("Tconstants"), "Targs",
              _op.get_attr("Targs"), "Nresources", _op.get_attr("Nresources"),
              "Tresults", _op.get_attr("Tresults"), "function",
              _op.get_attr("function"))
    _execute.record_gradient(
      "XlaLaunch", _inputs_flat, _attrs, _result, name)
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "XlaLaunch",
        name, _ctx._post_execution_callbacks, constants, args, resources,
        "Tresults", Tresults, "function", function)
      return _result
    except _core._FallbackException:
      return xla_launch_eager_fallback(
          constants, args, resources, Tresults=Tresults, function=function,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
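Both functions above follow the dispatch shape shared by every generated wrapper in these examples: graph mode adds a node through `_apply_op_helper`, eager mode first tries the C fast path and only falls back to the Python slow path on `_FallbackException`. A minimal schematic of that pattern, using a hypothetical op name `MyOp` rather than any real TensorFlow op:
def my_op(x, name=None):
  ctx = _context.context()
  if not ctx.executing_eagerly():
    # Graph mode: add a node to the graph and return its symbolic output.
    _, _, op = _op_def_lib._apply_op_helper("MyOp", x=x, name=name)
    return op.outputs[0]
  try:
    # Eager fast path: dispatch directly to the C++ kernel.
    return _pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._context_handle, ctx.device_name, "MyOp", name,
        ctx._post_execution_callbacks, x)
  except _core._FallbackException:
    # Slow path: convert inputs in Python, then call _execute.execute.
    return my_op_eager_fallback(x, name=name, ctx=ctx)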
Example #3
def sparse_feature_cross_v2_eager_fallback(indices, values, shapes, dense, hashed_output, num_buckets, hash_key, out_type, internal_type, name=None, ctx=None):
  r"""Slow-path eager-mode fallback for `sparse_feature_cross_v2`."""
  _ctx = ctx if ctx else _context.context()
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_feature_cross_v2' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  hashed_output = _execute.make_bool(hashed_output, "hashed_output")
  num_buckets = _execute.make_int(num_buckets, "num_buckets")
  hash_key = _execute.make_int(hash_key, "hash_key")
  out_type = _execute.make_type(out_type, "out_type")
  internal_type = _execute.make_type(internal_type, "internal_type")
  _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
  _attr_dense_types, dense = _execute.convert_to_mixed_eager_tensors(dense, _ctx)
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
  shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
  _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense)
  _attrs = ("N", _attr_N, "hashed_output", hashed_output, "num_buckets",
  num_buckets, "hash_key", hash_key, "sparse_types", _attr_sparse_types,
  "dense_types", _attr_dense_types, "out_type", out_type, "internal_type",
  internal_type)
  _result = _execute.execute(b"SparseFeatureCrossV2", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SparseFeatureCrossV2", _inputs_flat, _attrs, _result, name)
  _result = _SparseFeatureCrossV2Output._make(_result)
  return _result
Example #4
def zero_var_initializer_eager_fallback(var, dtype, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function zero_var_initializer
  """
  _ctx = ctx if ctx else _context.context()
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  var = _ops.convert_to_tensor(var, _dtypes.resource)
  _inputs_flat = [var]
  _attrs = ("dtype", dtype, "shape", shape)
  _result = _execute.execute(b"ZeroVarInitializer", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ZeroVarInitializer", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example #5
def zero_var_initializer(var, dtype, shape, name=None):
  r"""Initialize 'var' with all zeros. This op requires that the resource var is not

  initialized. The var will first be allocated memory, then be filled with all
  zeros. This op is intended to save memory during initialization,
  if you use this op, you should not run initializer of the var.

  Args:
    var: A `Tensor` of type `resource`. Should be a ResourceVariable.
    dtype: A `tf.DType`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    Same as "var".
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ZeroVarInitializer", var=var, dtype=dtype, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "shape", _op.get_attr("shape"))
    _execute.record_gradient(
      "ZeroVarInitializer", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ZeroVarInitializer", name, _ctx._post_execution_callbacks, var,
        "dtype", dtype, "shape", shape)
      return _result
    except _core._FallbackException:
      return zero_var_initializer_eager_fallback(
          var, dtype=dtype, shape=shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
Example #6
def tensor_list_stack_eager_fallback(input_handle, element_dtype, num_elements=-1, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tensor_list_stack
  """
  _ctx = _context.context()
  element_dtype = _execute.make_type(element_dtype, "element_dtype")
  if num_elements is None:
    num_elements = -1
  num_elements = _execute.make_int(num_elements, "num_elements")
  input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
  _inputs_flat = [input_handle]
  _attrs = ("element_dtype", element_dtype, "num_elements", num_elements)
  _result = _execute.execute(b"TensorListStack", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TensorListStack", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
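As a rough usage sketch (assuming TF 2.x, where these list ops are exposed under `tf.raw_ops`), a tensor can be round-tripped through a TensorList:
import tensorflow as tf

t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# Split rows into a TensorList, then stack them back into a dense tensor.
handle = tf.raw_ops.TensorListFromTensor(tensor=t, element_shape=[2])
stacked = tf.raw_ops.TensorListStack(input_handle=handle, element_shape=[2],
                                     element_dtype=tf.float32)
# stacked holds the same values as t, shape (2, 2)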
Example #7
def truncated_normal_eager_fallback(shape, dtype, seed, seed2, name, ctx):
  dtype = _execute.make_type(dtype, "dtype")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _attr_T, (shape,) = _execute.args_to_matching_eager([shape], ctx)
  _inputs_flat = [shape]
  _attrs = ("seed", seed, "seed2", seed2, "dtype", dtype, "T", _attr_T)
  _result = _execute.execute(b"TruncatedNormal", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TruncatedNormal", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
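The public entry point for this kernel is `tf.random.truncated_normal`; a minimal sketch (TF 2.x assumed):
import tensorflow as tf

# Values farther than 2 standard deviations from the mean are dropped
# and re-drawn, so all samples here land within (-2, 2).
sample = tf.random.truncated_normal([3, 3], mean=0.0, stddev=1.0, seed=42)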
Example #8
def tensor_map_lookup_eager_fallback(input_handle, key, value_dtype, name,
                                     ctx):
    value_dtype = _execute.make_type(value_dtype, "value_dtype")
    _attr_key_dtype, (key, ) = _execute.args_to_matching_eager([key], ctx, [])
    input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
    _inputs_flat = [input_handle, key]
    _attrs = ("key_dtype", _attr_key_dtype, "value_dtype", value_dtype)
    _result = _execute.execute(b"TensorMapLookup",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("TensorMapLookup", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Example #9
def py_func_eager_fallback(input, token, Tout, name, ctx):
    token = _execute.make_str(token, "token")
    if not isinstance(Tout, (list, tuple)):
        raise TypeError("Expected list for 'Tout' argument to "
                        "'py_func' Op, not %r." % Tout)
    Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
    _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, ctx)
    _inputs_flat = list(input)
    _attrs = ("token", token, "Tin", _attr_Tin, "Tout", Tout)
    _result = _execute.execute(b"PyFunc",
                               len(Tout),
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("PyFunc", _inputs_flat, _attrs, _result)
    return _result
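In user code this kernel is normally reached through `tf.compat.v1.py_func`, or `tf.py_function` in TF 2.x (the latter is backed by the EagerPyFunc op shown later). A minimal sketch:
import tensorflow as tf

def square(x):
  return x * x  # ordinary Python, executed with eager tensors as inputs

y = tf.py_function(func=square, inp=[tf.constant(3.0)], Tout=tf.float32)
# y == 9.0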
Example #10
def stateless_random_normal_eager_fallback(shape, seed, dtype=_dtypes.float32, name=None, ctx=None):
  r"""Slow-path eager-mode fallback for `stateless_random_normal`."""
  _ctx = ctx if ctx else _context.context()
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _attr_T, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)
  _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], _ctx, _dtypes.int64)
  _inputs_flat = [shape, seed]
  _attrs = ("dtype", dtype, "T", _attr_T, "Tseed", _attr_Tseed)
  _result = _execute.execute(b"StatelessRandomNormal", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "StatelessRandomNormal", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
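A usage sketch through the public API (TF 2.x assumed): the output is a pure function of `shape` and the two-element `seed`, so repeated calls agree.
import tensorflow as tf

a = tf.random.stateless_normal([2, 2], seed=[1, 2])
b = tf.random.stateless_normal([2, 2], seed=[1, 2])
# tf.reduce_all(a == b) is True: same shape and seed, same values.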
Example #11
def fake_param_eager_fallback(dtype, shape, name=None, ctx=None):
    r"""Slow-path eager-mode fallback for `fake_param`."""
    _ctx = ctx if ctx else _context.context()
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    _inputs_flat = []
    _attrs = ("dtype", dtype, "shape", shape)
    _result = _execute.execute(b"FakeParam",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("FakeParam", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
Example #12
def ragged_range_eager_fallback(starts, limits, deltas, Tsplits=_dtypes.int64, name=None, ctx=None):
  r"""Slow-path eager-mode fallback for `ragged_range`."""
  _ctx = ctx if ctx else _context.context()
  if Tsplits is None:
    Tsplits = _dtypes.int64
  Tsplits = _execute.make_type(Tsplits, "Tsplits")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([starts, limits, deltas], _ctx, _dtypes.int32)
  (starts, limits, deltas) = _inputs_T
  _inputs_flat = [starts, limits, deltas]
  _attrs = ("T", _attr_T, "Tsplits", Tsplits)
  _result = _execute.execute(b"RaggedRange", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "RaggedRange", _inputs_flat, _attrs, _result, name)
  _result = _RaggedRangeOutput._make(_result)
  return _result
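The corresponding public API is `tf.ragged.range`, which assembles the op's two outputs (`rt_nested_splits`, `rt_dense_values`) into a `RaggedTensor`. A small sketch (TF 2.x assumed):
import tensorflow as tf

rt = tf.ragged.range(starts=[0, 5], limits=[3, 7])
# <tf.RaggedTensor [[0, 1, 2], [5, 6]]>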
Example #13
def stateless_truncated_normal_v2_eager_fallback(shape, key, counter, alg, dtype, name, ctx):
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _attr_Tshape, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32)
  key = _ops.convert_to_tensor(key, _dtypes.uint64)
  counter = _ops.convert_to_tensor(counter, _dtypes.uint64)
  alg = _ops.convert_to_tensor(alg, _dtypes.int32)
  _inputs_flat = [shape, key, counter, alg]
  _attrs = ("dtype", dtype, "Tshape", _attr_Tshape)
  _result = _execute.execute(b"StatelessTruncatedNormalV2", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatelessTruncatedNormalV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
Example #14
def tensor_list_element_shape(input_handle, shape_type, name=None):
  r"""The shape of the elements of the given list, as a tensor.

    input_handle: the list
    element_shape: the shape of elements of the list

  Args:
    input_handle: A `Tensor` of type `variant`.
    shape_type: A `tf.DType` from: `tf.int32, tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `shape_type`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    shape_type = _execute.make_type(shape_type, "shape_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TensorListElementShape", input_handle=input_handle,
        shape_type=shape_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("shape_type", _op.get_attr("shape_type"))
    _execute.record_gradient(
      "TensorListElementShape", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "TensorListElementShape", name,
        _ctx._post_execution_callbacks, input_handle, "shape_type",
        shape_type)
      return _result
    except _core._FallbackException:
      return tensor_list_element_shape_eager_fallback(
          input_handle, shape_type=shape_type, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
Example #15
def remote_call(target, args, Tout, f, name=None):
    r"""Runs function `f` on a remote device indicated by `target`.

  Args:
    target: A `Tensor` of type `string`.
      A fully specified device name where we want to run the function.
    args: A list of `Tensor` objects. A list of arguments for the function.
    Tout: A list of `tf.DTypes` that has length `>= 1`.
      The type list for the return values.
    f: A function decorated with @Defun. The function to run remotely.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
  """
    if not isinstance(Tout, (list, tuple)):
        raise TypeError("Expected list for 'Tout' argument to "
                        "'remote_call' Op, not %r." % Tout)
    Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("RemoteCall",
                                                 target=target,
                                                 args=args,
                                                 Tout=Tout,
                                                 f=f,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"),
                  "f", _op.get_attr("f"))
    else:
        _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
        target = _ops.convert_to_tensor(target, _dtypes.string)
        _inputs_flat = [target] + list(args)
        _attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f)
        _result = _execute.execute(b"RemoteCall",
                                   len(Tout),
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("RemoteCall", _inputs_flat, _attrs, _result, name)
    return _result
Example #16
def stateless_truncated_normal(shape, seed, dtype=_dtypes.float32, name=None):
  r"""Outputs deterministic pseudorandom values from a truncated normal distribution.

  The generated values follow a normal distribution with mean 0 and standard
  deviation 1, except that values whose magnitude is more than 2 standard
  deviations from the mean are dropped and re-picked.

  The outputs are a deterministic function of `shape` and `seed`.

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The shape of the output tensor.
    seed: A `Tensor` of type `int64`. 2 seeds (shape [2]).
    dtype: An optional `tf.DType` from: `tf.half, tf.float32, tf.float64`. Defaults to `tf.float32`.
      The type of the output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`. Random values with specified shape.
  """
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "StatelessTruncatedNormal", shape=shape, seed=seed, dtype=dtype,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "T", _op.get_attr("T"))
  else:
    _attr_T, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)
    _attr_T = _attr_T.as_datatype_enum
    seed = _ops.convert_to_tensor(seed, _dtypes.int64)
    _inputs_flat = [shape, seed]
    _attrs = ("dtype", dtype, "T", _attr_T)
    _result = _execute.execute(b"StatelessTruncatedNormal", 1,
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
  _execute.record_gradient(
      "StatelessTruncatedNormal", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
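A usage sketch via the public API (TF 2.x assumed):
import tensorflow as tf

# Deterministic: the result is fully determined by shape and the [2] seed.
x = tf.random.stateless_truncated_normal([4], seed=[7, 11])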
Example #17
def stateless_random_uniform(shape, seed, dtype=_dtypes.float32, name=None):
  r"""Outputs deterministic pseudorandom values from a uniform distribution.

  The generated values follow a uniform distribution in the range `[0, 1)`. The
  lower bound 0 is included in the range, while the upper bound 1 is excluded.
  
  The outputs are a deterministic function of `shape` and `seed`.

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The shape of the output tensor.
    seed: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2 seeds (shape [2]).
    dtype: An optional `tf.DType` from: `tf.half, tf.float32, tf.float64`. Defaults to `tf.float32`.
      The type of the output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "StatelessRandomUniform", shape=shape, seed=seed, dtype=dtype,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "T", _op.get_attr("T"), "Tseed",
              _op.get_attr("Tseed"))
  else:
    _attr_T, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)
    _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], _ctx, _dtypes.int64)
    _inputs_flat = [shape, seed]
    _attrs = ("dtype", dtype, "T", _attr_T, "Tseed", _attr_Tseed)
    _result = _execute.execute(b"StatelessRandomUniform", 1,
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
  _execute.record_gradient(
      "StatelessRandomUniform", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
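And the uniform counterpart, again deterministic in (shape, seed), via the public API (TF 2.x assumed):
import tensorflow as tf

u = tf.random.stateless_uniform([2, 3], seed=[0, 1])  # values in [0, 1)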
Example #18
def ragged_range_eager_fallback(starts, limits, deltas, Tsplits, name, ctx):
    if Tsplits is None:
        Tsplits = _dtypes.int64
    Tsplits = _execute.make_type(Tsplits, "Tsplits")
    _attr_T, _inputs_T = _execute.args_to_matching_eager(
        [starts, limits, deltas], ctx, _dtypes.int32)
    (starts, limits, deltas) = _inputs_T
    _inputs_flat = [starts, limits, deltas]
    _attrs = ("T", _attr_T, "Tsplits", Tsplits)
    _result = _execute.execute(b"RaggedRange",
                               2,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("RaggedRange", _inputs_flat, _attrs, _result)
    _result = _RaggedRangeOutput._make(_result)
    return _result
Example #19
def collective_bcast_recv_eager_fallback(T, group_size, group_key, instance_key, shape, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function collective_bcast_recv
  """
  _ctx = ctx if ctx else _context.context()
  T = _execute.make_type(T, "T")
  group_size = _execute.make_int(group_size, "group_size")
  group_key = _execute.make_int(group_key, "group_key")
  instance_key = _execute.make_int(instance_key, "instance_key")
  shape = _execute.make_shape(shape, "shape")
  _inputs_flat = []
  _attrs = ("T", T, "group_size", group_size, "group_key", group_key,
  "instance_key", instance_key, "shape", shape)
  _result = _execute.execute(b"CollectiveBcastRecv", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "CollectiveBcastRecv", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example #20
def remote_call_eager_fallback(target, args, Tout, f, name=None, ctx=None):
  r"""Slow-path eager-mode fallback for `remote_call`."""
  _ctx = ctx if ctx else _context.context()
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'remote_call' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
  target = _ops.convert_to_tensor(target, _dtypes.string)
  _inputs_flat = [target] + list(args)
  _attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f)
  _result = _execute.execute(b"RemoteCall", len(Tout), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "RemoteCall", _inputs_flat, _attrs, _result, name)
  return _result
Example #21
def stateless_multinomial_eager_fallback(logits, num_samples, seed, output_dtype=_dtypes.int64, name=None, ctx=None):
  r"""Slow-path eager-mode fallback for `stateless_multinomial`."""
  _ctx = ctx if ctx else _context.context()
  if output_dtype is None:
    output_dtype = _dtypes.int64
  output_dtype = _execute.make_type(output_dtype, "output_dtype")
  _attr_T, (logits,) = _execute.args_to_matching_eager([logits], _ctx)
  _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], _ctx, _dtypes.int64)
  num_samples = _ops.convert_to_tensor(num_samples, _dtypes.int32)
  _inputs_flat = [logits, num_samples, seed]
  _attrs = ("T", _attr_T, "Tseed", _attr_Tseed, "output_dtype", output_dtype)
  _result = _execute.execute(b"StatelessMultinomial", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "StatelessMultinomial", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
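The public wrapper for this kernel is `tf.random.stateless_categorical`, which replaced the older `stateless_multinomial` name. A sketch (TF 2.x assumed):
import tensorflow as tf

logits = tf.math.log([[0.5, 0.5]])
# Draw 4 class indices from one batch row; deterministic given the seed.
samples = tf.random.stateless_categorical(logits, num_samples=4, seed=[3, 4])
# shape (1, 4), dtype int64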
Example #22
def batch_function_eager_fallback(in_tensors, captured_tensors, f, num_batch_threads, max_batch_size, batch_timeout_micros, Tout, max_enqueued_batches, allowed_batch_sizes, container, shared_name, batching_queue, name, ctx):
  num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads")
  max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
  batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros")
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'batch_function' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  if max_enqueued_batches is None:
    max_enqueued_batches = 10
  max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches")
  if allowed_batch_sizes is None:
    allowed_batch_sizes = []
  if not isinstance(allowed_batch_sizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'allowed_batch_sizes' argument to "
        "'batch_function' Op, not %r." % allowed_batch_sizes)
  allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes]
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  if batching_queue is None:
    batching_queue = ""
  batching_queue = _execute.make_str(batching_queue, "batching_queue")
  _attr_Tin, in_tensors = _execute.convert_to_mixed_eager_tensors(in_tensors, ctx)
  _attr_Tcaptured, captured_tensors = _execute.convert_to_mixed_eager_tensors(captured_tensors, ctx)
  _inputs_flat = list(in_tensors) + list(captured_tensors)
  _attrs = ("f", f, "num_batch_threads", num_batch_threads, "max_batch_size",
  max_batch_size, "batch_timeout_micros", batch_timeout_micros,
  "max_enqueued_batches", max_enqueued_batches, "allowed_batch_sizes",
  allowed_batch_sizes, "container", container, "shared_name", shared_name,
  "batching_queue", batching_queue, "Tin", _attr_Tin, "Tcaptured",
  _attr_Tcaptured, "Tout", Tout)
  _result = _execute.execute(b"BatchFunction", len(Tout), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BatchFunction", _inputs_flat, _attrs, _result)
  return _result
Example #23
def sequence_file_dataset_eager_fallback(filenames, output_types, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function sequence_file_dataset
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'sequence_file_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  filenames = _ops.convert_to_tensor(filenames, _dtypes.string)
  _inputs_flat = [filenames]
  _attrs = ("output_types", output_types)
  _result = _execute.execute(b"SequenceFileDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SequenceFileDataset", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example #24
def stateful_uniform_eager_fallback(resource, algorithm, shape, dtype=_dtypes.float32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function stateful_uniform
  """
  _ctx = ctx if ctx else _context.context()
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int64)
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64)
  _inputs_flat = [resource, algorithm, shape]
  _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"StatefulUniform", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "StatefulUniform", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
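The stateful RNG kernels, which take resource, algorithm, and shape inputs as above, are normally driven through `tf.random.Generator`; a sketch, assuming TF 2.x:
import tensorflow as tf

g = tf.random.Generator.from_seed(1234)
u = g.uniform([2, 2])   # each call advances the generator's resource state
v = g.uniform([2, 2])   # u and v differ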
Example #25
def non_deterministic_ints_eager_fallback(shape, dtype, name, ctx):
    if dtype is None:
        dtype = _dtypes.int64
    dtype = _execute.make_type(dtype, "dtype")
    _attr_shape_dtype, (shape, ) = _execute.args_to_matching_eager(
        [shape], ctx, _dtypes.int64)
    _inputs_flat = [shape]
    _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
    _result = _execute.execute(b"NonDeterministicInts",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("NonDeterministicInts", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Example #26
def read_variable_op_eager_fallback(resource, dtype, name=None):
    r"""Slow-path eager-mode fallback for `read_variable_op`."""
    _ctx = _context.context()
    dtype = _execute.make_type(dtype, "dtype")
    resource = _ops.convert_to_tensor(resource, _dtypes.resource)
    _inputs_flat = [resource]
    _attrs = ("dtype", dtype)
    _result = _execute.execute(b"ReadVariableOp",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("ReadVariableOp", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
Example #27
def _py_func(input, token, Tout, name=None):
  r"""Invokes a python function to compute func(input)->output.

  This operation is considered stateful. For a stateless version, see
  PyFuncStateless.

  Args:
    input: A list of `Tensor` objects.
      List of Tensors that will provide input to the Op.
    token: A `string`.
      A token representing a registered python function in this address space.
    Tout: A list of `tf.DTypes`. Data types of the outputs from the op.
      The length of the list specifies the number of outputs.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
  """
  token = _execute.make_str(token, "token")
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'py_func' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "PyFunc", input=input, token=token, Tout=Tout, name=name)
    _result = _op.outputs[:]
    if not _result:
      return _op
    _inputs_flat = _op.inputs
    _attrs = ("token", _op.get_attr("token"), "Tin", _op.get_attr("Tin"),
              "Tout", _op.get_attr("Tout"))
  else:
    _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)
    _inputs_flat = list(input)
    _attrs = ("token", token, "Tin", _attr_Tin, "Tout", Tout)
    _result = _execute.execute(b"PyFunc", len(Tout), inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "PyFunc", _inputs_flat, _attrs, _result, name)
  return _result
Example #28
def read_variable_op(resource, dtype, name=None):
    r"""Reads the value of a variable.

  The tensor returned by this operation is immutable.

  The value returned by this operation is guaranteed to be influenced by all the
  writes on which this operation depends directly or indirectly, and to not be
  influenced by any of the writes which depend directly or indirectly on this
  operation.

  Args:
    resource: A `Tensor` of type `resource`.
      handle to the resource in which to store the variable.
    dtype: A `tf.DType`. the dtype of the value.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
    dtype = _execute.make_type(dtype, "dtype")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("ReadVariableOp",
                                                 resource=resource,
                                                 dtype=dtype,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("dtype", _op.get_attr("dtype"))
    else:
        resource = _ops.convert_to_tensor(resource, _dtypes.resource)
        _inputs_flat = [resource]
        _attrs = ("dtype", dtype)
        _result = _execute.execute(b"ReadVariableOp",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("ReadVariableOp", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
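A usage sketch against a resource variable (TF 2.x; `tf.raw_ops.ReadVariableOp` mirrors the wrapper above, and `v.read_value()` is the idiomatic spelling):
import tensorflow as tf

v = tf.Variable(3.0)
value = tf.raw_ops.ReadVariableOp(resource=v.handle, dtype=tf.float32)
# value == v.read_value() == 3.0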
Example #29
def stateless_random_uniform(shape, seed, dtype=_dtypes.float32, name=None):
    r"""Outputs deterministic pseudorandom values from a uniform distribution.

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    seed: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    dtype: An optional `tf.DType` from: `tf.half, tf.float32, tf.float64`. Defaults to `tf.float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
    if dtype is None:
        dtype = _dtypes.float32
    dtype = _execute.make_type(dtype, "dtype")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("StatelessRandomUniform",
                                                 shape=shape,
                                                 seed=seed,
                                                 dtype=dtype,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("dtype", _op.get_attr("dtype"), "T", _op.get_attr("T"),
                  "Tseed", _op.get_attr("Tseed"))
    else:
        _attr_T, (shape, ) = _execute.args_to_matching_eager([shape], _ctx,
                                                             _dtypes.int32)
        _attr_Tseed, (seed, ) = _execute.args_to_matching_eager([seed], _ctx,
                                                                _dtypes.int64)
        _inputs_flat = [shape, seed]
        _attrs = ("dtype", dtype, "T", _attr_T, "Tseed", _attr_Tseed)
        _result = _execute.execute(b"StatelessRandomUniform",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("StatelessRandomUniform", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
Example #30
def eager_py_func_eager_fallback(input, token, Tout, name=None, ctx=None):
  r"""Slow-path eager-mode fallback for `eager_py_func`."""
  _ctx = ctx if ctx else _context.context()
  token = _execute.make_str(token, "token")
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'eager_py_func' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)
  _inputs_flat = list(input)
  _attrs = ("token", token, "Tin", _attr_Tin, "Tout", Tout)
  _result = _execute.execute(b"EagerPyFunc", len(Tout), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "EagerPyFunc", _inputs_flat, _attrs, _result, name)
  return _result
Example #31
def function_buffering_resource_get_next_eager_fallback(function_buffer_resource, output_types, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function function_buffering_resource_get_next
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'function_buffering_resource_get_next' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  function_buffer_resource = _ops.convert_to_tensor(function_buffer_resource, _dtypes.resource)
  _inputs_flat = [function_buffer_resource]
  _attrs = ("output_types", output_types)
  _result = _execute.execute(b"FunctionBufferingResourceGetNext",
                             len(output_types), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FunctionBufferingResourceGetNext", _inputs_flat, _attrs, _result, name)
  return _result
Example #32
def decode_proto_v2_eager_fallback(bytes, message_type, field_names,
                                   output_types, descriptor_source,
                                   message_format, sanitize, name, ctx):
    message_type = _execute.make_str(message_type, "message_type")
    if not isinstance(field_names, (list, tuple)):
        raise TypeError("Expected list for 'field_names' argument to "
                        "'decode_proto_v2' Op, not %r." % field_names)
    field_names = [_execute.make_str(_s, "field_names") for _s in field_names]
    if not isinstance(output_types, (list, tuple)):
        raise TypeError("Expected list for 'output_types' argument to "
                        "'decode_proto_v2' Op, not %r." % output_types)
    output_types = [
        _execute.make_type(_t, "output_types") for _t in output_types
    ]
    if descriptor_source is None:
        descriptor_source = "local://"
    descriptor_source = _execute.make_str(descriptor_source,
                                          "descriptor_source")
    if message_format is None:
        message_format = "binary"
    message_format = _execute.make_str(message_format, "message_format")
    if sanitize is None:
        sanitize = False
    sanitize = _execute.make_bool(sanitize, "sanitize")
    bytes = _ops.convert_to_tensor(bytes, _dtypes.string)
    _inputs_flat = [bytes]
    _attrs = ("message_type", message_type, "field_names", field_names,
              "output_types", output_types, "descriptor_source",
              descriptor_source, "message_format", message_format, "sanitize",
              sanitize)
    _result = _execute.execute(b"DecodeProtoV2",
                               len(output_types) + 1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("DecodeProtoV2", _inputs_flat, _attrs,
                                 _result)
    _result = _result[:1] + [_result[1:]]
    _result = _DecodeProtoV2Output._make(_result)
    return _result
Example #33
def _py_func_stateless(input, token, Tout, name=None):
    r"""A stateless version of PyFunc.

  Args:
    input: A list of `Tensor` objects.
    token: A `string`.
    Tout: A list of `tf.DTypes`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
  """
    token = _execute.make_str(token, "token")
    if not isinstance(Tout, (list, tuple)):
        raise TypeError("Expected list for 'Tout' argument to "
                        "'py_func_stateless' Op, not %r." % Tout)
    Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("PyFuncStateless",
                                                 input=input,
                                                 token=token,
                                                 Tout=Tout,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("token", _op.get_attr("token"), "Tin", _op.get_attr("Tin"),
                  "Tout", _op.get_attr("Tout"))
    else:
        _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)
        _attr_Tin = [_t.as_datatype_enum for _t in _attr_Tin]
        _inputs_flat = list(input)
        _attrs = ("token", token, "Tin", _attr_Tin, "Tout", Tout)
        _result = _execute.execute(b"PyFuncStateless",
                                   len(Tout),
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("PyFuncStateless", _inputs_flat, _attrs, _result,
                             name)
    return _result
Example #34
def experimental_directed_interleave_dataset(selector_input_dataset, data_input_datasets, output_types, output_shapes, name=None):
  r"""A substitute for `InterleaveDataset` on a fixed list of `N` datasets.

  Args:
    selector_input_dataset: A `Tensor` of type `variant`.
      A dataset of scalar `DT_INT64` elements that determines which of the
      `N` data inputs should produce the next output element.
    data_input_datasets: A list of at least 1 `Tensor` objects with type `variant`.
      `N` datasets with the same type that will be interleaved according to
      the values of `selector_input_dataset`.
    output_types: A list of `tf.DTypes` that has length `>= 1`.
    output_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`) that has length `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  # Add nodes to the TensorFlow graph.
  if not isinstance(data_input_datasets, (list, tuple)):
    raise TypeError(
        "Expected list for 'data_input_datasets' argument to "
        "'experimental_directed_interleave_dataset' Op, not %r." % data_input_datasets)
  _attr_N = len(data_input_datasets)
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'experimental_directed_interleave_dataset' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'experimental_directed_interleave_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  _, _, _op = _op_def_lib._apply_op_helper(
        "ExperimentalDirectedInterleaveDataset", selector_input_dataset=selector_input_dataset,
                                                 data_input_datasets=data_input_datasets,
                                                 output_types=output_types,
                                                 output_shapes=output_shapes,
                                                 name=name)
  _result = _op.outputs[:]
  _result, = _result
  return _result
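The public wrapper for this op is `tf.data.experimental.choose_from_datasets`: the selector dataset emits scalar int64 indices that pick which input dataset produces each element. A sketch (TF 2.x assumed):
import tensorflow as tf

datasets = [tf.data.Dataset.from_tensors("a").repeat(),
            tf.data.Dataset.from_tensors("b").repeat()]
choice = tf.data.Dataset.range(2).repeat(3)   # 0, 1, 0, 1, 0, 1
result = tf.data.experimental.choose_from_datasets(datasets, choice)
# yields: a, b, a, b, a, b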
Example #35
def string_to_number(string_tensor, out_type=_dtypes.float32, name=None):
    r"""Converts each string in the input Tensor to the specified numeric type.

  (Note that int32 overflow results in an error while float overflow
  results in a rounded value.)

  Args:
    string_tensor: A `Tensor` of type `string`.
    out_type: An optional `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.float32`.
      The numeric type to interpret each string in `string_tensor` as.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
    A Tensor of the same shape as the input `string_tensor`.
  """
    if out_type is None:
        out_type = _dtypes.float32
    out_type = _execute.make_type(out_type, "out_type")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("StringToNumber",
                                                 string_tensor=string_tensor,
                                                 out_type=out_type,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("out_type", _op.get_attr("out_type"))
    else:
        string_tensor = _ops.convert_to_tensor(string_tensor, _dtypes.string)
        _inputs_flat = [string_tensor]
        _attrs = ("out_type", out_type)
        _result = _execute.execute(b"StringToNumber",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("StringToNumber", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
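Publicly this kernel is `tf.strings.to_number`; a minimal sketch (TF 2.x assumed):
import tensorflow as tf

floats = tf.strings.to_number(tf.constant(["1.5", "-3", "42"]))        # float32
ints = tf.strings.to_number(tf.constant(["7", "8"]), out_type=tf.int32)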
Example #36
def sparse_feature_cross(indices, values, shapes, dense, hashed_output, num_buckets, out_type, internal_type, name=None):
  r"""Generates sparse cross from a list of sparse tensors.

  The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
  representing features of one feature column. It outputs a 2D `SparseTensor` with
  the batchwise crosses of these features.

  For example, if the inputs are

      inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

      inputs[2]: Tensor [["f"], ["g"]]

  then the output will be

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  if hashed_output=true then the output will be

      shape = [2, 2]
      [0, 0]: HashCombine(
                  Fingerprint64("f"), HashCombine(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    indices: A list of `Tensor` objects with type `int64`.
      2-D.  Indices of each input `SparseTensor`.
    values: A list of `Tensor` objects with types from: `int64`, `string`.
      1-D.   values of each `SparseTensor`.
    shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.
      1-D.   Shapes of each `SparseTensor`.
    dense: A list of `Tensor` objects with types from: `int64`, `string`.
      2-D.    Columns represented by dense `Tensor`.
    hashed_output: A `bool`.
    num_buckets: An `int` that is `>= 0`.
    out_type: A `tf.DType` from: `tf.int64, tf.string`.
    internal_type: A `tf.DType` from: `tf.int64, tf.string`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`. 2-D.  Indices of the concatenated `SparseTensor`.
    output_values: A `Tensor` of type `out_type`. 1-D.  Non-empty values of the concatenated or hashed
      `SparseTensor`.
    output_shape: A `Tensor` of type `int64`. 1-D.  Shape of the concatenated `SparseTensor`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(indices, (list, tuple)):
      raise TypeError(
          "Expected list for 'indices' argument to "
          "'sparse_feature_cross' Op, not %r." % indices)
    _attr_N = len(indices)
    if not isinstance(shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'shapes' argument to "
          "'sparse_feature_cross' Op, not %r." % shapes)
    if len(shapes) != _attr_N:
      raise ValueError(
          "List argument 'shapes' to 'sparse_feature_cross' Op with length %d "
          "must match length %d of argument 'indices'." %
          (len(shapes), _attr_N))
    hashed_output = _execute.make_bool(hashed_output, "hashed_output")
    num_buckets = _execute.make_int(num_buckets, "num_buckets")
    out_type = _execute.make_type(out_type, "out_type")
    internal_type = _execute.make_type(internal_type, "internal_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseFeatureCross", indices=indices, values=values, shapes=shapes,
        dense=dense, hashed_output=hashed_output, num_buckets=num_buckets,
        out_type=out_type, internal_type=internal_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "hashed_output",
              _op.get_attr("hashed_output"), "num_buckets",
              _op.get_attr("num_buckets"), "sparse_types",
              _op.get_attr("sparse_types"), "dense_types",
              _op.get_attr("dense_types"), "out_type",
              _op.get_attr("out_type"), "internal_type",
              _op.get_attr("internal_type"))
    _execute.record_gradient(
      "SparseFeatureCross", _inputs_flat, _attrs, _result, name)
    _result = _SparseFeatureCrossOutput._make(_result)
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseFeatureCross", name, _ctx._post_execution_callbacks, indices,
        values, shapes, dense, "hashed_output", hashed_output, "num_buckets",
        num_buckets, "out_type", out_type, "internal_type", internal_type)
      _result = _SparseFeatureCrossOutput._make(_result)
      return _result
    except _core._FallbackException:
      return sparse_feature_cross_eager_fallback(
          indices, values, shapes, dense, hashed_output=hashed_output,
          num_buckets=num_buckets, out_type=out_type,
          internal_type=internal_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)