Example no. 1
0
def xla_launch_eager_fallback(constants, args, resources, Tresults, function, name=None, ctx=None):
  r"""Eager-mode slow path for the XlaLaunch op.

  Converts the inputs to eager tensors, executes the op directly, and
  records a gradient for it.
  """
  _ctx = ctx or _context.context()
  if not isinstance(resources, (list, tuple)):
    raise TypeError(
        "Expected list for 'resources' argument to "
        "'xla_launch' Op, not %r." % resources)
  _attr_Nresources = len(resources)
  if not isinstance(Tresults, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tresults' argument to "
        "'xla_launch' Op, not %r." % Tresults)
  Tresults = [_execute.make_type(result_type, "Tresults")
              for result_type in Tresults]
  _attr_Tconstants, constants = _execute.convert_to_mixed_eager_tensors(
      constants, _ctx)
  _attr_Targs, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
  resources = _ops.convert_n_to_tensor(resources, _dtypes.resource)
  _inputs_flat = [*constants, *args, *resources]
  _attrs = ("Tconstants", _attr_Tconstants,
            "Targs", _attr_Targs,
            "Nresources", _attr_Nresources,
            "Tresults", Tresults,
            "function", function)
  _result = _execute.execute(b"XlaLaunch", len(Tresults),
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient("XlaLaunch", _inputs_flat, _attrs, _result, name)
  return _result
Example no. 2
0
def xla_launch_eager_fallback(constants, args, resources, Tresults, function, name=None, ctx=None):
  r"""Eager-mode slow path executing the XlaLaunch op directly."""
  _ctx = ctx or _context.context()
  # Both list-valued attribute arguments must be real sequences.
  if not isinstance(resources, (list, tuple)):
    raise TypeError(
        "Expected list for 'resources' argument to "
        "'xla_launch' Op, not %r." % resources)
  if not isinstance(Tresults, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tresults' argument to "
        "'xla_launch' Op, not %r." % Tresults)
  _attr_Nresources = len(resources)
  Tresults = [_execute.make_type(dtype, "Tresults") for dtype in Tresults]
  # Convert each tensor group to eager tensors, capturing their dtypes.
  _attr_Tconstants, constants = _execute.convert_to_mixed_eager_tensors(
      constants, _ctx)
  _attr_Targs, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
  resources = _ops.convert_n_to_tensor(resources, _dtypes.resource)
  _inputs_flat = list(constants) + list(args) + list(resources)
  _attrs = ("Tconstants", _attr_Tconstants, "Targs", _attr_Targs,
            "Nresources", _attr_Nresources, "Tresults", Tresults,
            "function", function)
  _result = _execute.execute(b"XlaLaunch", len(Tresults),
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient("XlaLaunch", _inputs_flat, _attrs, _result, name)
  return _result
Example no. 3
0
def batch_function_eager_fallback(in_tensors, captured_tensors, f,
                                  num_batch_threads, max_batch_size,
                                  batch_timeout_micros, Tout,
                                  max_enqueued_batches, allowed_batch_sizes,
                                  container, shared_name, batching_queue, name,
                                  ctx):
    """Eager-mode fallback that runs the BatchFunction op directly.

    Normalizes the op attributes, converts the tensor arguments to eager
    tensors, executes the op via `_execute.execute`, and records a gradient
    when gradient recording is active.

    Raises:
      TypeError: if `Tout` or `allowed_batch_sizes` is not a list or tuple.
    """
    # Coerce the scalar int attributes to canonical form.
    num_batch_threads = _execute.make_int(num_batch_threads,
                                          "num_batch_threads")
    max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
    batch_timeout_micros = _execute.make_int(batch_timeout_micros,
                                             "batch_timeout_micros")
    if not isinstance(Tout, (list, tuple)):
        raise TypeError("Expected list for 'Tout' argument to "
                        "'batch_function' Op, not %r." % Tout)
    Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
    # Fill in defaults for the optional attributes.
    if max_enqueued_batches is None:
        max_enqueued_batches = 10
    max_enqueued_batches = _execute.make_int(max_enqueued_batches,
                                             "max_enqueued_batches")
    if allowed_batch_sizes is None:
        allowed_batch_sizes = []
    if not isinstance(allowed_batch_sizes, (list, tuple)):
        raise TypeError("Expected list for 'allowed_batch_sizes' argument to "
                        "'batch_function' Op, not %r." % allowed_batch_sizes)
    allowed_batch_sizes = [
        _execute.make_int(_i, "allowed_batch_sizes")
        for _i in allowed_batch_sizes
    ]
    if container is None:
        container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
        shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    if batching_queue is None:
        batching_queue = ""
    batching_queue = _execute.make_str(batching_queue, "batching_queue")
    # Convert the (possibly heterogeneous) tensor lists to eager tensors,
    # capturing their element dtypes as the Tin/Tcaptured attributes.
    _attr_Tin, in_tensors = _execute.convert_to_mixed_eager_tensors(
        in_tensors, ctx)
    _attr_Tcaptured, captured_tensors = _execute.convert_to_mixed_eager_tensors(
        captured_tensors, ctx)
    _inputs_flat = list(in_tensors) + list(captured_tensors)
    # Flat (name, value) attribute pairs in the order the op expects.
    _attrs = ("f", f, "num_batch_threads", num_batch_threads, "max_batch_size",
              max_batch_size, "batch_timeout_micros", batch_timeout_micros,
              "max_enqueued_batches", max_enqueued_batches,
              "allowed_batch_sizes", allowed_batch_sizes, "container",
              container, "shared_name", shared_name, "batching_queue",
              batching_queue, "Tin", _attr_Tin, "Tcaptured", _attr_Tcaptured,
              "Tout", Tout)
    _result = _execute.execute(b"BatchFunction",
                               len(Tout),
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("BatchFunction", _inputs_flat, _attrs,
                                 _result)
    return _result
Example no. 4
0
def ragged_cross_eager_fallback(ragged_values, ragged_row_splits,
                                sparse_indices, sparse_values, sparse_shape,
                                dense_inputs, input_order, hashed_output,
                                num_buckets, hash_key, out_values_type,
                                out_row_splits_type, name, ctx):
    """Eager-mode fallback that runs the RaggedCross op directly.

    Validates the sparse list arguments, canonicalizes the scalar attributes,
    converts all tensor arguments to eager tensors, and executes the op.

    Raises:
      TypeError: if `sparse_indices` or `sparse_shape` is not a list/tuple.
      ValueError: if `sparse_shape` and `sparse_indices` differ in length.
    """
    if not isinstance(sparse_indices, (list, tuple)):
        raise TypeError("Expected list for 'sparse_indices' argument to "
                        "'ragged_cross' Op, not %r." % sparse_indices)
    # Nsparse is inferred from the number of sparse inputs; the two sparse
    # lists must therefore have matching lengths.
    _attr_Nsparse = len(sparse_indices)
    if not isinstance(sparse_shape, (list, tuple)):
        raise TypeError("Expected list for 'sparse_shape' argument to "
                        "'ragged_cross' Op, not %r." % sparse_shape)
    if len(sparse_shape) != _attr_Nsparse:
        raise ValueError(
            "List argument 'sparse_shape' to 'ragged_cross' Op with length %d "
            "must match length %d of argument 'sparse_indices'." %
            (len(sparse_shape), _attr_Nsparse))
    # Canonicalize the scalar attributes.
    input_order = _execute.make_str(input_order, "input_order")
    hashed_output = _execute.make_bool(hashed_output, "hashed_output")
    num_buckets = _execute.make_int(num_buckets, "num_buckets")
    hash_key = _execute.make_int(hash_key, "hash_key")
    out_values_type = _execute.make_type(out_values_type, "out_values_type")
    out_row_splits_type = _execute.make_type(out_row_splits_type,
                                             "out_row_splits_type")
    # Convert each heterogeneous tensor list, capturing element dtypes.
    _attr_ragged_values_types, ragged_values = _execute.convert_to_mixed_eager_tensors(
        ragged_values, ctx)
    _attr_ragged_splits_types, ragged_row_splits = _execute.convert_to_mixed_eager_tensors(
        ragged_row_splits, ctx)
    _attr_sparse_values_types, sparse_values = _execute.convert_to_mixed_eager_tensors(
        sparse_values, ctx)
    _attr_dense_types, dense_inputs = _execute.convert_to_mixed_eager_tensors(
        dense_inputs, ctx)
    sparse_indices = _ops.convert_n_to_tensor(sparse_indices, _dtypes.int64)
    sparse_shape = _ops.convert_n_to_tensor(sparse_shape, _dtypes.int64)
    # Flat input order must match the op's declared input signature.
    _inputs_flat = list(ragged_values) + list(ragged_row_splits) + list(
        sparse_indices) + list(sparse_values) + list(sparse_shape) + list(
            dense_inputs)
    _attrs = ("Nsparse", _attr_Nsparse, "input_order", input_order,
              "hashed_output", hashed_output, "num_buckets", num_buckets,
              "hash_key", hash_key, "ragged_values_types",
              _attr_ragged_values_types, "ragged_splits_types",
              _attr_ragged_splits_types, "sparse_values_types",
              _attr_sparse_values_types, "dense_types", _attr_dense_types,
              "out_values_type", out_values_type, "out_row_splits_type",
              out_row_splits_type)
    # RaggedCross always produces exactly 2 outputs (values and row splits).
    _result = _execute.execute(b"RaggedCross",
                               2,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("RaggedCross", _inputs_flat, _attrs, _result)
    # Wrap the flat outputs in the op's named-tuple result type.
    _result = _RaggedCrossOutput._make(_result)
    return _result
Example no. 5
0
def sparse_feature_cross_v2_eager_fallback(indices,
                                           values,
                                           shapes,
                                           dense,
                                           hashed_output,
                                           num_buckets,
                                           hash_key,
                                           out_type,
                                           internal_type,
                                           name=None,
                                           ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function sparse_feature_cross_v2

  Validates the sparse list arguments, canonicalizes the scalar attributes,
  converts the tensor arguments to eager tensors, and executes the
  SparseFeatureCrossV2 op directly.

  Raises:
    TypeError: if `indices` or `shapes` is not a list/tuple.
    ValueError: if `shapes` and `indices` differ in length.
  """
    _ctx = ctx if ctx else _context.context()
    if not isinstance(indices, (list, tuple)):
        raise TypeError("Expected list for 'indices' argument to "
                        "'sparse_feature_cross_v2' Op, not %r." % indices)
    # N is inferred from the number of sparse inputs; shapes must match.
    _attr_N = len(indices)
    if not isinstance(shapes, (list, tuple)):
        raise TypeError("Expected list for 'shapes' argument to "
                        "'sparse_feature_cross_v2' Op, not %r." % shapes)
    if len(shapes) != _attr_N:
        raise ValueError(
            "List argument 'shapes' to 'sparse_feature_cross_v2' Op with length %d "
            "must match length %d of argument 'indices'." %
            (len(shapes), _attr_N))
    # Canonicalize the scalar attributes.
    hashed_output = _execute.make_bool(hashed_output, "hashed_output")
    num_buckets = _execute.make_int(num_buckets, "num_buckets")
    hash_key = _execute.make_int(hash_key, "hash_key")
    out_type = _execute.make_type(out_type, "out_type")
    internal_type = _execute.make_type(internal_type, "internal_type")
    # Convert heterogeneous tensor lists, capturing element dtypes.
    _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(
        values, _ctx)
    _attr_dense_types, dense = _execute.convert_to_mixed_eager_tensors(
        dense, _ctx)
    indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
    shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
    # Flat input order must match the op's declared input signature.
    _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense)
    _attrs = ("N", _attr_N, "hashed_output", hashed_output, "num_buckets",
              num_buckets, "hash_key", hash_key, "sparse_types",
              _attr_sparse_types, "dense_types", _attr_dense_types, "out_type",
              out_type, "internal_type", internal_type)
    # The op always produces 3 outputs (indices, values, shape of the cross).
    _result = _execute.execute(b"SparseFeatureCrossV2",
                               3,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("SparseFeatureCrossV2", _inputs_flat, _attrs,
                             _result, name)
    # Wrap the flat outputs in the op's named-tuple result type.
    _result = _SparseFeatureCrossV2Output._make(_result)
    return _result
Example no. 6
0
def _print_eager_fallback(input,
                          data,
                          message="",
                          first_n=-1,
                          summarize=3,
                          name=None):
    r"""Eager-mode slow path for the Print op.

    Normalizes the optional attributes, converts the inputs to eager
    tensors, executes the op, and records a gradient for it.
    """
    _ctx = _context.context()
    # Substitute documented defaults for explicit None values.
    message = _execute.make_str("" if message is None else message, "message")
    first_n = _execute.make_int(-1 if first_n is None else first_n, "first_n")
    summarize = _execute.make_int(3 if summarize is None else summarize,
                                  "summarize")
    _attr_T, (input, ) = _execute.args_to_matching_eager([input], _ctx)
    _attr_U, data = _execute.convert_to_mixed_eager_tensors(data, _ctx)
    _inputs_flat = [input, *data]
    _attrs = ("T", _attr_T, "U", _attr_U,
              "message", message,
              "first_n", first_n,
              "summarize", summarize)
    _result = _execute.execute(b"Print", 1, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
    _execute.record_gradient("Print", _inputs_flat, _attrs, _result, name)
    # Print has exactly one output: the pass-through of `input`.
    (_result, ) = _result
    return _result
Example no. 7
0
def partitioned_call_eager_fallback(args, Tout, f, config="", config_proto="", executor_type="", name=None, ctx=None):
  r"""Eager-mode slow path for the PartitionedCall op."""
  _ctx = ctx or _context.context()
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'partitioned_call' Op, not %r." % Tout)
  Tout = [_execute.make_type(out_type, "Tout") for out_type in Tout]
  # Substitute empty-string defaults for explicit None values.
  config = _execute.make_str("" if config is None else config, "config")
  config_proto = _execute.make_str(
      "" if config_proto is None else config_proto, "config_proto")
  executor_type = _execute.make_str(
      "" if executor_type is None else executor_type, "executor_type")
  _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
  _inputs_flat = list(args)
  _attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f,
            "config", config,
            "config_proto", config_proto,
            "executor_type", executor_type)
  _result = _execute.execute(b"PartitionedCall", len(Tout),
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient("PartitionedCall", _inputs_flat, _attrs, _result,
                           name)
  return _result
def _assert(condition, data, summarize=3, name=None):
  r"""Asserts that the given condition is true.

  If `condition` evaluates to false, print the list of tensors in `data`.
  `summarize` determines how many entries of the tensors to print.

  Args:
    condition: A `Tensor` of type `bool`. The condition to evaluate.
    data: A list of `Tensor` objects.
      The tensors to print out when condition is false.
    summarize: An optional `int`. Defaults to `3`.
      Print this many entries of each tensor.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  if summarize is None:
    summarize = 3
  summarize = _execute.make_int(summarize, "summarize")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    # Graph mode: add the op to the current graph and return the Operation.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Assert", condition=condition, data=data, summarize=summarize,
        name=name)
    return _op
  else:
    # Eager mode: convert the inputs and execute the op immediately.
    _attr_T, data = _execute.convert_to_mixed_eager_tensors(data, _ctx)
    condition = _ops.convert_to_tensor(condition, _dtypes.bool)
    _inputs_flat = [condition] + list(data)
    _attrs = ("T", _attr_T, "summarize", summarize)
    # Assert produces no outputs (num_outputs=0).
    _result = _execute.execute(b"Assert", 0, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
    _result = None
  return _result
def _symbolic_gradient(input, Tout, f, name=None):
    r"""Computes the gradient function for function f via backpropagation.

  Args:
    input: A list of `Tensor` objects. a list of input tensors of size N + M;
    Tout: A list of `tf.DTypes` that has length `>= 1`.
      the type list for the input list.
    f: A function decorated with @Defun.
      The function we want to compute the gradient for.

      The function 'f' must be a numerical function which takes N inputs and
      produces M outputs. Its gradient function 'g', which is computed by
      this SymbolicGradient op is a function taking N + M inputs and
      produces N outputs.

      I.e. if we have
         (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),
      then, g is
         (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,
                                           dL/dy1, dL/dy2, ..., dL/dy_M),

      where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the
      loss function). dL/dx_i is the partial derivative of L with respect
      to x_i.

      (Needs some math expert to say the comment above better.)
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
    a list of output tensors of size N;

  Raises:
    TypeError: if `Tout` is not a list or tuple.
  """
    if not isinstance(Tout, (list, tuple)):
        raise TypeError("Expected list for 'Tout' argument to "
                        "'symbolic_gradient' Op, not %r." % Tout)
    Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        # Graph mode: add the op to the graph and read back its attrs for
        # gradient recording.
        _, _, _op = _op_def_lib._apply_op_helper("SymbolicGradient",
                                                 input=input,
                                                 Tout=Tout,
                                                 f=f,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"),
                  "f", _op.get_attr("f"))
    else:
        # Eager mode: convert the inputs and execute the op immediately.
        _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)
        _inputs_flat = list(input)
        _attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f)
        _result = _execute.execute(b"SymbolicGradient",
                                   len(Tout),
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("SymbolicGradient", _inputs_flat, _attrs, _result,
                             name)
    return _result
Example no. 10
0
def case_eager_fallback(branch_index, input, Tout, branches, output_shapes, name, ctx):
  """Eager-mode fallback that executes the Case op directly."""
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'case' Op, not %r." % Tout)
  Tout = [_execute.make_type(out_type, "Tout") for out_type in Tout]
  if not isinstance(branches, (list, tuple)):
    raise TypeError(
        "Expected list for 'branches' argument to "
        "'case' Op, not %r." % branches)
  # output_shapes is optional and defaults to the empty list.
  output_shapes = [] if output_shapes is None else output_shapes
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'case' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(shape, "output_shapes")
                   for shape in output_shapes]
  _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, ctx)
  branch_index = _ops.convert_to_tensor(branch_index, _dtypes.int32)
  _inputs_flat = [branch_index, *input]
  _attrs = ("Tin", _attr_Tin, "Tout", Tout, "branches", branches,
            "output_shapes", output_shapes)
  _result = _execute.execute(b"Case", len(Tout), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Case", _inputs_flat, _attrs, _result)
  return _result
def stateless_if_eager_fallback(cond,
                                input,
                                Tout,
                                then_branch,
                                else_branch,
                                name=None,
                                ctx=None):
    r"""Eager-mode slow path for the StatelessIf op."""
    _ctx = ctx or _context.context()
    if not isinstance(Tout, (list, tuple)):
        raise TypeError("Expected list for 'Tout' argument to "
                        "'stateless_if' Op, not %r." % Tout)
    Tout = [_execute.make_type(out_type, "Tout") for out_type in Tout]
    _attr_Tcond, (cond, ) = _execute.args_to_matching_eager([cond], _ctx)
    _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)
    _inputs_flat = [cond, *input]
    _attrs = ("Tcond", _attr_Tcond, "Tin", _attr_Tin, "Tout", Tout,
              "then_branch", then_branch, "else_branch", else_branch)
    _result = _execute.execute(b"StatelessIf", len(Tout),
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
    _execute.record_gradient("StatelessIf", _inputs_flat, _attrs, _result,
                             name)
    return _result
Example no. 12
0
def outfeed_enqueue_tuple(inputs, name=None):
    r"""An op which emits multiple Tensor values from an XLA computation.

  Args:
    inputs: A list of `Tensor` objects.
      A list of tensors that will be inserted into the outfeed queue as an
      XLA tuple.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        # Graph mode: add the op to the current graph and return it.
        _, _, _op = _op_def_lib._apply_op_helper("OutfeedEnqueueTuple",
                                                 inputs=inputs,
                                                 name=name)
        return _op
    else:
        # Eager mode: convert the inputs and execute the op immediately.
        _attr_dtypes, inputs = _execute.convert_to_mixed_eager_tensors(
            inputs, _ctx)
        # The dtypes attr must be passed as enum values, not DType objects.
        _attr_dtypes = [_t.as_datatype_enum for _t in _attr_dtypes]
        _inputs_flat = list(inputs)
        _attrs = ("dtypes", _attr_dtypes)
        # OutfeedEnqueueTuple produces no outputs (num_outputs=0).
        _result = _execute.execute(b"OutfeedEnqueueTuple",
                                   0,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    return _result
Example no. 13
0
def _print_eager_fallback(input, data, message, first_n, summarize, name, ctx):
    """Eager-mode fallback that executes the Print op directly."""
    # Substitute documented defaults for explicit None values.
    message = _execute.make_str("" if message is None else message, "message")
    first_n = _execute.make_int(-1 if first_n is None else first_n, "first_n")
    summarize = _execute.make_int(3 if summarize is None else summarize,
                                  "summarize")
    _attr_T, (input, ) = _execute.args_to_matching_eager([input], ctx, [])
    _attr_U, data = _execute.convert_to_mixed_eager_tensors(data, ctx)
    _inputs_flat = [input, *data]
    _attrs = ("T", _attr_T, "U", _attr_U,
              "message", message,
              "first_n", first_n,
              "summarize", summarize)
    _result = _execute.execute(b"Print", 1, inputs=_inputs_flat,
                               attrs=_attrs, ctx=ctx, name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("Print", _inputs_flat, _attrs, _result)
    # Print has exactly one output: the pass-through of `input`.
    (_result, ) = _result
    return _result
Example no. 14
0
def _print(input, data, message="", first_n=-1, summarize=3, name=None):
    r"""Prints a list of tensors.

  Passes `input` through to `output` and prints `data` when evaluating.

  Args:
    input: A `Tensor`. The tensor passed to `output`
    data: A list of `Tensor` objects.
      A list of tensors to print out when op is evaluated.
    message: An optional `string`. Defaults to `""`.
      A string, prefix of the error message.
    first_n: An optional `int`. Defaults to `-1`.
      Only log `first_n` number of times. -1 disables logging.
    summarize: An optional `int`. Defaults to `3`.
      Only print this many entries of each tensor.
    name: A name for the operation (optional).

  Returns:
    The unmodified `input` tensor
  """
    # Normalize the optional attributes to their documented defaults.
    if message is None:
        message = ""
    message = _execute.make_str(message, "message")
    if first_n is None:
        first_n = -1
    first_n = _execute.make_int(first_n, "first_n")
    if summarize is None:
        summarize = 3
    summarize = _execute.make_int(summarize, "summarize")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        # Graph mode: add the op to the graph and read back its attrs for
        # gradient recording.
        _, _, _op = _op_def_lib._apply_op_helper("Print",
                                                 input=input,
                                                 data=data,
                                                 message=message,
                                                 first_n=first_n,
                                                 summarize=summarize,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("T", _op.get_attr("T"), "U", _op.get_attr("U"), "message",
                  _op.get_attr("message"), "first_n", _op.get_attr("first_n"),
                  "summarize", _op.get_attr("summarize"))
    else:
        # Eager mode: convert the inputs and execute the op immediately.
        _attr_T, (input, ) = _execute.args_to_matching_eager([input], _ctx)
        # Type attrs must be passed as enum values, not DType objects.
        _attr_T = _attr_T.as_datatype_enum
        _attr_U, data = _execute.convert_to_mixed_eager_tensors(data, _ctx)
        _attr_U = [_t.as_datatype_enum for _t in _attr_U]
        _inputs_flat = [input] + list(data)
        _attrs = ("T", _attr_T, "U", _attr_U, "message", message, "first_n",
                  first_n, "summarize", summarize)
        _result = _execute.execute(b"Print",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("Print", _inputs_flat, _attrs, _result, name)
    # Print has exactly one output: the pass-through of `input`.
    _result, = _result
    return _result
Example no. 15
0
def remote_fused_graph_execute_eager_fallback(
        inputs,
        Toutputs,
        serialized_remote_fused_graph_execute_info,
        name=None,
        ctx=None):
    r"""Eager-mode slow path for the RemoteFusedGraphExecute op."""
    _ctx = ctx or _context.context()
    if not isinstance(Toutputs, (list, tuple)):
        raise TypeError("Expected list for 'Toutputs' argument to "
                        "'remote_fused_graph_execute' Op, not %r." % Toutputs)
    Toutputs = [_execute.make_type(out_type, "Toutputs")
                for out_type in Toutputs]
    serialized_remote_fused_graph_execute_info = _execute.make_str(
        serialized_remote_fused_graph_execute_info,
        "serialized_remote_fused_graph_execute_info")
    _attr_Tinputs, inputs = _execute.convert_to_mixed_eager_tensors(
        inputs, _ctx)
    _inputs_flat = list(inputs)
    _attrs = ("Tinputs", _attr_Tinputs,
              "Toutputs", Toutputs,
              "serialized_remote_fused_graph_execute_info",
              serialized_remote_fused_graph_execute_info)
    _result = _execute.execute(b"RemoteFusedGraphExecute", len(Toutputs),
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
    _execute.record_gradient("RemoteFusedGraphExecute", _inputs_flat, _attrs,
                             _result, name)
    return _result
Example no. 16
0
 def testConvertMixedEagerTensorsWithVariables(self):
   """Mixed string/variable inputs convert to EagerTensors with matching dtypes."""
   mixed_inputs = ['foo', resource_variable_ops.ResourceVariable(1.0)]
   types, tensors = execute_lib.convert_to_mixed_eager_tensors(
       mixed_inputs, context.context())
   self.assertAllEqual([dtypes.string, dtypes.float32], types)
   for tensor in tensors:
     self.assertIsInstance(tensor, ops.EagerTensor)
Example no. 17
0
 def testConvertMixedEagerTensorsWithVariables(self):
     """Variables mixed with strings convert to dtype-tagged EagerTensors."""
     variable = resource_variable_ops.ResourceVariable(1.0)
     result_types, result_tensors = execute_lib.convert_to_mixed_eager_tensors(
         ['foo', variable], context.context())
     self.assertAllEqual([dtypes.string, dtypes.float32], result_types)
     for converted in result_tensors:
         self.assertIsInstance(converted, ops.EagerTensor)
Example no. 18
0
def csv_dataset_eager_fallback(filenames, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols, record_defaults, output_shapes, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function csv_dataset

  Converts the scalar configuration arguments and `record_defaults` to eager
  tensors, executes the CSVDataset op, and returns its single output.

  Raises:
    TypeError: if `output_shapes` is not a list or tuple.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(output_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'csv_dataset' Op, not %r." % output_shapes)
  output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
  # The element dtypes of record_defaults become the output_types attr.
  _attr_output_types, record_defaults = _execute.convert_to_mixed_eager_tensors(record_defaults, _ctx)
  # Convert each scalar configuration argument to a typed tensor.
  filenames = _ops.convert_to_tensor(filenames, _dtypes.string)
  buffer_size = _ops.convert_to_tensor(buffer_size, _dtypes.int64)
  header = _ops.convert_to_tensor(header, _dtypes.bool)
  field_delim = _ops.convert_to_tensor(field_delim, _dtypes.string)
  use_quote_delim = _ops.convert_to_tensor(use_quote_delim, _dtypes.bool)
  na_value = _ops.convert_to_tensor(na_value, _dtypes.string)
  select_cols = _ops.convert_to_tensor(select_cols, _dtypes.int64)
  # Flat input order must match the op's declared input signature.
  _inputs_flat = [filenames, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols] + list(record_defaults)
  _attrs = ("output_types", _attr_output_types, "output_shapes",
  output_shapes)
  # CSVDataset produces exactly one output: the dataset variant tensor.
  _result = _execute.execute(b"CSVDataset", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "CSVDataset", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def stateful_partitioned_call_eager_fallback(args,
                                             Tout,
                                             f,
                                             name=None,
                                             ctx=None):
    r"""Eager-mode slow path for the StatefulPartitionedCall op."""
    _ctx = ctx or _context.context()
    if not isinstance(Tout, (list, tuple)):
        raise TypeError("Expected list for 'Tout' argument to "
                        "'stateful_partitioned_call' Op, not %r." % Tout)
    Tout = [_execute.make_type(out_type, "Tout") for out_type in Tout]
    _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
    _inputs_flat = list(args)
    _attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f)
    _result = _execute.execute(b"StatefulPartitionedCall", len(Tout),
                               inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                               name=name)
    _execute.record_gradient("StatefulPartitionedCall", _inputs_flat, _attrs,
                             _result, name)
    return _result
Example no. 20
0
def stateless_if_eager_fallback(cond, input, Tout, then_branch, else_branch, output_shapes, name, ctx):
  """Eager-mode slow path for the StatelessIf op.

  Validates the attribute lists, converts `cond` and `input` to eager
  tensors, executes StatelessIf directly, and records a gradient entry
  only when one is being taped.
  """
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'stateless_if' Op, not %r." % Tout)
  out_types = [_execute.make_type(_t, "Tout") for _t in Tout]
  # A missing output_shapes attribute defaults to the empty list.
  shapes = [] if output_shapes is None else output_shapes
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_shapes' argument to "
        "'stateless_if' Op, not %r." % shapes)
  shapes = [_execute.make_shape(_s, "output_shapes") for _s in shapes]
  _attr_Tcond, (cond,) = _execute.args_to_matching_eager([cond], ctx)
  _attr_Tin, branch_inputs = _execute.convert_to_mixed_eager_tensors(input, ctx)
  flat_inputs = [cond] + list(branch_inputs)
  op_attrs = ("Tcond", _attr_Tcond, "Tin", _attr_Tin, "Tout", out_types,
              "then_branch", then_branch, "else_branch", else_branch,
              "output_shapes", shapes)
  outputs = _execute.execute(b"StatelessIf", len(out_types),
                             inputs=flat_inputs, attrs=op_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatelessIf", flat_inputs, op_attrs, outputs)
  return outputs
Ejemplo n.º 21
0
def stateful_partitioned_call_eager_fallback(args, Tout, f, config, config_proto, executor_type, name, ctx):
  """Eager-mode slow path for the StatefulPartitionedCall op.

  Normalizes the string attributes (each defaults to ""), converts
  `args` to eager tensors, executes the op directly, and records a
  gradient entry only when one is being taped.
  """
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'stateful_partitioned_call' Op, not %r." % Tout)
  out_types = [_execute.make_type(_t, "Tout") for _t in Tout]
  config = _execute.make_str("" if config is None else config, "config")
  config_proto = _execute.make_str(
      "" if config_proto is None else config_proto, "config_proto")
  executor_type = _execute.make_str(
      "" if executor_type is None else executor_type, "executor_type")
  _attr_Tin, call_args = _execute.convert_to_mixed_eager_tensors(args, ctx)
  flat_inputs = list(call_args)
  op_attrs = ("Tin", _attr_Tin, "Tout", out_types, "f", f, "config", config,
              "config_proto", config_proto, "executor_type", executor_type)
  outputs = _execute.execute(b"StatefulPartitionedCall", len(out_types),
                             inputs=flat_inputs, attrs=op_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatefulPartitionedCall", flat_inputs, op_attrs, outputs)
  return outputs
def encode_proto_eager_fallback(sizes, values, field_names, message_type, descriptor_source="local://", name=None, ctx=None):
  """Eager-mode slow path for the EncodeProto op.

  Validates and normalizes the attributes, converts the inputs to eager
  tensors, executes EncodeProto directly, records a gradient entry, and
  returns the op's single output.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(field_names, (list, tuple)):
    raise TypeError(
        "Expected list for 'field_names' argument to "
        "'encode_proto' Op, not %r." % field_names)
  field_names = [_execute.make_str(_s, "field_names") for _s in field_names]
  message_type = _execute.make_str(message_type, "message_type")
  descriptor_source = _execute.make_str(
      "local://" if descriptor_source is None else descriptor_source,
      "descriptor_source")
  _attr_Tinput_types, value_tensors = _execute.convert_to_mixed_eager_tensors(
      values, eager_ctx)
  sizes_tensor = _ops.convert_to_tensor(sizes, _dtypes.int32)
  flat_inputs = [sizes_tensor] + list(value_tensors)
  op_attrs = ("field_names", field_names, "message_type", message_type,
              "descriptor_source", descriptor_source, "Tinput_types",
              _attr_Tinput_types)
  outputs = _execute.execute(b"EncodeProto", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "EncodeProto", flat_inputs, op_attrs, outputs, name)
  # EncodeProto has exactly one output tensor.
  return outputs[0]
Ejemplo n.º 23
0
 def testConvertMixedEagerTensors(self):
     """convert_to_mixed_eager_tensors accepts a mix of numpy arrays and
     eager tensors, returning one float32 dtype and one EagerTensor per
     input."""
     array = np.zeros((), dtype=np.float32)
     tensor = constant_op.constant(0., dtype=dtypes.float32)
     types, tensors = execute_lib.convert_to_mixed_eager_tensors(
         [array, tensor], context.context())
     for typ, t in zip(types, tensors):
         # assertEqual: assertEquals is a deprecated alias, removed from
         # unittest in Python 3.12.
         self.assertEqual(typ, dtypes.float32)
         self.assertIsInstance(t, ops.EagerTensor)
Ejemplo n.º 24
0
 def testConvertMixedEagerTensors(self):
   """Checks convert_to_mixed_eager_tensors on a numpy array plus an
   eager tensor: every returned type is float32 and every returned
   value is an EagerTensor."""
   array = np.zeros((), dtype=np.float32)
   tensor = constant_op.constant(0., dtype=dtypes.float32)
   types, tensors = execute_lib.convert_to_mixed_eager_tensors(
       [array, tensor], context.context())
   for typ, t in zip(types, tensors):
     # assertEquals is a deprecated alias of assertEqual (removed in
     # Python 3.12's unittest).
     self.assertEqual(typ, dtypes.float32)
     self.assertIsInstance(t, ops.EagerTensor)
Ejemplo n.º 25
0
def batch_eager_fallback(in_tensors, num_batch_threads, max_batch_size,
                         batch_timeout_micros, grad_timeout_micros,
                         max_enqueued_batches, allowed_batch_sizes, container,
                         shared_name, batching_queue, name, ctx):
    """Eager-mode slow path for the Batch op.

    Normalizes every attribute, converts `in_tensors` to eager tensors,
    executes Batch directly, and repacks the flat results into a
    _BatchOutput namedtuple.
    """
    num_batch_threads = _execute.make_int(num_batch_threads,
                                          "num_batch_threads")
    max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
    batch_timeout_micros = _execute.make_int(batch_timeout_micros,
                                             "batch_timeout_micros")
    grad_timeout_micros = _execute.make_int(grad_timeout_micros,
                                            "grad_timeout_micros")
    # max_enqueued_batches defaults to 10 when unset.
    max_enqueued_batches = _execute.make_int(
        10 if max_enqueued_batches is None else max_enqueued_batches,
        "max_enqueued_batches")
    allowed_batch_sizes = ([] if allowed_batch_sizes is None
                           else allowed_batch_sizes)
    if not isinstance(allowed_batch_sizes, (list, tuple)):
        raise TypeError("Expected list for 'allowed_batch_sizes' argument to "
                        "'batch' Op, not %r." % allowed_batch_sizes)
    allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes")
                           for _i in allowed_batch_sizes]
    container = _execute.make_str("" if container is None else container,
                                  "container")
    shared_name = _execute.make_str(
        "" if shared_name is None else shared_name, "shared_name")
    batching_queue = _execute.make_str(
        "" if batching_queue is None else batching_queue, "batching_queue")
    _attr_T, batch_inputs = _execute.convert_to_mixed_eager_tensors(
        in_tensors, ctx)
    flat_inputs = list(batch_inputs)
    op_attrs = ("num_batch_threads", num_batch_threads, "max_batch_size",
                max_batch_size, "max_enqueued_batches", max_enqueued_batches,
                "batch_timeout_micros", batch_timeout_micros,
                "allowed_batch_sizes", allowed_batch_sizes,
                "grad_timeout_micros", grad_timeout_micros, "container",
                container, "shared_name", shared_name, "batching_queue",
                batching_queue, "T", _attr_T)
    num_in = len(batch_inputs)
    # The op emits one batched tensor per input plus two index tensors.
    results = _execute.execute(b"Batch",
                               num_in + 2,
                               inputs=flat_inputs,
                               attrs=op_attrs,
                               ctx=ctx,
                               name=name)
    if _execute.must_record_gradient():
        _execute.record_gradient("Batch", flat_inputs, op_attrs, results)
    # Group the first num_in outputs into one list field of the tuple.
    results = [results[:num_in]] + results[num_in:]
    return _BatchOutput._make(results)
Ejemplo n.º 26
0
def remote_fused_graph_execute(inputs,
                               Toutputs,
                               serialized_remote_fused_graph_execute_info,
                               name=None):
    r"""Executes a serialized remote-fused subgraph on the given inputs.

  Works in both graph and eager mode: in graph mode it builds a
  RemoteFusedGraphExecute node; in eager mode it executes the op
  directly.

  Args:
    inputs: A list of `Tensor` objects.
    Toutputs: A list of `tf.DTypes`.
    serialized_remote_fused_graph_execute_info: A `string`.
      Presumably a serialized RemoteFusedGraphExecuteInfo proto —
      TODO(review): confirm against the op definition.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Toutputs`.
  """
    if not isinstance(Toutputs, (list, tuple)):
        raise TypeError("Expected list for 'Toutputs' argument to "
                        "'remote_fused_graph_execute' Op, not %r." % Toutputs)
    Toutputs = [_execute.make_type(_t, "Toutputs") for _t in Toutputs]
    serialized_remote_fused_graph_execute_info = _execute.make_str(
        serialized_remote_fused_graph_execute_info,
        "serialized_remote_fused_graph_execute_info")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        # Graph mode: add the op to the graph and read the attrs back
        # from the created Operation.
        _, _, _op = _op_def_lib._apply_op_helper(
            "RemoteFusedGraphExecute",
            inputs=inputs,
            Toutputs=Toutputs,
            serialized_remote_fused_graph_execute_info=
            serialized_remote_fused_graph_execute_info,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("Tinputs", _op.get_attr("Tinputs"), "Toutputs",
                  _op.get_attr("Toutputs"),
                  "serialized_remote_fused_graph_execute_info",
                  _op.get_attr("serialized_remote_fused_graph_execute_info"))
    else:
        # Eager mode: convert the inputs and execute the op directly.
        _attr_Tinputs, inputs = _execute.convert_to_mixed_eager_tensors(
            inputs, _ctx)
        _inputs_flat = list(inputs)
        _attrs = ("Tinputs", _attr_Tinputs, "Toutputs", Toutputs,
                  "serialized_remote_fused_graph_execute_info",
                  serialized_remote_fused_graph_execute_info)
        _result = _execute.execute(b"RemoteFusedGraphExecute",
                                   len(Toutputs),
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("RemoteFusedGraphExecute", _inputs_flat, _attrs,
                             _result, name)
    return _result
Ejemplo n.º 27
0
def _assert_eager_fallback(condition, data, summarize, name, ctx):
  """Eager-mode slow path for the Assert op; always returns None.

  `summarize` defaults to 3 when unset; `condition` is coerced to a
  bool tensor and `data` to a list of eager tensors.
  """
  summarize = _execute.make_int(3 if summarize is None else summarize,
                                "summarize")
  _attr_T, data_tensors = _execute.convert_to_mixed_eager_tensors(data, ctx)
  cond_tensor = _ops.convert_to_tensor(condition, _dtypes.bool)
  flat_inputs = [cond_tensor] + list(data_tensors)
  op_attrs = ("T", _attr_T, "summarize", summarize)
  # Assert has zero outputs; it executes purely for its side effect.
  _execute.execute(b"Assert", 0, inputs=flat_inputs, attrs=op_attrs,
                   ctx=ctx, name=name)
  return None
Ejemplo n.º 28
0
def _for_eager_fallback(start, limit, delta, input, body, name, ctx):
  """Eager-mode slow path for the For op.

  Converts the loop variables and the int32 bounds to tensors, executes
  For directly, and records a gradient entry only when one is being
  taped.
  """
  _attr_T, loop_vars = _execute.convert_to_mixed_eager_tensors(input, ctx)
  bounds = [_ops.convert_to_tensor(v, _dtypes.int32)
            for v in (start, limit, delta)]
  flat_inputs = bounds + list(loop_vars)
  op_attrs = ("T", _attr_T, "body", body)
  # One output per loop variable.
  outputs = _execute.execute(b"For", len(loop_vars), inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "For", flat_inputs, op_attrs, outputs)
  return outputs
Ejemplo n.º 29
0
def sparse_feature_cross_v2_eager_fallback(indices, values, shapes, dense, hashed_output, num_buckets, hash_key, out_type, internal_type, name=None, ctx=None):
  """Eager-mode slow path for the SparseFeatureCrossV2 op.

  Validates the list arguments, normalizes the attributes, executes the
  op directly, and repacks the three flat results into a
  _SparseFeatureCrossV2Output namedtuple.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % shapes)
  # 'indices' and 'shapes' describe the same N sparse inputs.
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_feature_cross_v2' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  hashed_output = _execute.make_bool(hashed_output, "hashed_output")
  num_buckets = _execute.make_int(num_buckets, "num_buckets")
  hash_key = _execute.make_int(hash_key, "hash_key")
  out_type = _execute.make_type(out_type, "out_type")
  internal_type = _execute.make_type(internal_type, "internal_type")
  _attr_sparse_types, value_tensors = _execute.convert_to_mixed_eager_tensors(
      values, eager_ctx)
  _attr_dense_types, dense_tensors = _execute.convert_to_mixed_eager_tensors(
      dense, eager_ctx)
  index_tensors = _ops.convert_n_to_tensor(indices, _dtypes.int64)
  shape_tensors = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
  flat_inputs = (list(index_tensors) + list(value_tensors) +
                 list(shape_tensors) + list(dense_tensors))
  op_attrs = ("N", _attr_N, "hashed_output", hashed_output, "num_buckets",
              num_buckets, "hash_key", hash_key, "sparse_types",
              _attr_sparse_types, "dense_types", _attr_dense_types,
              "out_type", out_type, "internal_type", internal_type)
  results = _execute.execute(b"SparseFeatureCrossV2", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseFeatureCrossV2", flat_inputs, op_attrs, results, name)
  return _SparseFeatureCrossV2Output._make(results)
Ejemplo n.º 30
0
def _while_eager_fallback(input, cond, body, name=None, ctx=None):
  """Eager-mode slow path for the While op.

  Converts the loop variables to eager tensors, executes While directly
  (one output per loop variable), and records a gradient entry.
  """
  eager_ctx = ctx if ctx else _context.context()
  _attr_T, loop_vars = _execute.convert_to_mixed_eager_tensors(
      input, eager_ctx)
  flat_inputs = list(loop_vars)
  op_attrs = ("T", _attr_T, "cond", cond, "body", body)
  outputs = _execute.execute(b"While", len(flat_inputs), inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "While", flat_inputs, op_attrs, outputs, name)
  return outputs
Ejemplo n.º 31
0
def _py_func(input, token, Tout, name=None):
    r"""Invokes a python function to compute func(input)->output.

  This operation is considered stateful. For a stateless version, see

  PyFuncStateless.

  Args:
    input: A list of `Tensor` objects.
      List of Tensors that will provide input to the Op.
    token: A `string`.
      A token representing a registered python function in this address space.
    Tout: A list of `tf.DTypes`. Data types of the outputs from the op.

      The length of the list specifies the number of outputs.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`. The outputs from the Op.
  """
    token = _execute.make_str(token, "token")
    if not isinstance(Tout, (list, tuple)):
        raise TypeError("Expected list for 'Tout' argument to "
                        "'py_func' Op, not %r." % Tout)
    Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        # Graph mode: build a PyFunc node; when the function has no
        # outputs, return the Operation itself instead of a tensor list.
        _, _, _op = _op_def_lib._apply_op_helper("PyFunc",
                                                 input=input,
                                                 token=token,
                                                 Tout=Tout,
                                                 name=name)
        _result = _op.outputs[:]
        if not _result:
            return _op
        _inputs_flat = _op.inputs
        # Read the attrs back from the created op for gradient recording.
        _attrs = ("token", _op.get_attr("token"), "Tin", _op.get_attr("Tin"),
                  "Tout", _op.get_attr("Tout"))
    else:
        # Eager mode: convert the inputs and execute the op directly.
        _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)
        _inputs_flat = list(input)
        _attrs = ("token", token, "Tin", _attr_Tin, "Tout", Tout)
        _result = _execute.execute(b"PyFunc",
                                   len(Tout),
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("PyFunc", _inputs_flat, _attrs, _result, name)
    return _result
Ejemplo n.º 32
0
def symbolic_gradient_eager_fallback(input, Tout, f, name, ctx):
  """Eager-mode slow path for the SymbolicGradient op.

  Converts `input` to eager tensors, executes SymbolicGradient directly
  (one output per entry of `Tout`), and records a gradient entry only
  when one is being taped.
  """
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'symbolic_gradient' Op, not %r." % Tout)
  out_types = [_execute.make_type(_t, "Tout") for _t in Tout]
  _attr_Tin, grad_inputs = _execute.convert_to_mixed_eager_tensors(input, ctx)
  flat_inputs = list(grad_inputs)
  op_attrs = ("Tin", _attr_Tin, "Tout", out_types, "f", f)
  outputs = _execute.execute(b"SymbolicGradient", len(out_types),
                             inputs=flat_inputs, attrs=op_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SymbolicGradient", flat_inputs, op_attrs, outputs)
  return outputs
Ejemplo n.º 33
0
def remote_call_eager_fallback(target, args, Tout, f, name, ctx):
  """Eager-mode slow path for the RemoteCall op.

  Coerces `target` to a string tensor, converts `args` to eager
  tensors, executes RemoteCall directly, and records a gradient entry
  only when one is being taped.
  """
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'remote_call' Op, not %r." % Tout)
  out_types = [_execute.make_type(_t, "Tout") for _t in Tout]
  _attr_Tin, call_args = _execute.convert_to_mixed_eager_tensors(args, ctx)
  target_tensor = _ops.convert_to_tensor(target, _dtypes.string)
  flat_inputs = [target_tensor] + list(call_args)
  op_attrs = ("Tin", _attr_Tin, "Tout", out_types, "f", f)
  outputs = _execute.execute(b"RemoteCall", len(out_types),
                             inputs=flat_inputs, attrs=op_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "RemoteCall", flat_inputs, op_attrs, outputs)
  return outputs
def _assert_eager_fallback(condition, data, summarize=3, name=None, ctx=None):
  """Eager-mode slow path for the Assert op; always returns None.

  Coerces `condition` to a bool tensor and `data` to eager tensors,
  then executes Assert purely for its side effect (the op has no
  outputs).
  """
  eager_ctx = ctx if ctx else _context.context()
  summarize = _execute.make_int(3 if summarize is None else summarize,
                                "summarize")
  _attr_T, data_tensors = _execute.convert_to_mixed_eager_tensors(
      data, eager_ctx)
  cond_tensor = _ops.convert_to_tensor(condition, _dtypes.bool)
  flat_inputs = [cond_tensor] + list(data_tensors)
  op_attrs = ("T", _attr_T, "summarize", summarize)
  _execute.execute(b"Assert", 0, inputs=flat_inputs, attrs=op_attrs,
                   ctx=eager_ctx, name=name)
  return None