Example 1
def decision_tree_ensemble_resource_handle_op(container="", shared_name="", name=None):
  r"""TODO: add doc.

  Args:
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if container is None:
      container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
      shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "DecisionTreeEnsembleResourceHandleOp", container=container,
        shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient(
      "DecisionTreeEnsembleResourceHandleOp", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "DecisionTreeEnsembleResourceHandleOp", name,
        _ctx._post_execution_callbacks, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._FallbackException:
      return decision_tree_ensemble_resource_handle_op_eager_fallback(
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
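Every wrapper in this listing follows the same dispatch skeleton: build a graph node when not executing eagerly, otherwise try the C fast path and fall back to a Python slow path on `_FallbackException`. A runnable miniature of that shape, with every name below an illustrative stand-in rather than a real TF internal:

```
class FallbackException(Exception):
    pass

def fast_path(x):
    # Stand-in for _pywrap_tensorflow.TFE_Py_FastPathExecute.
    raise FallbackException("fast path unavailable for this input")

def eager_fallback(x):
    return x * 2  # stand-in for _execute.execute(...)

def build_graph_node(x):
    return ("graph_node", x)  # stand-in for _op_def_lib._apply_op_helper(...)

def wrapper(x, eager=True):
    if not eager:                 # graph mode: record an op in the graph
        return build_graph_node(x)
    try:                          # eager mode: try the C fast path first
        return fast_path(x)
    except FallbackException:     # then fall back to the Python slow path
        return eager_fallback(x)

print(wrapper(21))               # 42, via eager_fallback
print(wrapper(21, eager=False))  # ('graph_node', 21)
```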
Example 2
def image_projective_transform(images, transforms, interpolation, name=None):
  r"""Applies the given transform to each of the images.

  Input `images` is a `Tensor` in NHWC format (where the axes are image in batch,
  rows, columns, and channels). Input `transforms` is a num_images x 8 or 1 x 8
  matrix, where each row corresponds to a 3 x 3 projective transformation matrix,
  with the last entry assumed to be 1. If there is one row, the same
  transformation will be applied to all images.

  If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps
  the *output* point `(x, y)` to a transformed *input* point
  `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
  `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input
  image, the output pixel is set to 0.

  Args:
    images: A `Tensor`. Must be one of the following types: `uint8`, `int32`, `int64`, `half`, `float32`, `float64`.
      4D `Tensor`, input image(s) in NHWC format.
    transforms: A `Tensor` of type `float32`.
      2D `Tensor`, projective transform(s) to apply to the image(s).
    interpolation: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `images`.
    4D `Tensor`, image(s) in NHWC format, generated by applying
    the `transforms` to the `images`. Satisfies the description above.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    interpolation = _execute.make_str(interpolation, "interpolation")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ImageProjectiveTransform", images=images, transforms=transforms,
        interpolation=interpolation, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("dtype", _op.get_attr("dtype"), "interpolation",
              _op.get_attr("interpolation"))
    _execute.record_gradient(
      "ImageProjectiveTransform", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ImageProjectiveTransform", name, _ctx._post_execution_callbacks,
        images, transforms, "interpolation", interpolation)
      return _result
    except _core._FallbackException:
      return image_projective_transform_eager_fallback(
          images, transforms, interpolation=interpolation, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
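To make the coordinate mapping above concrete, here is a small plain-Python sketch (not part of the generated file) that applies one 8-parameter transform row to an output point exactly as the docstring describes:

```
def map_output_to_input(transform, x, y):
    # transform = [a0, a1, a2, b0, b1, b2, c0, c1]; the implicit ninth
    # entry of the 3 x 3 matrix is 1.
    a0, a1, a2, b0, b1, b2, c0, c1 = transform
    k = c0 * x + c1 * y + 1.0
    return ((a0 * x + a1 * y + a2) / k, (b0 * x + b1 * y + b2) / k)

# The identity transform maps every output point to itself.
assert map_output_to_input([1, 0, 0, 0, 1, 0, 0, 0], 3.0, 4.0) == (3.0, 4.0)

# A translation row [1, 0, 2, 0, 1, 5, 0, 0] maps *output* (0, 0) back to
# *input* (2, 5), so the rendered image appears shifted by (-2, -5).
print(map_output_to_input([1, 0, 2, 0, 1, 5, 0, 0], 0.0, 0.0))  # (2.0, 5.0)
```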
Example 3
def decision_tree_ensemble_resource_handle_op_eager_fallback(container="", shared_name="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function decision_tree_ensemble_resource_handle_op
  """
  _ctx = ctx if ctx else _context.context()
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _inputs_flat = []
  _attrs = ("container", container, "shared_name", shared_name)
  _result = _execute.execute(b"DecisionTreeEnsembleResourceHandleOp", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "DecisionTreeEnsembleResourceHandleOp", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example 4
def grow_tree_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate, dropout_seed, max_tree_depth, weak_learner_type, partition_ids, gains, splits, learner_config, center_bias, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function grow_tree_ensemble
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(partition_ids, (list, tuple)):
    raise TypeError(
        "Expected list for 'partition_ids' argument to "
        "'grow_tree_ensemble' Op, not %r." % partition_ids)
  _attr_num_handlers = len(partition_ids)
  if not isinstance(gains, (list, tuple)):
    raise TypeError(
        "Expected list for 'gains' argument to "
        "'grow_tree_ensemble' Op, not %r." % gains)
  if len(gains) != _attr_num_handlers:
    raise ValueError(
        "List argument 'gains' to 'grow_tree_ensemble' Op with length %d "
        "must match length %d of argument 'partition_ids'." %
        (len(gains), _attr_num_handlers))
  if not isinstance(splits, (list, tuple)):
    raise TypeError(
        "Expected list for 'splits' argument to "
        "'grow_tree_ensemble' Op, not %r." % splits)
  if len(splits) != _attr_num_handlers:
    raise ValueError(
        "List argument 'splits' to 'grow_tree_ensemble' Op with length %d "
        "must match length %d of argument 'partition_ids'." %
        (len(splits), _attr_num_handlers))
  learner_config = _execute.make_str(learner_config, "learner_config")
  center_bias = _execute.make_bool(center_bias, "center_bias")
  tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
  stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
  next_stamp_token = _ops.convert_to_tensor(next_stamp_token, _dtypes.int64)
  learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32)
  dropout_seed = _ops.convert_to_tensor(dropout_seed, _dtypes.int64)
  max_tree_depth = _ops.convert_to_tensor(max_tree_depth, _dtypes.int32)
  weak_learner_type = _ops.convert_to_tensor(weak_learner_type, _dtypes.int32)
  partition_ids = _ops.convert_n_to_tensor(partition_ids, _dtypes.int32)
  gains = _ops.convert_n_to_tensor(gains, _dtypes.float32)
  splits = _ops.convert_n_to_tensor(splits, _dtypes.string)
  _inputs_flat = [tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate, dropout_seed, max_tree_depth, weak_learner_type] + list(partition_ids) + list(gains) + list(splits)
  _attrs = ("learner_config", learner_config, "num_handlers",
  _attr_num_handlers, "center_bias", center_bias)
  _result = _execute.execute(b"GrowTreeEnsemble", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _result = None
  return _result
Example 5
def image_projective_transform_eager_fallback(images, transforms, interpolation, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function image_projective_transform
  """
  _ctx = ctx if ctx else _context.context()
  interpolation = _execute.make_str(interpolation, "interpolation")
  _attr_dtype, (images,) = _execute.args_to_matching_eager([images], _ctx)
  transforms = _ops.convert_to_tensor(transforms, _dtypes.float32)
  _inputs_flat = [images, transforms]
  _attrs = ("dtype", _attr_dtype, "interpolation", interpolation)
  _result = _execute.execute(b"ImageProjectiveTransform", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "ImageProjectiveTransform", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example 6
def center_tree_ensemble_bias_eager_fallback(tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates, learner_config, centering_epsilon=0.01, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function center_tree_ensemble_bias
  """
  _ctx = ctx if ctx else _context.context()
  learner_config = _execute.make_str(learner_config, "learner_config")
  if centering_epsilon is None:
    centering_epsilon = 0.01
  centering_epsilon = _execute.make_float(centering_epsilon, "centering_epsilon")
  tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
  stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
  next_stamp_token = _ops.convert_to_tensor(next_stamp_token, _dtypes.int64)
  delta_updates = _ops.convert_to_tensor(delta_updates, _dtypes.float32)
  _inputs_flat = [tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates]
  _attrs = ("learner_config", learner_config, "centering_epsilon",
  centering_epsilon)
  _result = _execute.execute(b"CenterTreeEnsembleBias", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "CenterTreeEnsembleBias", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example 7
def rpc(address, method, request, protocol="", fail_fast=True, timeout_in_ms=0, name=None):
  r"""Perform batches of RPC requests.

  This op asynchronously performs either a single RPC request, or a batch
  of requests.  RPC requests are defined by three main parameters:
  
    - `address` (the host+port or BNS address of the request)
    - `method` (the RPC method name for the request)
    - `request` (the serialized proto string, or vector of strings,
       of the RPC request argument).
  
  For example, if you have an RPC service running at localhost:2345,
  and its interface is configured with the following proto declaration:
  
  ```
  service MyService {
    rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
    }
  };
  ```
  
  then call this op with arguments:
  
  ```
  address = "localhost:2345"
  method = "MyService/MyMethod"
  ```
  
  The `request` tensor is a string tensor representing serialized `MyRequestProto`
  strings; and the output string tensor `response` will have the same shape
  and contain (upon successful completion) corresponding serialized
  `MyResponseProto` strings.
  
  For example, to send a single, empty, `MyRequestProto`, call
  this op with `request = ""`.  To send 5 **parallel** empty requests,
  call this op with `request = ["", "", "", "", ""]`.
  
  More generally, one can create a batch of `MyRequestProto` serialized protos
  from regular batched tensors using the `encode_proto` op, and convert
  the response `MyResponseProto` serialized protos to batched tensors
  using the `decode_proto` op.
  
  **NOTE** Working with serialized proto strings is faster than instantiating
  actual proto objects in memory, so no performance degradation is expected
  compared to writing custom kernels for this workflow.
  
  If the connection fails or the remote worker returns an error
  status, the op reraises this exception locally.
  
  See the `TryRpc` op if you prefer to handle RPC failures manually in the graph.

  Args:
    address: A `Tensor` of type `string`.
      `0-D` or `1-D`.  The address (i.e. host_name:port) of the RPC server.
      If this tensor has more than 1 element, then multiple parallel rpc requests
      are sent.  This argument broadcasts with `method` and `request`.
    method: A `Tensor` of type `string`.
      `0-D` or `1-D`.  The method address on the RPC server.
      If this tensor has more than 1 element, then multiple parallel rpc requests
      are sent.  This argument broadcasts with `address` and `request`.
    request: A `Tensor` of type `string`.
      `0-D` or `1-D`.  Serialized proto strings: the rpc request argument.
      If this tensor has more than 1 element, then multiple parallel rpc requests
      are sent.  This argument broadcasts with `address` and `method`.
    protocol: An optional `string`. Defaults to `""`.
      RPC protocol to use.  Empty string means use the default protocol.
      Options include 'grpc'.
    fail_fast: An optional `bool`. Defaults to `True`.
      `boolean`. If `true` (default), then failures to connect
      (i.e., the server does not immediately respond) cause an RPC failure.
    timeout_in_ms: An optional `int`. Defaults to `0`.
      `int`. If `0` (default), then the kernel will run the RPC
      request and only time out if the RPC deadline passes or the session times out.
      If this value is greater than `0`, then the op will raise an exception if
      the RPC takes longer than `timeout_in_ms`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if protocol is None:
      protocol = ""
    protocol = _execute.make_str(protocol, "protocol")
    if fail_fast is None:
      fail_fast = True
    fail_fast = _execute.make_bool(fail_fast, "fail_fast")
    if timeout_in_ms is None:
      timeout_in_ms = 0
    timeout_in_ms = _execute.make_int(timeout_in_ms, "timeout_in_ms")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Rpc", address=address, method=method, request=request,
        protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("protocol", _op.get_attr("protocol"), "fail_fast",
              _op.get_attr("fail_fast"), "timeout_in_ms",
              _op.get_attr("timeout_in_ms"))
    _execute.record_gradient(
      "Rpc", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Rpc", name,
        _ctx._post_execution_callbacks, address, method, request, "protocol",
        protocol, "fail_fast", fail_fast, "timeout_in_ms", timeout_in_ms)
      return _result
    except _core._FallbackException:
      return rpc_eager_fallback(
          address, method, request, protocol=protocol, fail_fast=fail_fast,
          timeout_in_ms=timeout_in_ms, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
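As a usage sketch (the service name and port are the hypothetical ones from the docstring; a matching gRPC server must already be listening), the example above translates to a call like:

```
# Hypothetical endpoint from the docstring; nothing is listening by default.
response = rpc(
    address="localhost:2345",
    method="MyService/MyMethod",
    request="",            # a single empty serialized MyRequestProto
    protocol="grpc",
    fail_fast=True,
    timeout_in_ms=5000)
# `response` is a string tensor of serialized MyResponseProto bytes with
# the same shape as `request`.
```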
Example 8
def grow_tree_ensemble(tree_ensemble_handle,
                       stamp_token,
                       next_stamp_token,
                       learning_rate,
                       dropout_seed,
                       max_tree_depth,
                       weak_learner_type,
                       partition_ids,
                       gains,
                       splits,
                       learner_config,
                       center_bias,
                       name=None):
    r"""Grows the tree ensemble by either adding a layer to the last tree being grown

  or by starting a new tree.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the ensemble variable.
    stamp_token: A `Tensor` of type `int64`.
      Stamp token for validating operation consistency.
    next_stamp_token: A `Tensor` of type `int64`.
      Stamp token to be used for the next iteration.
    learning_rate: A `Tensor` of type `float32`. Scalar learning rate.
    dropout_seed: A `Tensor` of type `int64`.
    max_tree_depth: A `Tensor` of type `int32`.
    weak_learner_type: A `Tensor` of type `int32`.
      The type of weak learner to use.
    partition_ids: A list of `Tensor` objects with type `int32`.
      List of Rank 1 Tensors containing partition Id per candidate.
    gains: A list with the same length as `partition_ids` of `Tensor` objects with type `float32`.
      List of Rank 1 Tensors containing gains per candidate.
    splits: A list with the same length as `partition_ids` of `Tensor` objects with type `string`.
      List of Rank 1 Tensors containing serialized SplitInfo protos per candidate.
    learner_config: A `string`.
      Config for the learner of type LearnerConfig proto.
    center_bias: A `bool`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "GrowTreeEnsemble", name, _ctx._post_execution_callbacks,
                tree_ensemble_handle, stamp_token, next_stamp_token,
                learning_rate, dropout_seed, max_tree_depth, weak_learner_type,
                partition_ids, gains, splits, "learner_config", learner_config,
                "center_bias", center_bias)
            return _result
        except _core._FallbackException:
            try:
                return grow_tree_ensemble_eager_fallback(
                    tree_ensemble_handle,
                    stamp_token,
                    next_stamp_token,
                    learning_rate,
                    dropout_seed,
                    max_tree_depth,
                    weak_learner_type,
                    partition_ids,
                    gains,
                    splits,
                    learner_config=learner_config,
                    center_bias=center_bias,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
            except (TypeError, ValueError):
                result = _dispatch.dispatch(
                    grow_tree_ensemble,
                    tree_ensemble_handle=tree_ensemble_handle,
                    stamp_token=stamp_token,
                    next_stamp_token=next_stamp_token,
                    learning_rate=learning_rate,
                    dropout_seed=dropout_seed,
                    max_tree_depth=max_tree_depth,
                    weak_learner_type=weak_learner_type,
                    partition_ids=partition_ids,
                    gains=gains,
                    splits=splits,
                    learner_config=learner_config,
                    center_bias=center_bias,
                    name=name)
                if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
                    return result
                raise
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if not isinstance(partition_ids, (list, tuple)):
        raise TypeError("Expected list for 'partition_ids' argument to "
                        "'grow_tree_ensemble' Op, not %r." % partition_ids)
    _attr_num_handlers = len(partition_ids)
    if not isinstance(gains, (list, tuple)):
        raise TypeError("Expected list for 'gains' argument to "
                        "'grow_tree_ensemble' Op, not %r." % gains)
    if len(gains) != _attr_num_handlers:
        raise ValueError(
            "List argument 'gains' to 'grow_tree_ensemble' Op with length %d "
            "must match length %d of argument 'partition_ids'." %
            (len(gains), _attr_num_handlers))
    if not isinstance(splits, (list, tuple)):
        raise TypeError("Expected list for 'splits' argument to "
                        "'grow_tree_ensemble' Op, not %r." % splits)
    if len(splits) != _attr_num_handlers:
        raise ValueError(
            "List argument 'splits' to 'grow_tree_ensemble' Op with length %d "
            "must match length %d of argument 'partition_ids'." %
            (len(splits), _attr_num_handlers))
    learner_config = _execute.make_str(learner_config, "learner_config")
    center_bias = _execute.make_bool(center_bias, "center_bias")
    try:
        _, _, _op = _op_def_lib._apply_op_helper(
            "GrowTreeEnsemble",
            tree_ensemble_handle=tree_ensemble_handle,
            stamp_token=stamp_token,
            next_stamp_token=next_stamp_token,
            learning_rate=learning_rate,
            dropout_seed=dropout_seed,
            max_tree_depth=max_tree_depth,
            weak_learner_type=weak_learner_type,
            partition_ids=partition_ids,
            gains=gains,
            splits=splits,
            learner_config=learner_config,
            center_bias=center_bias,
            name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(grow_tree_ensemble,
                                    tree_ensemble_handle=tree_ensemble_handle,
                                    stamp_token=stamp_token,
                                    next_stamp_token=next_stamp_token,
                                    learning_rate=learning_rate,
                                    dropout_seed=dropout_seed,
                                    max_tree_depth=max_tree_depth,
                                    weak_learner_type=weak_learner_type,
                                    partition_ids=partition_ids,
                                    gains=gains,
                                    splits=splits,
                                    learner_config=learner_config,
                                    center_bias=center_bias,
                                    name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    return _op
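The repeated length checks above enforce one invariant: every per-handler list must have the same length as `partition_ids`. The same pattern in isolation, as a standalone sketch (a hypothetical helper, not part of the generated file):

```
def check_same_length(reference, reference_name, **lists):
    # Hypothetical helper mirroring the generated checks: every keyword
    # argument must be a list/tuple matching the reference list's length.
    if not isinstance(reference, (list, tuple)):
        raise TypeError("Expected list for %r, not %r" % (reference_name, reference))
    n = len(reference)
    for arg_name, value in lists.items():
        if not isinstance(value, (list, tuple)):
            raise TypeError("Expected list for %r, not %r" % (arg_name, value))
        if len(value) != n:
            raise ValueError("%r has length %d, must match length %d of %r"
                             % (arg_name, len(value), n, reference_name))
    return n

num_handlers = check_same_length([7, 8], "partition_ids",
                                 gains=[0.5, 0.2], splits=[b"a", b"b"])
print(num_handlers)  # 2
```
Example 9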
def unbatch_grad(original_input,
                 batch_index,
                 grad,
                 id,
                 container="",
                 shared_name="",
                 name=None):
    r"""Gradient of Unbatch.

  Acts like Batch but uses the given batch_index to reassemble the batch as
  gradients become available. This ensures that the gradients are propagated back
  in the same session which did the forward pass.

  Args:
    original_input: A `Tensor`.
      The input to the Unbatch operation this is the gradient of.
    batch_index: A `Tensor` of type `int64`.
      The batch_index given to the Unbatch operation this is the gradient
      of.
    grad: A `Tensor`. Must have the same type as `original_input`.
      The downstream gradient.
    id: A `Tensor` of type `int64`. The id scalar emitted by Batch.
    container: An optional `string`. Defaults to `""`.
      Container to control resource sharing.
    shared_name: An optional `string`. Defaults to `""`.
      Instances of UnbatchGrad with the same container and shared_name
      are assumed to possibly belong to the same batch. If left empty, the op name
      will be used as the shared name.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `original_input`.
    The return value, either an empty tensor or the batched gradient.
  """
    if container is None:
        container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
        shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("UnbatchGrad",
                                                 original_input=original_input,
                                                 batch_index=batch_index,
                                                 grad=grad,
                                                 id=id,
                                                 container=container,
                                                 shared_name=shared_name,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("container", _op.get_attr("container"), "shared_name",
                  _op.get_attr("shared_name"), "T", _op.get_attr("T"))
    else:
        _attr_T, _inputs_T = _execute.args_to_matching_eager(
            [original_input, grad], _ctx)
        (original_input, grad) = _inputs_T
        _attr_T = _attr_T.as_datatype_enum
        batch_index = _ops.convert_to_tensor(batch_index, _dtypes.int64)
        id = _ops.convert_to_tensor(id, _dtypes.int64)
        _inputs_flat = [original_input, batch_index, grad, id]
        _attrs = ("container", container, "shared_name", shared_name, "T",
                  _attr_T)
        _result = _execute.execute(b"UnbatchGrad",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("UnbatchGrad", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
Example 10
def enter(data,
          frame_name,
          is_constant=False,
          parallel_iterations=10,
          name=None):
    r"""Creates or finds a child frame, and makes `data` available to the child frame.

  This op is used together with `Exit` to create loops in the graph.
  The unique `frame_name` is used by the `Executor` to identify frames. If
  `is_constant` is true, `output` is a constant in the child frame; otherwise
  it may be changed in the child frame. At most `parallel_iterations` iterations
  are run in parallel in the child frame.

  Args:
    data: A `Tensor`. The tensor to be made available to the child frame.
    frame_name: A `string`. The name of the child frame.
    is_constant: An optional `bool`. Defaults to `False`.
      If true, the output is constant within the child frame.
    parallel_iterations: An optional `int`. Defaults to `10`.
      The number of iterations allowed to run in parallel.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
    _ctx = _context._context or _context.context()
    tld = _ctx._thread_local_data
    if tld.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, tld.device_name, "Enter", name,
                tld.op_callbacks, data, "frame_name", frame_name,
                "is_constant", is_constant, "parallel_iterations",
                parallel_iterations)
            return _result
        except _core._FallbackException:
            try:
                return enter_eager_fallback(
                    data,
                    frame_name=frame_name,
                    is_constant=is_constant,
                    parallel_iterations=parallel_iterations,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            _ops.raise_from_not_ok_status(e, name)
    # Add nodes to the TensorFlow graph.
    frame_name = _execute.make_str(frame_name, "frame_name")
    if is_constant is None:
        is_constant = False
    is_constant = _execute.make_bool(is_constant, "is_constant")
    if parallel_iterations is None:
        parallel_iterations = 10
    parallel_iterations = _execute.make_int(parallel_iterations,
                                            "parallel_iterations")
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Enter",
        data=data,
        frame_name=frame_name,
        is_constant=is_constant,
        parallel_iterations=parallel_iterations,
        name=name)
    _result = _outputs[:]
    if _execute.must_record_gradient():
        _attrs = ("T", _op._get_attr_type("T"), "frame_name",
                  _op.get_attr("frame_name"), "is_constant",
                  _op._get_attr_bool("is_constant"), "parallel_iterations",
                  _op._get_attr_int("parallel_iterations"))
        _inputs_flat = _op.inputs
        _execute.record_gradient("Enter", _inputs_flat, _attrs, _result)
    _result, = _result
    return _result
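`Enter` is not normally called directly: `tf.while_loop` emits it, together with `Merge`, `Switch`, `NextIteration`, and `Exit`, when it builds a v1-style loop. A minimal sketch, assuming TF 2.x with v1 behavior enabled so that these frame ops actually appear in the graph:

```
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # v1 graph mode + v1 control flow

g = tf.Graph()
with g.as_default():
    i = tf.constant(0)
    # Each loop variable is routed into the child frame through an Enter op.
    _ = tf.while_loop(lambda i: i < 10, lambda i: i + 1, [i],
                      parallel_iterations=10)

print([op.name for op in g.get_operations() if op.type == "Enter"])
```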
Example 11
def sparse_to_sparse_set_operation(set1_indices,
                                   set1_values,
                                   set1_shape,
                                   set2_indices,
                                   set2_values,
                                   set2_shape,
                                   set_operation,
                                   validate_indices=True,
                                   name=None):
    r"""Applies set operation along last dimension of 2 `SparseTensor` inputs.

  See SetOperationOp::SetOperationFromContext for values of `set_operation`.

  If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
  order and range of `set1` and `set2` indices.

  Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
  and `set1_shape`. For `set1` ranked `n`, the first `n-1` dimensions must be the
  same as `set2`. Dimension `n` contains values in a set; duplicates are allowed
  but ignored.

  Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
  and `set2_shape`. For `set2` ranked `n`, the first `n-1` dimensions must be the
  same as `set1`. Dimension `n` contains values in a set; duplicates are allowed
  but ignored.

  Output `result` is a `SparseTensor` represented by `result_indices`,
  `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
  has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
  dimension contains the result of `set_operation` applied to the corresponding
  `[0...n-1]` dimension of `set`.

  Args:
    set1_indices: A `Tensor` of type `int64`.
      2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
      order.
    set1_values: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
      1D `Tensor`, values of a `SparseTensor`. Must be in row-major
      order.
    set1_shape: A `Tensor` of type `int64`.
      1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
      be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
      max set size across `0...n-1` dimensions.
    set2_indices: A `Tensor` of type `int64`.
      2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
      order.
    set2_values: A `Tensor`. Must have the same type as `set1_values`.
      1D `Tensor`, values of a `SparseTensor`. Must be in row-major
      order.
    set2_shape: A `Tensor` of type `int64`.
      1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
      be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
      max set size across `0...n-1` dimensions.
    set_operation: A `string`.
    validate_indices: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (result_indices, result_values, result_shape).

    result_indices: A `Tensor` of type `int64`.
    result_values: A `Tensor`. Has the same type as `set1_values`.
    result_shape: A `Tensor` of type `int64`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "SparseToSparseSetOperation", name,
                _ctx._post_execution_callbacks, set1_indices, set1_values,
                set1_shape, set2_indices, set2_values, set2_shape,
                "set_operation", set_operation, "validate_indices",
                validate_indices)
            _result = _SparseToSparseSetOperationOutput._make(_result)
            return _result
        except _core._FallbackException:
            try:
                return sparse_to_sparse_set_operation_eager_fallback(
                    set1_indices,
                    set1_values,
                    set1_shape,
                    set2_indices,
                    set2_values,
                    set2_shape,
                    set_operation=set_operation,
                    validate_indices=validate_indices,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    set_operation = _execute.make_str(set_operation, "set_operation")
    if validate_indices is None:
        validate_indices = True
    validate_indices = _execute.make_bool(validate_indices, "validate_indices")
    _, _, _op = _op_def_lib._apply_op_helper("SparseToSparseSetOperation",
                                             set1_indices=set1_indices,
                                             set1_values=set1_values,
                                             set1_shape=set1_shape,
                                             set2_indices=set2_indices,
                                             set2_values=set2_values,
                                             set2_shape=set2_shape,
                                             set_operation=set_operation,
                                             validate_indices=validate_indices,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("set_operation", _op.get_attr("set_operation"),
              "validate_indices", _op.get_attr("validate_indices"), "T",
              _op._get_attr_type("T"))
    _execute.record_gradient("SparseToSparseSetOperation", _inputs_flat,
                             _attrs, _result, name)
    _result = _SparseToSparseSetOperationOutput._make(_result)
    return _result
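In the public API this kernel family sits behind `tf.sets`; a minimal sketch (assuming TF 2.x; the dense inputs here dispatch to the dense variant of the same family, with identical semantics):

```
import tensorflow as tf

# The last dimension of each input is treated as a set.
a = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64)
b = tf.constant([[1, 3, 9], [5, 5, 0]], dtype=tf.int64)
result = tf.sets.intersection(a, b)     # returns a SparseTensor
print(tf.sparse.to_dense(result).numpy())
# [[1 3]
#  [5 0]]
```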
Example 12
def fused_conv2d_bias_activation(conv_input,
                                 filter,
                                 bias,
                                 side_input,
                                 conv_input_scale,
                                 side_input_scale,
                                 strides,
                                 padding,
                                 data_format="NHWC",
                                 filter_format="HWIO",
                                 activation_mode="Relu",
                                 dilations=[1, 1, 1, 1],
                                 name=None):
    r"""    Computes a fused kernel which implements: 2-D convolution, adds side input,

      with separate scaling on convolution and side inputs, then adds bias and
      applies the RELU activation function to the result. Supports both float and
      qint8 data formats. In the case of qint8, the output is clipped to [0..127].

      conv_input: A tensor with format as specified by `data_format` (see below).
      filter: A tensor with format depending on `data_format` as follows:
          "NHWC", "NCHW":
               `float [ filter_height, filter_width, in_channels, out_channels ]`
          "NCHW_VECT_C":
               `qint8 [ out_channels, in_channels, filter_height, filter_width ]`
      bias: 1-D float tensor with size matching the `out_channels` dimension of
          `filter`.
          Note: this tensor is still float, even if other inputs are qint8.
      side_input: A tensor with format as specified by `data_format` (see below).
          This tensor will be ignored and can be [] if side_input_scale == 0.
          Otherwise, the size of each dimension must match the `output` tensor.
      conv_input_scale: scalar float value to be multiplied by `conv_input`.
          (conceptually; in reality it is applied after convolution).
      side_input_scale: scalar float value to be multiplied by `side_input`.
      output: A tensor with format as specified by `data_format` (see below).
          The dimension sizes are determined automatically based on other inputs
          and attributes.
      T: The element data type of `conv_input`, `side_input` and `output` tensors.
          Note: must match with the `data_format`.
      Tbias: The element data type of `bias`.
      strides: 1-D tensor of length 4.  The stride of the sliding window for each
          dimension of `input`. The dimension order is determined by the value of
          `data_format`, see below for details.
          Note: the stride for batch and channel dimensions must be 1.
      padding: The type of padding algorithm to use.
      data_format: A string specifying the data format of `conv_input`,
          `side_input` and `output` tensors with the following options:
          "NHWC": `float [ batch, height, width, channels ]`
          "NCHW": `float [ batch, channels, height, width ]`
          "NCHW_VECT_C":
              `qint8 [ batch, channels / 4, height, width, channels % 4 ]`
          Note: for "NCHW_VECT_C", `channels` must be a multiple of 4.
      filter_format: A string specifying the data format of `filter`,
          "HWIO": `float [ kernel_height, kernel_width, input_channels,
                           output_channels ]`
          "OIHW_VECT_I":
              `qint8 [ output_channels, input_channels / 4,
                       kernel_height, kernel_width, input_channels % 4 ]`
      activation_mode: The activation applied to the output.
          Must be "Relu" or "None".
      dilations: 1-D tensor of length 4.  The dilation factor for each dimension
          of `input`. If set to k > 1, there will be k-1 skipped cells between
          each filter element on that dimension. The dimension order is determined
          by the value of `data_format`, see above for details. Dilations in the
          batch and depth dimensions must be 1.

  Args:
    conv_input: A `Tensor`. Must be one of the following types: `float32`, `half`, `qint8`.
    filter: A `Tensor`. Must have the same type as `conv_input`.
    bias: A `Tensor`. Must be one of the following types: `float32`, `half`.
    side_input: A `Tensor`. Must have the same type as `conv_input`.
    conv_input_scale: A `Tensor` of type `float32`.
    side_input_scale: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
    filter_format: An optional `string` from: `"HWIO", "OIHW", "OIHW_VECT_I"`. Defaults to `"HWIO"`.
    activation_mode: An optional `string` from: `"Relu", "None"`. Defaults to `"Relu"`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `conv_input`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "FusedConv2DBiasActivation", name,
                _ctx.post_execution_callbacks, conv_input, filter, bias,
                side_input, conv_input_scale, side_input_scale, "strides",
                strides, "padding", padding, "data_format", data_format,
                "filter_format", filter_format, "activation_mode",
                activation_mode, "dilations", dilations)
            return _result
        except _core._FallbackException:
            try:
                return fused_conv2d_bias_activation_eager_fallback(
                    conv_input,
                    filter,
                    bias,
                    side_input,
                    conv_input_scale,
                    side_input_scale,
                    strides=strides,
                    padding=padding,
                    data_format=data_format,
                    filter_format=filter_format,
                    activation_mode=activation_mode,
                    dilations=dilations,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
            except (TypeError, ValueError):
                result = _dispatch.dispatch(fused_conv2d_bias_activation,
                                            conv_input=conv_input,
                                            filter=filter,
                                            bias=bias,
                                            side_input=side_input,
                                            conv_input_scale=conv_input_scale,
                                            side_input_scale=side_input_scale,
                                            strides=strides,
                                            padding=padding,
                                            data_format=data_format,
                                            filter_format=filter_format,
                                            activation_mode=activation_mode,
                                            dilations=dilations,
                                            name=name)
                if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
                    return result
                raise
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if not isinstance(strides, (list, tuple)):
        raise TypeError("Expected list for 'strides' argument to "
                        "'fused_conv2d_bias_activation' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
        data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if filter_format is None:
        filter_format = "HWIO"
    filter_format = _execute.make_str(filter_format, "filter_format")
    if activation_mode is None:
        activation_mode = "Relu"
    activation_mode = _execute.make_str(activation_mode, "activation_mode")
    if dilations is None:
        dilations = [1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
        raise TypeError("Expected list for 'dilations' argument to "
                        "'fused_conv2d_bias_activation' Op, not %r." %
                        dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    try:
        _, _, _op = _op_def_lib._apply_op_helper(
            "FusedConv2DBiasActivation",
            conv_input=conv_input,
            filter=filter,
            bias=bias,
            side_input=side_input,
            conv_input_scale=conv_input_scale,
            side_input_scale=side_input_scale,
            strides=strides,
            padding=padding,
            data_format=data_format,
            filter_format=filter_format,
            activation_mode=activation_mode,
            dilations=dilations,
            name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(fused_conv2d_bias_activation,
                                    conv_input=conv_input,
                                    filter=filter,
                                    bias=bias,
                                    side_input=side_input,
                                    conv_input_scale=conv_input_scale,
                                    side_input_scale=side_input_scale,
                                    strides=strides,
                                    padding=padding,
                                    data_format=data_format,
                                    filter_format=filter_format,
                                    activation_mode=activation_mode,
                                    dilations=dilations,
                                    name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op._get_attr_type("T"), "Tbias",
              _op._get_attr_type("Tbias"), "strides", _op.get_attr("strides"),
              "padding", _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "filter_format",
              _op.get_attr("filter_format"), "activation_mode",
              _op.get_attr("activation_mode"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient("FusedConv2DBiasActivation", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
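For the float path, the fused op is equivalent to the following unfused composition (a sketch; the qint8/NCHW_VECT_C path and its [0..127] clipping are ignored):

```
import tensorflow as tf

def fused_reference(conv_input, filters, bias, side_input,
                    conv_input_scale, side_input_scale, strides, padding):
    # relu(conv_input_scale * conv2d(x, w) + side_input_scale * side + bias);
    # scaling the input or the conv output is equivalent since conv is linear.
    out = conv_input_scale * tf.nn.conv2d(
        conv_input, filters, strides=strides, padding=padding)
    if side_input_scale != 0.0:
        out += side_input_scale * side_input
    return tf.nn.relu(tf.nn.bias_add(out, bias))
```
Example 13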
def ragged_tensor_to_tensor(shape, values, default_value, row_partition_tensors, row_partition_types, name=None):
  r"""Create a dense tensor from a ragged tensor, possibly altering its shape.

  The `ragged_to_dense` op creates a dense tensor from a list of row partition
  tensors, a value vector, and default values. If the shape is unspecified, the
  minimal shape required to contain all the elements in the ragged tensor (the
  natural shape) will be used. If some dimensions are left unspecified, then the
  size of the natural shape is used in that dimension.

  The default_value will be broadcast to the output shape. After that, the values
  from the ragged tensor overwrite the default values. Note that the default_value
  must have fewer dimensions than the values.

  The row partition tensors are in the order of the dimensions.
  At present, the types can be:
  * "ROW_SPLITS": the row_splits tensor from the ragged tensor.
  * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
  * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
    is preceded by "FIRST_DIM_SIZE".

  Args:
    shape: A `Tensor`. Must be one of the following types: `int64`, `int32`.
      The desired shape of the output tensor. If left unspecified (empty),
      the minimal shape required to contain all the elements in the ragged tensor
      (the natural shape) will be used. If some dimensions are left unspecified, then
      the size of the natural shape is used in that dimension.

      Note that dense dimensions cannot be modified by the shape argument. Trying to
      change the size of a dense dimension will cause the op to fail.
      Examples:
      natural shape: [4, 5, 6]
      shape: -1
      output shape: [4, 5, 6]

      natural shape: [4, 5, 6]
      shape: [3, -1, 2]
      output shape: [3, 5, 2]

      natural shape: [4, 5, 6]
      shape: [3, 7, 2]
      output shape: [3, 7, 2]
    values: A `Tensor`.
      A 1D tensor representing the values of the ragged tensor.
    default_value: A `Tensor`. Must have the same type as `values`.
      The default_value when the shape is larger than the ragged tensor. The
      default_value is broadcast until it is the shape of the output tensor, and
      then overwritten by values in the ragged tensor. The default value must be
      compatible with this broadcast operation, and must have fewer dimensions than
      the value tensor.
    row_partition_tensors: A list of at least 1 `Tensor` objects with the same type in: `int64`, `int32`.
    row_partition_types: A list of `strings`.
      The types of the row partition tensors. At present, these can be:
      * "ROW_SPLITS": the row_splits tensor from the ragged tensor.
      * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
      * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
        is preceded by "FIRST_DIM_SIZE".
      The tensors are in the order of the dimensions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "RaggedTensorToTensor", name,
        tld.op_callbacks, shape, values, default_value, row_partition_tensors,
        "row_partition_types", row_partition_types)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ragged_tensor_to_tensor_eager_fallback(
          shape, values, default_value, row_partition_tensors,
          row_partition_types=row_partition_types, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(row_partition_tensors, (list, tuple)):
    raise TypeError(
        "Expected list for 'row_partition_tensors' argument to "
        "'ragged_tensor_to_tensor' Op, not %r." % row_partition_tensors)
  _attr_num_row_partition_tensors = len(row_partition_tensors)
  if not isinstance(row_partition_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'row_partition_types' argument to "
        "'ragged_tensor_to_tensor' Op, not %r." % row_partition_types)
  row_partition_types = [_execute.make_str(_s, "row_partition_types") for _s in row_partition_types]
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RaggedTensorToTensor", shape=shape, values=values,
                                default_value=default_value,
                                row_partition_tensors=row_partition_tensors,
                                row_partition_types=row_partition_types,
                                name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindex",
              _op._get_attr_type("Tindex"), "Tshape",
              _op._get_attr_type("Tshape"), "num_row_partition_tensors",
              _op._get_attr_int("num_row_partition_tensors"),
              "row_partition_types", _op.get_attr("row_partition_types"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "RaggedTensorToTensor", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
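The public entry point for this kernel is `tf.RaggedTensor.to_tensor`; a minimal sketch (assuming TF 2.x) of the ROW_SPLITS case with an explicit `shape` and `default_value`:

```
import tensorflow as tf

rt = tf.RaggedTensor.from_row_splits(values=[1, 2, 3, 4, 5],
                                     row_splits=[0, 2, 5])  # [[1, 2], [3, 4, 5]]
# Natural shape is [2, 3]; request [2, 4] and fill the gaps with 9.
print(rt.to_tensor(default_value=9, shape=[2, 4]).numpy())
# [[1 2 9 9]
#  [3 4 5 9]]
```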
Example 14
def tensor_summary(tensor,
                   description="",
                   labels=[],
                   display_name="",
                   name=None):
    r"""Outputs a `Summary` protocol buffer with a tensor.

  This op is being phased out in favor of TensorSummaryV2, which lets callers pass
  a tag as well as a serialized SummaryMetadata proto string that contains
  plugin-specific data. We will keep this op to maintain backwards compatibility.

  Args:
    tensor: A `Tensor`. A tensor to serialize.
    description: An optional `string`. Defaults to `""`.
      A json-encoded SummaryDescription proto.
    labels: An optional list of `strings`. Defaults to `[]`.
      An unused list of strings.
    display_name: An optional `string`. Defaults to `""`. An unused string.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
    _ctx = _context._context or _context.context()
    tld = _ctx._thread_local_data
    if tld.is_eager:
        try:
            _result = pywrap_tfe.TFE_Py_FastPathExecute(
                _ctx, "TensorSummary", name, tensor, "description",
                description, "labels", labels, "display_name", display_name)
            return _result
        except _core._NotOkStatusException as e:
            _ops.raise_from_not_ok_status(e, name)
        except _core._FallbackException:
            pass
        try:
            return tensor_summary_eager_fallback(tensor,
                                                 description=description,
                                                 labels=labels,
                                                 display_name=display_name,
                                                 name=name,
                                                 ctx=_ctx)
        except _core._SymbolicException:
            pass  # Add nodes to the TensorFlow graph.
    # Add nodes to the TensorFlow graph.
    if description is None:
        description = ""
    description = _execute.make_str(description, "description")
    if labels is None:
        labels = []
    if not isinstance(labels, (list, tuple)):
        raise TypeError("Expected list for 'labels' argument to "
                        "'tensor_summary' Op, not %r." % labels)
    labels = [_execute.make_str(_s, "labels") for _s in labels]
    if display_name is None:
        display_name = ""
    display_name = _execute.make_str(display_name, "display_name")
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorSummary",
        tensor=tensor,
        description=description,
        labels=labels,
        display_name=display_name,
        name=name)
    _result = _outputs[:]
    if _execute.must_record_gradient():
        _attrs = ("T", _op._get_attr_type("T"), "description",
                  _op.get_attr("description"), "labels",
                  _op.get_attr("labels"), "display_name",
                  _op.get_attr("display_name"))
        _inputs_flat = _op.inputs
        _execute.record_gradient("TensorSummary", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Example 15
def gradient_trees_prediction(tree_ensemble_handle,
                              seed,
                              dense_float_features,
                              sparse_float_feature_indices,
                              sparse_float_feature_values,
                              sparse_float_feature_shapes,
                              sparse_int_feature_indices,
                              sparse_int_feature_values,
                              sparse_int_feature_shapes,
                              learner_config,
                              apply_dropout,
                              apply_averaging,
                              center_bias,
                              reduce_dim,
                              use_locking=False,
                              name=None):
    r"""Runs multiple additive regression forests predictors on input instances

  and computes the final prediction for each class.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      The handle to the tree ensemble.
    seed: A `Tensor` of type `int64`. Random seed to be used for dropout.
    dense_float_features: A list of `Tensor` objects with type `float32`.
      Rank 2 Tensors containing dense float feature values.
    sparse_float_feature_indices: A list of `Tensor` objects with type `int64`.
      Rank 2 Tensors containing sparse float indices.
    sparse_float_feature_values: A list with the same length as `sparse_float_feature_indices` of `Tensor` objects with type `float32`.
      Rank 1 Tensors containing sparse float values.
    sparse_float_feature_shapes: A list with the same length as `sparse_float_feature_indices` of `Tensor` objects with type `int64`.
      Rank 1 Tensors containing sparse float shapes.
    sparse_int_feature_indices: A list of `Tensor` objects with type `int64`.
      Rank 2 Tensors containing sparse int indices.
    sparse_int_feature_values: A list with the same length as `sparse_int_feature_indices` of `Tensor` objects with type `int64`.
      Rank 1 Tensors containing sparse int values.
    sparse_int_feature_shapes: A list with the same length as `sparse_int_feature_indices` of `Tensor` objects with type `int64`.
      Rank 1 Tensors containing sparse int shapes.
    learner_config: A `string`.
      Config for the learner of type LearnerConfig proto. For now, prediction
      ops use only the LearningRateDropoutDrivenConfig config from the learner.
    apply_dropout: A `bool`. whether to apply dropout during prediction.
    apply_averaging: A `bool`.
      whether averaging of tree ensembles should take place. If set
      to true, will be based on AveragingConfig from learner_config.
    center_bias: A `bool`.
    reduce_dim: A `bool`.
      whether to reduce the dimension (legacy impl) or not.
    use_locking: An optional `bool`. Defaults to `False`.
      Whether to use locking.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (predictions, drop_out_tree_indices_weights).

    predictions: A `Tensor` of type `float32`. Rank 2 Tensor containing predictions per example per class.
    drop_out_tree_indices_weights: A `Tensor` of type `float32`. Rank 2 Tensor
      containing the indices of trees dropped during prediction and their
      original weights.
  """
    if not isinstance(dense_float_features, (list, tuple)):
        raise TypeError("Expected list for 'dense_float_features' argument to "
                        "'gradient_trees_prediction' Op, not %r." %
                        dense_float_features)
    _attr_num_dense_float_features = len(dense_float_features)
    if not isinstance(sparse_float_feature_indices, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_float_feature_indices' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_float_feature_indices)
    _attr_num_sparse_float_features = len(sparse_float_feature_indices)
    if not isinstance(sparse_float_feature_values, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_float_feature_values' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_float_feature_values)
    if len(sparse_float_feature_values) != _attr_num_sparse_float_features:
        raise ValueError(
            "List argument 'sparse_float_feature_values' to 'gradient_trees_prediction' Op with length %d "
            "must match length %d of argument 'sparse_float_feature_indices'."
            % (len(sparse_float_feature_values),
               _attr_num_sparse_float_features))
    if not isinstance(sparse_float_feature_shapes, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_float_feature_shapes' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_float_feature_shapes)
    if len(sparse_float_feature_shapes) != _attr_num_sparse_float_features:
        raise ValueError(
            "List argument 'sparse_float_feature_shapes' to 'gradient_trees_prediction' Op with length %d "
            "must match length %d of argument 'sparse_float_feature_indices'."
            % (len(sparse_float_feature_shapes),
               _attr_num_sparse_float_features))
    if not isinstance(sparse_int_feature_indices, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_int_feature_indices' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_int_feature_indices)
    _attr_num_sparse_int_features = len(sparse_int_feature_indices)
    if not isinstance(sparse_int_feature_values, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_int_feature_values' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_int_feature_values)
    if len(sparse_int_feature_values) != _attr_num_sparse_int_features:
        raise ValueError(
            "List argument 'sparse_int_feature_values' to 'gradient_trees_prediction' Op with length %d "
            "must match length %d of argument 'sparse_int_feature_indices'." %
            (len(sparse_int_feature_values), _attr_num_sparse_int_features))
    if not isinstance(sparse_int_feature_shapes, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_int_feature_shapes' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_int_feature_shapes)
    if len(sparse_int_feature_shapes) != _attr_num_sparse_int_features:
        raise ValueError(
            "List argument 'sparse_int_feature_shapes' to 'gradient_trees_prediction' Op with length %d "
            "must match length %d of argument 'sparse_int_feature_indices'." %
            (len(sparse_int_feature_shapes), _attr_num_sparse_int_features))
    learner_config = _execute.make_str(learner_config, "learner_config")
    apply_dropout = _execute.make_bool(apply_dropout, "apply_dropout")
    apply_averaging = _execute.make_bool(apply_averaging, "apply_averaging")
    center_bias = _execute.make_bool(center_bias, "center_bias")
    reduce_dim = _execute.make_bool(reduce_dim, "reduce_dim")
    if use_locking is None:
        use_locking = False
    use_locking = _execute.make_bool(use_locking, "use_locking")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper(
            "GradientTreesPrediction",
            tree_ensemble_handle=tree_ensemble_handle,
            seed=seed,
            dense_float_features=dense_float_features,
            sparse_float_feature_indices=sparse_float_feature_indices,
            sparse_float_feature_values=sparse_float_feature_values,
            sparse_float_feature_shapes=sparse_float_feature_shapes,
            sparse_int_feature_indices=sparse_int_feature_indices,
            sparse_int_feature_values=sparse_int_feature_values,
            sparse_int_feature_shapes=sparse_int_feature_shapes,
            learner_config=learner_config,
            apply_dropout=apply_dropout,
            apply_averaging=apply_averaging,
            center_bias=center_bias,
            reduce_dim=reduce_dim,
            use_locking=use_locking,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("learner_config", _op.get_attr("learner_config"),
                  "num_dense_float_features",
                  _op.get_attr("num_dense_float_features"),
                  "num_sparse_float_features",
                  _op.get_attr("num_sparse_float_features"),
                  "num_sparse_int_features",
                  _op.get_attr("num_sparse_int_features"), "use_locking",
                  _op.get_attr("use_locking"), "apply_dropout",
                  _op.get_attr("apply_dropout"), "apply_averaging",
                  _op.get_attr("apply_averaging"), "center_bias",
                  _op.get_attr("center_bias"), "reduce_dim",
                  _op.get_attr("reduce_dim"))
    else:
        tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle,
                                                      _dtypes.resource)
        seed = _ops.convert_to_tensor(seed, _dtypes.int64)
        dense_float_features = _ops.convert_n_to_tensor(
            dense_float_features, _dtypes.float32)
        sparse_float_feature_indices = _ops.convert_n_to_tensor(
            sparse_float_feature_indices, _dtypes.int64)
        sparse_float_feature_values = _ops.convert_n_to_tensor(
            sparse_float_feature_values, _dtypes.float32)
        sparse_float_feature_shapes = _ops.convert_n_to_tensor(
            sparse_float_feature_shapes, _dtypes.int64)
        sparse_int_feature_indices = _ops.convert_n_to_tensor(
            sparse_int_feature_indices, _dtypes.int64)
        sparse_int_feature_values = _ops.convert_n_to_tensor(
            sparse_int_feature_values, _dtypes.int64)
        sparse_int_feature_shapes = _ops.convert_n_to_tensor(
            sparse_int_feature_shapes, _dtypes.int64)
        _inputs_flat = [
            tree_ensemble_handle, seed
        ] + list(dense_float_features) + list(
            sparse_float_feature_indices
        ) + list(sparse_float_feature_values) + list(
            sparse_float_feature_shapes
        ) + list(sparse_int_feature_indices) + list(
            sparse_int_feature_values) + list(sparse_int_feature_shapes)
        _attrs = ("learner_config", learner_config, "num_dense_float_features",
                  _attr_num_dense_float_features, "num_sparse_float_features",
                  _attr_num_sparse_float_features, "num_sparse_int_features",
                  _attr_num_sparse_int_features, "use_locking", use_locking,
                  "apply_dropout", apply_dropout, "apply_averaging",
                  apply_averaging, "center_bias", center_bias, "reduce_dim",
                  reduce_dim)
        _result = _execute.execute(b"GradientTreesPrediction",
                                   2,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("GradientTreesPrediction", _inputs_flat, _attrs,
                             _result, name)
    _result = _GradientTreesPredictionOutput._make(_result)
    return _result
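
The long run of `isinstance`/length checks above pins down the op's `num_*` attrs: every sparse values/shapes list must match its indices list. The plain-Python sketch below (with a hypothetical helper name) distills that idiom; no TensorFlow is required to see the contract.

def check_sparse_lists(indices, values, shapes, arg):
    # Distilled version of the validation above: the indices list fixes the
    # num_* attr, and the values/shapes lists must match it in length.
    if not isinstance(indices, (list, tuple)):
        raise TypeError("Expected list for '%s_indices'." % arg)
    n = len(indices)
    for suffix, lst in (("values", values), ("shapes", shapes)):
        if len(lst) != n:
            raise ValueError(
                "List argument '%s_%s' with length %d must match length %d "
                "of '%s_indices'." % (arg, suffix, len(lst), n, arg))
    return n

# An all-dense batch passes trivially: every sparse list has length 0.
num_sparse_float = check_sparse_lists([], [], [], "sparse_float_feature")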
Example n. 16
def unbatch(batched_tensor, batch_index, id, timeout_micros, container="", shared_name="", name=None):
  r"""Reverses the operation of Batch for a single output Tensor.

  An instance of Unbatch either receives an empty batched_tensor, in which case it
  asynchronously waits until the values become available from a concurrently
  running instance of Unbatch with the same container and shared_name, or receives
  a non-empty batched_tensor, in which case it finalizes all other concurrently
  running instances and outputs its own element from the batch.

  batched_tensor: The possibly transformed output of Batch. The size of the first
   dimension should remain unchanged by the transformations for the operation to
   work.
  batch_index: The matching batch_index obtained from Batch.
  id: The id scalar emitted by Batch.
  unbatched_tensor: The Tensor corresponding to this execution.
  timeout_micros: Maximum amount of time (in microseconds) to wait to receive the
   batched input tensor associated with a given invocation of the op.
  container: Container to control resource sharing.
  shared_name: Instances of Unbatch with the same container and shared_name are
   assumed to possibly belong to the same batch. If left empty, the op name will
   be used as the shared name.

  Args:
    batched_tensor: A `Tensor`.
    batch_index: A `Tensor` of type `int64`.
    id: A `Tensor` of type `int64`.
    timeout_micros: An `int`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `batched_tensor`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Unbatch",
        name, _ctx.post_execution_callbacks, batched_tensor, batch_index, id,
        "timeout_micros", timeout_micros, "container", container,
        "shared_name", shared_name)
      return _result
    except _core._FallbackException:
      try:
        return unbatch_eager_fallback(
            batched_tensor, batch_index, id, timeout_micros=timeout_micros,
            container=container, shared_name=shared_name, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  timeout_micros = _execute.make_int(timeout_micros, "timeout_micros")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Unbatch", batched_tensor=batched_tensor, batch_index=batch_index,
                   id=id, timeout_micros=timeout_micros, container=container,
                   shared_name=shared_name, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("timeout_micros", _op.get_attr("timeout_micros"), "container",
            _op.get_attr("container"), "shared_name",
            _op.get_attr("shared_name"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "Unbatch", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
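
A hedged graph-mode sketch of the Batch/Unbatch pairing the docstring describes. It assumes the `tf.raw_ops.Batch` surface (available in newer TF releases; argument names may vary by version) to produce the `batched_tensor`, `batch_index`, and `id` that `unbatch` consumes.

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.constant([[1.0], [2.0]])
    # Batch emits (batched_tensors, batch_index, id); Unbatch reverses it.
    # op_id avoids shadowing the builtin `id`.
    batched, batch_index, op_id = tf.raw_ops.Batch(
        in_tensors=[x],
        num_batch_threads=1,
        max_batch_size=4,
        batch_timeout_micros=1000,
        grad_timeout_micros=1000,
        shared_name="demo_batch")
    restored = unbatch(batched[0], batch_index, op_id,
                       timeout_micros=1000,
                       shared_name="demo_unbatch")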
Example n. 17
def enter(data,
          frame_name,
          is_constant=False,
          parallel_iterations=10,
          name=None):
    r"""Creates or finds a child frame, and makes `data` available to the child frame.

  This op is used together with `Exit` to create loops in the graph.

  The unique `frame_name` is used by the `Executor` to identify frames. If
  `is_constant` is true, `output` is a constant in the child frame; otherwise
  it may be changed in the child frame. At most `parallel_iterations` iterations
  are run in parallel in the child frame.

  Args:
    data: A `Tensor`. The tensor to be made available to the child frame.
    frame_name: A `string`. The name of the child frame.
    is_constant: An optional `bool`. Defaults to `False`.
      If true, the output is constant within the child frame.
    parallel_iterations: An optional `int`. Defaults to `10`.
      The number of iterations allowed to run in parallel.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
    frame_name = _execute.make_str(frame_name, "frame_name")
    if is_constant is None:
        is_constant = False
    is_constant = _execute.make_bool(is_constant, "is_constant")
    if parallel_iterations is None:
        parallel_iterations = 10
    parallel_iterations = _execute.make_int(parallel_iterations,
                                            "parallel_iterations")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper(
            "Enter",
            data=data,
            frame_name=frame_name,
            is_constant=is_constant,
            parallel_iterations=parallel_iterations,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("T", _op.get_attr("T"), "frame_name",
                  _op.get_attr("frame_name"), "is_constant",
                  _op.get_attr("is_constant"), "parallel_iterations",
                  _op.get_attr("parallel_iterations"))
    else:
        _attr_T, (data, ) = _execute.args_to_matching_eager([data], _ctx)
        _inputs_flat = [data]
        _attrs = ("T", _attr_T, "frame_name", frame_name, "is_constant",
                  is_constant, "parallel_iterations", parallel_iterations)
        _result = _execute.execute(b"Enter",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("Enter", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
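
In practice `Enter` nodes are emitted for you by `tf.while_loop`; calling the wrapper directly is useful only for illustration. A minimal graph-mode sketch:

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.constant(1.0)
    # Makes x available inside the child frame "demo_frame"; since
    # is_constant=False, the value may change across loop iterations.
    entered = enter(x, frame_name="demo_frame",
                    is_constant=False, parallel_iterations=10)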
Example n. 18
def big_query_reader(project_id,
                     dataset_id,
                     table_id,
                     columns,
                     timestamp_millis,
                     container="",
                     shared_name="",
                     test_end_point="",
                     name=None):
    r"""A Reader that outputs rows from a BigQuery table as tensorflow Examples.

  Args:
    project_id: A `string`. GCP project ID.
    dataset_id: A `string`. BigQuery Dataset ID.
    table_id: A `string`. Table to read.
    columns: A list of `strings`.
      List of columns to read. Leave empty to read all columns.
    timestamp_millis: An `int`.
      Table snapshot timestamp in millis since epoch. Relative
      (negative or zero) snapshot times are not allowed. For more details, see
      'Table Decorators' in BigQuery docs.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this reader is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this reader is named in the given bucket
      with this shared_name. Otherwise, the node name is used instead.
    test_end_point: An optional `string`. Defaults to `""`.
      Do not use. For testing purposes only.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`. The handle to reference the Reader.
  """
    _ctx = _context._context
    if _ctx is not None and _ctx._eager_context.is_eager:
        raise RuntimeError(
            "big_query_reader op does not support eager execution. Arg 'reader_handle' is a ref."
        )
    # Add nodes to the TensorFlow graph.
    project_id = _execute.make_str(project_id, "project_id")
    dataset_id = _execute.make_str(dataset_id, "dataset_id")
    table_id = _execute.make_str(table_id, "table_id")
    if not isinstance(columns, (list, tuple)):
        raise TypeError("Expected list for 'columns' argument to "
                        "'big_query_reader' Op, not %r." % columns)
    columns = [_execute.make_str(_s, "columns") for _s in columns]
    timestamp_millis = _execute.make_int(timestamp_millis, "timestamp_millis")
    if container is None:
        container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
        shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    if test_end_point is None:
        test_end_point = ""
    test_end_point = _execute.make_str(test_end_point, "test_end_point")
    try:
        _, _, _op = _op_def_lib._apply_op_helper(
            "BigQueryReader",
            project_id=project_id,
            dataset_id=dataset_id,
            table_id=table_id,
            columns=columns,
            timestamp_millis=timestamp_millis,
            container=container,
            shared_name=shared_name,
            test_end_point=test_end_point,
            name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(big_query_reader,
                                    project_id=project_id,
                                    dataset_id=dataset_id,
                                    table_id=table_id,
                                    columns=columns,
                                    timestamp_millis=timestamp_millis,
                                    container=container,
                                    shared_name=shared_name,
                                    test_end_point=test_end_point,
                                    name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"), "project_id",
              _op.get_attr("project_id"), "dataset_id",
              _op.get_attr("dataset_id"), "table_id", _op.get_attr("table_id"),
              "columns", _op.get_attr("columns"), "timestamp_millis",
              _op.get_attr("timestamp_millis"), "test_end_point",
              _op.get_attr("test_end_point"))
    _execute.record_gradient("BigQueryReader", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
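
A graph-mode sketch with hypothetical identifiers; the op only builds the reader node, and the returned handle is then driven through the usual Reader read ops.

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    # Hypothetical project/dataset/table identifiers; timestamp_millis is an
    # absolute snapshot time in milliseconds since the epoch (negative or
    # zero relative values are rejected, per the docstring).
    reader_handle = big_query_reader(
        project_id="my-project",
        dataset_id="my_dataset",
        table_id="my_table",
        columns=["col_a", "col_b"],
        timestamp_millis=1514764800000)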
Example n. 19
def grow_tree_ensemble(tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate, dropout_seed, max_tree_depth, weak_learner_type, partition_ids, gains, splits, learner_config, center_bias, name=None):
  r"""Grows the tree ensemble by either adding a layer to the last tree being grown

  or by starting a new tree.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the ensemble variable.
    stamp_token: A `Tensor` of type `int64`.
      Stamp token for validating operation consistency.
    next_stamp_token: A `Tensor` of type `int64`.
      Stamp token to be used for the next iteration.
    learning_rate: A `Tensor` of type `float32`. Scalar learning rate.
    dropout_seed: A `Tensor` of type `int64`.
    max_tree_depth: A `Tensor` of type `int32`.
    weak_learner_type: A `Tensor` of type `int32`.
      The type of weak learner to use.
    partition_ids: A list of `Tensor` objects with type `int32`.
      List of Rank 1 Tensors containing partition Id per candidate.
    gains: A list with the same length as `partition_ids` of `Tensor` objects with type `float32`.
      List of Rank 1 Tensors containing gains per candidate.
    splits: A list with the same length as `partition_ids` of `Tensor` objects with type `string`.
      List of Rank 1 Tensors containing serialized SplitInfo protos per candidate.
    learner_config: A `string`.
      Config for the learner of type LearnerConfig proto.
    center_bias: A `bool`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(partition_ids, (list, tuple)):
      raise TypeError(
          "Expected list for 'partition_ids' argument to "
          "'grow_tree_ensemble' Op, not %r." % partition_ids)
    _attr_num_handlers = len(partition_ids)
    if not isinstance(gains, (list, tuple)):
      raise TypeError(
          "Expected list for 'gains' argument to "
          "'grow_tree_ensemble' Op, not %r." % gains)
    if len(gains) != _attr_num_handlers:
      raise ValueError(
          "List argument 'gains' to 'grow_tree_ensemble' Op with length %d "
          "must match length %d of argument 'partition_ids'." %
          (len(gains), _attr_num_handlers))
    if not isinstance(splits, (list, tuple)):
      raise TypeError(
          "Expected list for 'splits' argument to "
          "'grow_tree_ensemble' Op, not %r." % splits)
    if len(splits) != _attr_num_handlers:
      raise ValueError(
          "List argument 'splits' to 'grow_tree_ensemble' Op with length %d "
          "must match length %d of argument 'partition_ids'." %
          (len(splits), _attr_num_handlers))
    learner_config = _execute.make_str(learner_config, "learner_config")
    center_bias = _execute.make_bool(center_bias, "center_bias")
    _, _, _op = _op_def_lib._apply_op_helper(
        "GrowTreeEnsemble", tree_ensemble_handle=tree_ensemble_handle,
        stamp_token=stamp_token, next_stamp_token=next_stamp_token,
        learning_rate=learning_rate, dropout_seed=dropout_seed,
        max_tree_depth=max_tree_depth, weak_learner_type=weak_learner_type,
        partition_ids=partition_ids, gains=gains, splits=splits,
        learner_config=learner_config, center_bias=center_bias, name=name)
    return _op

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "GrowTreeEnsemble", name, _ctx._post_execution_callbacks,
        tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate,
        dropout_seed, max_tree_depth, weak_learner_type, partition_ids, gains,
        splits, "learner_config", learner_config, "center_bias", center_bias)
      return _result
    except _core._FallbackException:
      return grow_tree_ensemble_eager_fallback(
          tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate,
          dropout_seed, max_tree_depth, weak_learner_type, partition_ids,
          gains, splits, learner_config=learner_config,
          center_bias=center_bias, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
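
As with the prediction op, the per-handler candidate lists must agree in length, since `len(partition_ids)` fixes the `num_handlers` attr. A sketch of well-formed (hypothetical) candidates for two handlers:

import tensorflow as tf

# Two handlers: gains and splits must match partition_ids in length.
partition_ids = [tf.constant([0], tf.int32), tf.constant([1], tf.int32)]
gains = [tf.constant([0.5]), tf.constant([0.3])]
splits = [tf.constant([b""]), tf.constant([b""])]  # serialized SplitInfo protos
assert len(gains) == len(partition_ids) == len(splits)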
Example n. 20
def center_tree_ensemble_bias(tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates, learner_config, centering_epsilon=0.01, name=None):
  r"""Centers the tree ensemble bias before adding trees based on feature splits.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the ensemble variable.
    stamp_token: A `Tensor` of type `int64`.
      Stamp token for validating operation consistency.
    next_stamp_token: A `Tensor` of type `int64`.
      Stamp token to be used for the next iteration.
    delta_updates: A `Tensor` of type `float32`.
      Rank 1 Tensor containing delta updates per bias dimension.
    learner_config: A `string`.
      Config for the learner of type LearnerConfig proto.
    centering_epsilon: An optional `float`. Defaults to `0.01`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
    Scalar indicating whether more centering is needed.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    learner_config = _execute.make_str(learner_config, "learner_config")
    if centering_epsilon is None:
      centering_epsilon = 0.01
    centering_epsilon = _execute.make_float(centering_epsilon, "centering_epsilon")
    _, _, _op = _op_def_lib._apply_op_helper(
        "CenterTreeEnsembleBias", tree_ensemble_handle=tree_ensemble_handle,
        stamp_token=stamp_token, next_stamp_token=next_stamp_token,
        delta_updates=delta_updates, learner_config=learner_config,
        centering_epsilon=centering_epsilon, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("learner_config", _op.get_attr("learner_config"),
              "centering_epsilon", _op.get_attr("centering_epsilon"))
    _execute.record_gradient(
      "CenterTreeEnsembleBias", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "CenterTreeEnsembleBias", name, _ctx._post_execution_callbacks,
        tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates,
        "learner_config", learner_config, "centering_epsilon",
        centering_epsilon)
      return _result
    except _core._FallbackException:
      return center_tree_ensemble_bias_eager_fallback(
          tree_ensemble_handle, stamp_token, next_stamp_token, delta_updates,
          learner_config=learner_config, centering_epsilon=centering_epsilon,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
Example n. 21
def unbatch_grad(original_input, batch_index, grad, id, container="", shared_name="", name=None):
  r"""Gradient of Unbatch.

  Acts like Batch, but uses the given batch_index to index things as they
  become available. This ensures that the gradients are propagated back in the
  same session that did the forward pass.

  original_input: The input to the Unbatch operation that this is the gradient of.
  batch_index: The batch_index given to the Unbatch operation that this is the
   gradient of.
  grad: The downstream gradient.
  id: The id scalar emitted by Batch.
  batched_grad: The return value, either an empty tensor or the batched gradient.
  container: Container to control resource sharing.
  shared_name: Instances of UnbatchGrad with the same container and shared_name
   are assumed to possibly belong to the same batch. If left empty, the op name
   will be used as the shared name.

  Args:
    original_input: A `Tensor`.
    batch_index: A `Tensor` of type `int64`.
    grad: A `Tensor`. Must have the same type as `original_input`.
    id: A `Tensor` of type `int64`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `original_input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "UnbatchGrad", name, _ctx.post_execution_callbacks, original_input,
        batch_index, grad, id, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._FallbackException:
      try:
        return unbatch_grad_eager_fallback(
            original_input, batch_index, grad, id, container=container,
            shared_name=shared_name, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op = _op_def_lib._apply_op_helper(
        "UnbatchGrad", original_input=original_input, batch_index=batch_index,
                       grad=grad, id=id, container=container,
                       shared_name=shared_name, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("container", _op.get_attr("container"), "shared_name",
            _op.get_attr("shared_name"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "UnbatchGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example n. 22
def tree_predictions_v4(tree_handle,
                        input_data,
                        sparse_input_indices,
                        sparse_input_values,
                        sparse_input_shape,
                        input_spec,
                        params,
                        name=None):
    r"""Outputs the predictions for the given input data.

  Args:
    tree_handle: A `Tensor` of type `resource`. The handle to the tree.
    input_data: A `Tensor` of type `float32`.
      The training batch's features as a 2-d tensor; `input_data[i][j]`
      gives the j-th feature of the i-th input.
    sparse_input_indices: A `Tensor` of type `int64`.
      The indices tensor from the SparseTensor input.
    sparse_input_values: A `Tensor` of type `float32`.
      The values tensor from the SparseTensor input.
    sparse_input_shape: A `Tensor` of type `int64`.
      The shape tensor from the SparseTensor input.
    input_spec: A `string`.
    params: A `string`. A serialized TensorForestParams proto.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (predictions, tree_paths).

    predictions: A `Tensor` of type `float32`. `predictions[i][j]` is the probability that input i is class j.
    tree_paths: A `Tensor` of type `string`. `tree_paths[i]` is a serialized TreePath proto for example i.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        input_spec = _execute.make_str(input_spec, "input_spec")
        params = _execute.make_str(params, "params")
        _, _, _op = _op_def_lib._apply_op_helper(
            "TreePredictionsV4",
            tree_handle=tree_handle,
            input_data=input_data,
            sparse_input_indices=sparse_input_indices,
            sparse_input_values=sparse_input_values,
            sparse_input_shape=sparse_input_shape,
            input_spec=input_spec,
            params=params,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("input_spec", _op.get_attr("input_spec"), "params",
                  _op.get_attr("params"))
        _execute.record_gradient("TreePredictionsV4", _inputs_flat, _attrs,
                                 _result, name)
        _result = _TreePredictionsV4Output._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "TreePredictionsV4", name, _ctx._post_execution_callbacks,
                tree_handle, input_data, sparse_input_indices,
                sparse_input_values, sparse_input_shape, "input_spec",
                input_spec, "params", params)
            _result = _TreePredictionsV4Output._make(_result)
            return _result
        except _core._FallbackException:
            return tree_predictions_v4_eager_fallback(tree_handle,
                                                      input_data,
                                                      sparse_input_indices,
                                                      sparse_input_values,
                                                      sparse_input_shape,
                                                      input_spec=input_spec,
                                                      params=params,
                                                      name=name,
                                                      ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
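
The input contract is the same dense-plus-SparseTensor layout used throughout these forest ops. The sketch below builds a purely dense batch of two examples with three features each; the `tree_handle` itself would come from a tensor-forest resource op and is omitted here.

import numpy as np
import tensorflow as tf

# input_data[i][j] is the j-th feature of the i-th example.
input_data = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 3))
# Empty SparseTensor components describe "no sparse features".
sparse_indices = tf.zeros([0, 2], tf.int64)
sparse_values = tf.zeros([0], tf.float32)
sparse_shape = tf.constant([2, 3], tf.int64)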
Example n. 23
def _print(input, data, message="", first_n=-1, summarize=3, name=None):
    r"""Prints a list of tensors.

  Passes `input` through to `output` and prints `data` when evaluating.

  Args:
    input: A `Tensor`. The tensor passed to `output`
    data: A list of `Tensor` objects.
      A list of tensors to print out when op is evaluated.
    message: An optional `string`. Defaults to `""`.
      A string, prefix of the error message.
    first_n: An optional `int`. Defaults to `-1`.
      Only log `first_n` number of times. -1 disables logging.
    summarize: An optional `int`. Defaults to `3`.
      Only print this many entries of each tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
    _ctx = _context._context or _context.context()
    tld = _ctx._thread_local_data
    if tld.is_eager:
        try:
            _result = pywrap_tfe.TFE_Py_FastPathExecute(
                _ctx, "Print", name, input, data, "message", message,
                "first_n", first_n, "summarize", summarize)
            return _result
        except _core._NotOkStatusException as e:
            _ops.raise_from_not_ok_status(e, name)
        except _core._FallbackException:
            pass
        try:
            return _print_eager_fallback(input,
                                         data,
                                         message=message,
                                         first_n=first_n,
                                         summarize=summarize,
                                         name=name,
                                         ctx=_ctx)
        except _core._SymbolicException:
            pass  # Add nodes to the TensorFlow graph.
    # Add nodes to the TensorFlow graph.
    if message is None:
        message = ""
    message = _execute.make_str(message, "message")
    if first_n is None:
        first_n = -1
    first_n = _execute.make_int(first_n, "first_n")
    if summarize is None:
        summarize = 3
    summarize = _execute.make_int(summarize, "summarize")
    _, _, _op, _outputs = _op_def_library._apply_op_helper("Print",
                                                           input=input,
                                                           data=data,
                                                           message=message,
                                                           first_n=first_n,
                                                           summarize=summarize,
                                                           name=name)
    _result = _outputs[:]
    if _execute.must_record_gradient():
        _attrs = ("T", _op._get_attr_type("T"), "U", _op.get_attr("U"),
                  "message", _op.get_attr("message"), "first_n",
                  _op._get_attr_int("first_n"), "summarize",
                  _op._get_attr_int("summarize"))
        _inputs_flat = _op.inputs
        _execute.record_gradient("Print", _inputs_flat, _attrs, _result)
    _result, = _result
    return _result
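
A minimal graph-mode sketch: the op is an identity on `input` with a printing side effect, which is why it threads `input` through to the return value.

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.constant([1, 2, 3])
    # Logs at most first_n=2 evaluations, printing up to summarize=2
    # entries of each tensor in data, then passes x through unchanged.
    y = _print(x, [x], message="x is: ", first_n=2, summarize=2)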
Example n. 24
def stateful_partitioned_call(args, Tout, f, config="", config_proto="", executor_type="", name=None):
  r"""returns `f(inputs)`, where `f`'s body is placed and partitioned.

  Args:
    args: A list of `Tensor` objects. A list of input tensors.
    Tout: A list of `tf.DTypes`. A list of output types.
    f: A function decorated with @Defun.
            A function that takes 'args', a list of tensors, and returns 'output',
            another list of tensors. Input and output types are specified by 'Tin'
            and 'Tout'. The function body of f will be placed and partitioned across
            devices, setting this op apart from the regular Call op. This op is
            stateful.
    config: An optional `string`. Defaults to `""`.
    config_proto: An optional `string`. Defaults to `""`.
    executor_type: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "StatefulPartitionedCall",
        name, tld.op_callbacks, args, "Tout", Tout, "f", f, "config", config,
        "config_proto", config_proto, "executor_type", executor_type)
      return _result
    except _core._FallbackException:
      try:
        return stateful_partitioned_call_eager_fallback(
            args, Tout=Tout, f=f, config=config, config_proto=config_proto,
            executor_type=executor_type, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'stateful_partitioned_call' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  if config is None:
    config = ""
  config = _execute.make_str(config, "config")
  if config_proto is None:
    config_proto = ""
  config_proto = _execute.make_str(config_proto, "config_proto")
  if executor_type is None:
    executor_type = ""
  executor_type = _execute.make_str(executor_type, "executor_type")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StatefulPartitionedCall", args=args, Tout=Tout, f=f, config=config,
                                   config_proto=config_proto,
                                   executor_type=executor_type, name=name)
  _result = _outputs[:]
  if not _result:
    return _op
  if _execute.must_record_gradient():
    _attrs = ("Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f",
              _op.get_attr("f"), "config", _op.get_attr("config"),
              "config_proto", _op.get_attr("config_proto"), "executor_type",
              _op.get_attr("executor_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StatefulPartitionedCall", _inputs_flat, _attrs, _result)
  return _result
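
A hedged TF 1.x sketch using the internal `Defun` decorator the docstring refers to; whether the function object can be passed straight to the wrapper depends on the op library's attr handling in your TF version.

import tensorflow as tf
from tensorflow.python.framework.function import Defun

@Defun(tf.float32)
def double(x):
    # Body is placed and partitioned across devices by the op.
    return 2.0 * x

g = tf.Graph()
with g.as_default():
    out = stateful_partitioned_call(
        args=[tf.constant(3.0)], Tout=[tf.float32], f=double)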
Example n. 25
def image_projective_transform(images, transforms, interpolation, name=None):
    r"""Applies the given transform to each of the images.

  Input `image` is a `Tensor` in NHWC format (where the axes are image in batch,
  rows, columns, and channels). Input `transforms` is a num_images x 8 or 1 x 8
  matrix, where each row corresponds to a 3 x 3 projective transformation matrix,
  with the last entry assumed to be 1. If there is one row, the same
  transformation will be applied to all images.

  If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps
  the *output* point `(x, y)` to a transformed *input* point
  `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
  `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input
  image, the output pixel is set to 0. The output is the same size as the input.

  Args:
    images: A `Tensor`. Must be one of the following types: `uint8`, `int32`, `int64`, `half`, `float32`, `float64`.
      4D `Tensor`, input image(s) in NHWC format.
    transforms: A `Tensor` of type `float32`.
      2D `Tensor`, projective transform(s) to apply to the image(s).
    interpolation: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `images`.
    4D `Tensor`, image(s) in NHWC format, generated by applying
    the `transforms` to the `images`. Satisfies the description above.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        interpolation = _execute.make_str(interpolation, "interpolation")
        _, _, _op = _op_def_lib._apply_op_helper("ImageProjectiveTransform",
                                                 images=images,
                                                 transforms=transforms,
                                                 interpolation=interpolation,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("dtype", _op.get_attr("dtype"), "interpolation",
                  _op.get_attr("interpolation"))
        _execute.record_gradient("ImageProjectiveTransform", _inputs_flat,
                                 _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "ImageProjectiveTransform", name,
                _ctx._post_execution_callbacks, images, transforms,
                "interpolation", interpolation)
            return _result
        except _core._FallbackException:
            return image_projective_transform_eager_fallback(
                images,
                transforms,
                interpolation=interpolation,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
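
A worked example of the mapping above: for a pure translation, `c0 = c1 = 0`, so `k = 1` and output `(x, y)` samples input `(x + a2, y + b2)`. Shifting image content one pixel to the right therefore uses `a2 = -1`:

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    images = tf.zeros([1, 4, 4, 1], tf.float32)
    # [a0, a1, a2, b0, b1, b2, c0, c1] = [1, 0, -1, 0, 1, 0, 0, 0]:
    # output (x, y) samples input (x - 1, y), shifting content right.
    transforms = tf.constant([[1.0, 0.0, -1.0,
                               0.0, 1.0, 0.0,
                               0.0, 0.0]])
    out = image_projective_transform(images, transforms,
                                     interpolation="NEAREST")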
Example n. 26
def grow_tree_v4(tree_handle, stats_handle, finished_nodes, params, name=None):
  r"""Grows the tree for finished nodes and allocates waiting nodes.

  Args:
    tree_handle: A `Tensor` of type `resource`. The handle to the tree.
    stats_handle: A `Tensor` of type `resource`. The handle to the stats.
    finished_nodes: A `Tensor` of type `int32`.
      A 1-d Tensor of finished node ids from ProcessInput.
    params: A `string`. A serialized TensorForestParams proto.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "GrowTreeV4", name, _ctx._post_execution_callbacks, tree_handle,
        stats_handle, finished_nodes, "params", params)
      return _result
    except _core._FallbackException:
      try:
        return grow_tree_v4_eager_fallback(
            tree_handle, stats_handle, finished_nodes, params=params,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              grow_tree_v4, tree_handle=tree_handle,
                            stats_handle=stats_handle,
                            finished_nodes=finished_nodes, params=params,
                            name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  params = _execute.make_str(params, "params")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "GrowTreeV4", tree_handle=tree_handle, stats_handle=stats_handle,
                      finished_nodes=finished_nodes, params=params, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          grow_tree_v4, tree_handle=tree_handle, stats_handle=stats_handle,
                        finished_nodes=finished_nodes, params=params,
                        name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  return _op
Example n. 27
def fused_conv2d_bias_activation_eager_fallback(conv_input,
                                                filter,
                                                bias,
                                                side_input,
                                                conv_input_scale,
                                                side_input_scale,
                                                strides,
                                                padding,
                                                data_format="NHWC",
                                                filter_format="HWIO",
                                                activation_mode="Relu",
                                                dilations=[1, 1, 1, 1],
                                                name=None,
                                                ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function fused_conv2d_bias_activation
  """
    _ctx = ctx if ctx else _context.context()
    if not isinstance(strides, (list, tuple)):
        raise TypeError("Expected list for 'strides' argument to "
                        "'fused_conv2d_bias_activation' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
        data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if filter_format is None:
        filter_format = "HWIO"
    filter_format = _execute.make_str(filter_format, "filter_format")
    if activation_mode is None:
        activation_mode = "Relu"
    activation_mode = _execute.make_str(activation_mode, "activation_mode")
    if dilations is None:
        dilations = [1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
        raise TypeError("Expected list for 'dilations' argument to "
                        "'fused_conv2d_bias_activation' Op, not %r." %
                        dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _attr_T, _inputs_T = _execute.args_to_matching_eager(
        [conv_input, filter, side_input], _ctx)
    (conv_input, filter, side_input) = _inputs_T
    _attr_Tbias, (bias, ) = _execute.args_to_matching_eager([bias], _ctx)
    conv_input_scale = _ops.convert_to_tensor(conv_input_scale,
                                              _dtypes.float32)
    side_input_scale = _ops.convert_to_tensor(side_input_scale,
                                              _dtypes.float32)
    _inputs_flat = [
        conv_input, filter, bias, side_input, conv_input_scale,
        side_input_scale
    ]
    _attrs = ("T", _attr_T, "Tbias", _attr_Tbias, "strides", strides,
              "padding", padding, "data_format", data_format, "filter_format",
              filter_format, "activation_mode", activation_mode, "dilations",
              dilations)
    _result = _execute.execute(b"FusedConv2DBiasActivation",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("FusedConv2DBiasActivation", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
Example n. 28
def process_input_v4(tree_handle, stats_handle, input_data, sparse_input_indices, sparse_input_values, sparse_input_shape, input_labels, input_weights, leaf_ids, random_seed, input_spec, params, name=None):
  r"""Add labels to stats after traversing the tree for each example.

  Outputs node ids that are finished.

  Args:
    tree_handle: A `Tensor` of type `resource`. The handle to the tree.
    stats_handle: A `Tensor` of type `resource`. The handle to the stats.
    input_data: A `Tensor` of type `float32`.
      The training batch's features as a 2-d tensor; `input_data[i][j]`
      gives the j-th feature of the i-th input.
    sparse_input_indices: A `Tensor` of type `int64`.
      The indices tensor from the SparseTensor input.
    sparse_input_values: A `Tensor` of type `float32`.
      The values tensor from the SparseTensor input.
    sparse_input_shape: A `Tensor` of type `int64`.
      The shape tensor from the SparseTensor input.
    input_labels: A `Tensor` of type `float32`.
      The training batch's labels as a 1 or 2-d tensor.
      'input_labels[i][j]' gives the j-th label/target for the i-th input.
    input_weights: A `Tensor` of type `float32`.
      The training batch's weights as a 1-d tensor.
      'input_weights[i]' gives the weight for the i-th input.
    leaf_ids: A `Tensor` of type `int32`.
      `leaf_ids[i]` is the leaf id for input i.
    random_seed: An `int`.
    input_spec: A `string`.
    params: A `string`. A serialized TensorForestParams proto.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
    A 1-d tensor of node ids that have finished and are ready to
    grow.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "ProcessInputV4", name, _ctx._post_execution_callbacks, tree_handle,
        stats_handle, input_data, sparse_input_indices, sparse_input_values,
        sparse_input_shape, input_labels, input_weights, leaf_ids,
        "random_seed", random_seed, "input_spec", input_spec, "params",
        params)
      return _result
    except _core._FallbackException:
      try:
        return process_input_v4_eager_fallback(
            tree_handle, stats_handle, input_data, sparse_input_indices,
            sparse_input_values, sparse_input_shape, input_labels,
            input_weights, leaf_ids, random_seed=random_seed,
            input_spec=input_spec, params=params, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              process_input_v4, tree_handle=tree_handle,
                                stats_handle=stats_handle,
                                input_data=input_data,
                                sparse_input_indices=sparse_input_indices,
                                sparse_input_values=sparse_input_values,
                                sparse_input_shape=sparse_input_shape,
                                input_labels=input_labels,
                                input_weights=input_weights,
                                leaf_ids=leaf_ids, random_seed=random_seed,
                                input_spec=input_spec, params=params,
                                name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  random_seed = _execute.make_int(random_seed, "random_seed")
  input_spec = _execute.make_str(input_spec, "input_spec")
  params = _execute.make_str(params, "params")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ProcessInputV4", tree_handle=tree_handle, stats_handle=stats_handle,
                          input_data=input_data,
                          sparse_input_indices=sparse_input_indices,
                          sparse_input_values=sparse_input_values,
                          sparse_input_shape=sparse_input_shape,
                          input_labels=input_labels,
                          input_weights=input_weights, leaf_ids=leaf_ids,
                          random_seed=random_seed, input_spec=input_spec,
                          params=params, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          process_input_v4, tree_handle=tree_handle,
                            stats_handle=stats_handle, input_data=input_data,
                            sparse_input_indices=sparse_input_indices,
                            sparse_input_values=sparse_input_values,
                            sparse_input_shape=sparse_input_shape,
                            input_labels=input_labels,
                            input_weights=input_weights, leaf_ids=leaf_ids,
                            random_seed=random_seed, input_spec=input_spec,
                            params=params, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("random_seed", _op.get_attr("random_seed"), "input_spec",
            _op.get_attr("input_spec"), "params", _op.get_attr("params"))
  _execute.record_gradient(
      "ProcessInputV4", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example n. 29
def bigtable_client(project_id,
                    instance_id,
                    connection_pool_size,
                    max_receive_message_size=-1,
                    container="",
                    shared_name="",
                    name=None):
    r"""TODO: add doc.

  Args:
    project_id: A `string`.
    instance_id: A `string`.
    connection_pool_size: An `int`.
    max_receive_message_size: An optional `int`. Defaults to `-1`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
    _ctx = _context._context
    if _ctx is not None and _ctx._eager_context.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "BigtableClient", name, _ctx._post_execution_callbacks,
                "project_id", project_id, "instance_id", instance_id,
                "connection_pool_size", connection_pool_size,
                "max_receive_message_size", max_receive_message_size,
                "container", container, "shared_name", shared_name)
            return _result
        except _core._FallbackException:
            try:
                return bigtable_client_eager_fallback(
                    project_id=project_id,
                    instance_id=instance_id,
                    connection_pool_size=connection_pool_size,
                    max_receive_message_size=max_receive_message_size,
                    container=container,
                    shared_name=shared_name,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
            except (TypeError, ValueError):
                result = _dispatch.dispatch(
                    bigtable_client,
                    project_id=project_id,
                    instance_id=instance_id,
                    connection_pool_size=connection_pool_size,
                    max_receive_message_size=max_receive_message_size,
                    container=container,
                    shared_name=shared_name,
                    name=name)
                if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
                    return result
                raise
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    project_id = _execute.make_str(project_id, "project_id")
    instance_id = _execute.make_str(instance_id, "instance_id")
    connection_pool_size = _execute.make_int(connection_pool_size,
                                             "connection_pool_size")
    if max_receive_message_size is None:
        max_receive_message_size = -1
    max_receive_message_size = _execute.make_int(max_receive_message_size,
                                                 "max_receive_message_size")
    if container is None:
        container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
        shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    try:
        _, _, _op = _op_def_lib._apply_op_helper(
            "BigtableClient",
            project_id=project_id,
            instance_id=instance_id,
            connection_pool_size=connection_pool_size,
            max_receive_message_size=max_receive_message_size,
            container=container,
            shared_name=shared_name,
            name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(
            bigtable_client,
            project_id=project_id,
            instance_id=instance_id,
            connection_pool_size=connection_pool_size,
            max_receive_message_size=max_receive_message_size,
            container=container,
            shared_name=shared_name,
            name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("project_id", _op.get_attr("project_id"), "instance_id",
              _op.get_attr("instance_id"), "connection_pool_size",
              _op.get_attr("connection_pool_size"), "max_receive_message_size",
              _op.get_attr("max_receive_message_size"), "container",
              _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient("BigtableClient", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
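A minimal usage sketch for the wrapper above; the project and instance identifiers are placeholders, not real resources:

```python
# Hypothetical usage: build a shared Bigtable client resource handle in graph
# mode. "my-project" and "my-instance" are placeholder identifiers.
client_handle = bigtable_client(
    project_id="my-project",
    instance_id="my-instance",
    connection_pool_size=4,          # number of pooled gRPC connections
    shared_name="bigtable_client")   # lets other ops reuse the same client
```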
Example no. 30
def decode_proto_v2(bytes, message_type, field_names, output_types, descriptor_source="local://", message_format="binary", sanitize=False, name=None):
  r"""The op extracts fields from a serialized protocol buffers message into tensors.

  The `decode_proto` op extracts fields from a serialized protocol buffers
  message into tensors.  The fields in `field_names` are decoded and converted
  to the corresponding `output_types` if possible.
  
  A `message_type` name must be provided to give context for the field
  names. The actual message descriptor can be looked up either in the
  linked-in descriptor pool or a filename provided by the caller using
  the `descriptor_source` attribute.
  
  Each output tensor is a dense tensor. This means that it is padded to
  hold the largest number of repeated elements seen in the input
  minibatch. (The shape is also padded by one to prevent zero-sized
  dimensions). The actual repeat counts for each example in the
  minibatch can be found in the `sizes` output. In many cases the output
  of `decode_proto` is fed immediately into tf.squeeze if missing values
  are not a concern. When using tf.squeeze, always pass the squeeze
  dimension explicitly to avoid surprises.
  
  For the most part, the mapping between Proto field types and
  TensorFlow dtypes is straightforward. However, there are a few
  special cases:
  
  - A proto field that contains a submessage or group can only be converted
  to `DT_STRING` (the serialized submessage). This is to reduce the
  complexity of the API. The resulting string can be used as input
  to another instance of the decode_proto op.
  
  - TensorFlow lacks support for unsigned integers. The ops represent uint64
  types as a `DT_INT64` with the same twos-complement bit pattern
  (the obvious way). Unsigned int32 values can be represented exactly by
  specifying type `DT_INT64`, or using twos-complement if the caller
  specifies `DT_INT32` in the `output_types` attribute.
  
  The `descriptor_source` attribute selects a source of protocol
  descriptors to consult when looking up `message_type`. This may be a
  filename containing a serialized `FileDescriptorSet` message,
  or the special value `local://`, in which case only descriptors linked
  into the code will be searched; the filename can be on any filesystem
  accessible to TensorFlow.
  
  You can build a `descriptor_source` file using the `--descriptor_set_out`
  and `--include_imports` options to the protocol compiler `protoc`.
  
  The `local://` database only covers descriptors linked into the
  code via C++ libraries, not Python imports. You can link in a proto descriptor
  by creating a cc_library target with alwayslink=1.
  
  Both binary and text proto serializations are supported, and can be
  chosen using the `format` attribute.

  Args:
    bytes: A `Tensor` of type `string`.
      Tensor of serialized protos with shape `batch_shape`.
    message_type: A `string`. Name of the proto message type to decode.
    field_names: A list of `strings`.
      List of strings containing proto field names. An extension field can be decoded
      by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME.
    output_types: A list of `tf.DTypes`.
      List of TF types to use for the respective field in field_names.
    descriptor_source: An optional `string`. Defaults to `"local://"`.
      Either the special value `local://` or a path to a file containing
      a serialized `FileDescriptorSet`.
    message_format: An optional `string`. Defaults to `"binary"`.
      Either `binary` or `text`.
    sanitize: An optional `bool`. Defaults to `False`.
      Whether to sanitize the result or not.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sizes, values).

    sizes: A `Tensor` of type `int32`.
    values: A list of `Tensor` objects of type `output_types`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "DecodeProtoV2", name, _ctx._post_execution_callbacks, bytes,
        "message_type", message_type, "field_names", field_names,
        "output_types", output_types, "descriptor_source", descriptor_source,
        "message_format", message_format, "sanitize", sanitize)
      _result = _DecodeProtoV2Output._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return decode_proto_v2_eager_fallback(
            bytes, message_type=message_type, field_names=field_names,
            output_types=output_types, descriptor_source=descriptor_source,
            message_format=message_format, sanitize=sanitize, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  message_type = _execute.make_str(message_type, "message_type")
  if not isinstance(field_names, (list, tuple)):
    raise TypeError(
        "Expected list for 'field_names' argument to "
        "'decode_proto_v2' Op, not %r." % field_names)
  field_names = [_execute.make_str(_s, "field_names") for _s in field_names]
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'decode_proto_v2' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if descriptor_source is None:
    descriptor_source = "local://"
  descriptor_source = _execute.make_str(descriptor_source, "descriptor_source")
  if message_format is None:
    message_format = "binary"
  message_format = _execute.make_str(message_format, "message_format")
  if sanitize is None:
    sanitize = False
  sanitize = _execute.make_bool(sanitize, "sanitize")
  _, _, _op = _op_def_lib._apply_op_helper(
        "DecodeProtoV2", bytes=bytes, message_type=message_type,
                         field_names=field_names, output_types=output_types,
                         descriptor_source=descriptor_source,
                         message_format=message_format, sanitize=sanitize,
                         name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("message_type", _op.get_attr("message_type"), "field_names",
            _op.get_attr("field_names"), "output_types",
            _op.get_attr("output_types"), "descriptor_source",
            _op.get_attr("descriptor_source"), "message_format",
            _op.get_attr("message_format"), "sanitize",
            _op.get_attr("sanitize"))
  _execute.record_gradient(
      "DecodeProtoV2", _inputs_flat, _attrs, _result, name)
  _result = _result[:1] + [_result[1:]]
  _result = _DecodeProtoV2Output._make(_result)
  return _result
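A hedged usage sketch for the op above; the message type and field names are hypothetical, and a real call needs a type known to the linked-in descriptor pool (or a `descriptor_source` file):

```python
import tensorflow as tf

# "example.MyMessage" and its fields "id"/"name" are hypothetical.
serialized = tf.placeholder(tf.string, shape=[None])  # batch of proto bytes
sizes, values = decode_proto_v2(
    bytes=serialized,
    message_type="example.MyMessage",
    field_names=["id", "name"],
    output_types=[tf.int64, tf.string])
# `sizes` has shape [batch, 2] with per-field repeat counts; `values[0]`
# holds the ids and `values[1]` the names, padded along the last dimension.
```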
Example no. 31
def decision_tree_ensemble_resource_handle_op(container="", shared_name="", name=None):
  r"""TODO: add doc.

  Args:
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "DecisionTreeEnsembleResourceHandleOp", name,
        _ctx._post_execution_callbacks, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._FallbackException:
      try:
        return decision_tree_ensemble_resource_handle_op_eager_fallback(
            container=container, shared_name=shared_name, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              decision_tree_ensemble_resource_handle_op, container=container,
                                                         shared_name=shared_name,
                                                         name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "DecisionTreeEnsembleResourceHandleOp", container=container,
                                                shared_name=shared_name,
                                                name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          decision_tree_ensemble_resource_handle_op, container=container,
                                                     shared_name=shared_name,
                                                     name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("container", _op.get_attr("container"), "shared_name",
            _op.get_attr("shared_name"))
  _execute.record_gradient(
      "DecisionTreeEnsembleResourceHandleOp", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
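A minimal sketch for the handle op above:

```python
# Create (or look up) a shared tree-ensemble resource handle. Ops that are
# given the same shared_name resolve to the same underlying resource.
handle = decision_tree_ensemble_resource_handle_op(
    container="",                        # default container
    shared_name="tree_ensemble/handle")
```
Example no. 32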
def encode_proto(sizes,
                 values,
                 field_names,
                 message_type,
                 descriptor_source="local://",
                 name=None):
    r"""The op serializes protobuf messages provided in the input tensors.

  The types of the tensors in `values` must match the schema for the
  fields specified in `field_names`. All the tensors in `values` must
  have a common shape prefix, *batch_shape*.

  The `sizes` tensor specifies repeat counts for each field.  The repeat
  count (last dimension) of each tensor in `values` must be greater
  than or equal to the corresponding repeat count in `sizes`.

  A `message_type` name must be provided to give context for the field
  names. The actual message descriptor can be looked up either in the
  linked-in descriptor pool or a filename provided by the caller using
  the `descriptor_source` attribute.

  The `descriptor_source` attribute selects a source of protocol
  descriptors to consult when looking up `message_type`. This may be a
  filename containing a serialized `FileDescriptorSet` message,
  or the special value `local://`, in which case only descriptors linked
  into the code will be searched; the filename can be on any filesystem
  accessible to TensorFlow.

  You can build a `descriptor_source` file using the `--descriptor_set_out`
  and `--include_imports` options to the protocol compiler `protoc`.

  The `local://` database only covers descriptors linked into the
  code via C++ libraries, not Python imports. You can link in a proto descriptor
  by creating a cc_library target with alwayslink=1.

  There are a few special cases in the value mapping:

  Submessage and group fields must be pre-serialized as TensorFlow strings.

  TensorFlow lacks support for unsigned int64s, so they must be
  represented as `tf.int64` with the same twos-complement bit pattern
  (the obvious way).

  Unsigned int32 values can be represented exactly with `tf.int64`, or
  with sign wrapping if the input is of type `tf.int32`.

  Args:
    sizes: A `Tensor` of type `int32`.
      Tensor of int32 with shape `[batch_shape, len(field_names)]`.
    values: A list of `Tensor` objects.
      List of tensors containing values for the corresponding field.
    field_names: A list of `strings`.
      List of strings containing proto field names.
    message_type: A `string`. Name of the proto message type to decode.
    descriptor_source: An optional `string`. Defaults to `"local://"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "EncodeProto", name, _ctx._post_execution_callbacks, sizes,
                values, "field_names", field_names, "message_type",
                message_type, "descriptor_source", descriptor_source)
            return _result
        except _core._FallbackException:
            try:
                return encode_proto_eager_fallback(
                    sizes,
                    values,
                    field_names=field_names,
                    message_type=message_type,
                    descriptor_source=descriptor_source,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if not isinstance(field_names, (list, tuple)):
        raise TypeError("Expected list for 'field_names' argument to "
                        "'encode_proto' Op, not %r." % field_names)
    field_names = [_execute.make_str(_s, "field_names") for _s in field_names]
    message_type = _execute.make_str(message_type, "message_type")
    if descriptor_source is None:
        descriptor_source = "local://"
    descriptor_source = _execute.make_str(descriptor_source,
                                          "descriptor_source")
    _, _, _op = _op_def_lib._apply_op_helper(
        "EncodeProto",
        sizes=sizes,
        values=values,
        field_names=field_names,
        message_type=message_type,
        descriptor_source=descriptor_source,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("field_names", _op.get_attr("field_names"), "message_type",
              _op.get_attr("message_type"), "descriptor_source",
              _op.get_attr("descriptor_source"), "Tinput_types",
              _op.get_attr("Tinput_types"))
    _execute.record_gradient("EncodeProto", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
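A hedged round-trip sketch for the op above, reusing the hypothetical message type from the `decode_proto_v2` example:

```python
import tensorflow as tf

# Shapes: `sizes` is [batch, num_fields]; each value tensor is
# [batch, max_repeat]. "example.MyMessage" is a hypothetical type.
sizes = tf.constant([[1, 1]], dtype=tf.int32)     # one value per field
ids = tf.constant([[7]], dtype=tf.int64)
names = tf.constant([["alice"]], dtype=tf.string)
reserialized = encode_proto(
    sizes=sizes,
    values=[ids, names],
    field_names=["id", "name"],
    message_type="example.MyMessage")   # -> string tensor of shape [1]
```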
Example no. 33
def unbatch(batched_tensor,
            batch_index,
            id,
            timeout_micros,
            container="",
            shared_name="",
            name=None):
    r"""Reverses the operation of Batch for a single output Tensor.

  An instance of Unbatch either receives an empty batched_tensor, in which case it
  asynchronously waits until the values become available from a concurrently
  running instance of Unbatch with the same container and shared_name, or receives
  a non-empty batched_tensor, in which case it finalizes all other concurrently
  running instances and outputs its own element from the batch.

  Args:
    batched_tensor: A `Tensor`.
      The possibly transformed output of Batch. The size of the first
      dimension should remain unchanged by the transformations for the operation to
      work.
    batch_index: A `Tensor` of type `int64`.
      The matching batch_index obtained from Batch.
    id: A `Tensor` of type `int64`. The id scalar emitted by Batch.
    timeout_micros: An `int`.
      Maximum amount of time (in microseconds) to wait to receive the
      batched input tensor associated with a given invocation of the op.
    container: An optional `string`. Defaults to `""`.
      Container to control resource sharing.
    shared_name: An optional `string`. Defaults to `""`.
      Instances of Unbatch with the same container and shared_name are
      assumed to possibly belong to the same batch. If left empty, the op name will
      be used as the shared name.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `batched_tensor`.
    The Tensor corresponding to this execution.
  """
    timeout_micros = _execute.make_int(timeout_micros, "timeout_micros")
    if container is None:
        container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
        shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("Unbatch",
                                                 batched_tensor=batched_tensor,
                                                 batch_index=batch_index,
                                                 id=id,
                                                 timeout_micros=timeout_micros,
                                                 container=container,
                                                 shared_name=shared_name,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("timeout_micros", _op.get_attr("timeout_micros"),
                  "container", _op.get_attr("container"), "shared_name",
                  _op.get_attr("shared_name"), "T", _op.get_attr("T"))
    else:
        _attr_T, (batched_tensor, ) = _execute.args_to_matching_eager(
            [batched_tensor], _ctx)
        _attr_T = _attr_T.as_datatype_enum
        batch_index = _ops.convert_to_tensor(batch_index, _dtypes.int64)
        id = _ops.convert_to_tensor(id, _dtypes.int64)
        _inputs_flat = [batched_tensor, batch_index, id]
        _attrs = ("timeout_micros", timeout_micros, "container", container,
                  "shared_name", shared_name, "T", _attr_T)
        _result = _execute.execute(b"Unbatch",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("Unbatch", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
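A sketch pairing this op with `batch` (the wrapper shown in Example no. 35 below); the shared names are placeholders:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4])
[batched], batch_index, op_id = batch(
    in_tensors=[x], num_batch_threads=1, max_batch_size=8,
    batch_timeout_micros=100000, grad_timeout_micros=100000,
    shared_name="shared_batch")
y = batched * 2.0                      # computation on the batched tensor
per_example = unbatch(y, batch_index, op_id, timeout_micros=100000,
                      shared_name="shared_unbatch")
```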
Example no. 34
def nccl_all_reduce(input, reduction, num_devices, shared_name, name=None):
    r"""Outputs a tensor containing the reduction across all input tensors.

  Outputs a tensor containing the reduction across all input tensors passed to ops
  within the same `shared_name`.

  The graph should be constructed so that if one op runs with shared_name value `c`,
  then `num_devices` ops will run with shared_name value `c`.  Failure to do so
  will cause the graph execution to fail to complete.

  input: the input to the reduction
  data: the value of the reduction across all `num_devices` devices.
  reduction: the reduction operation to perform.
  num_devices: The number of devices participating in this reduction.
  shared_name: Identifier that is shared between ops of the same reduction.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`.
    reduction: A `string` from: `"min", "max", "prod", "sum"`.
    num_devices: An `int`.
    shared_name: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "NcclAllReduce", name, _ctx._post_execution_callbacks, input,
                "reduction", reduction, "num_devices", num_devices,
                "shared_name", shared_name)
            return _result
        except _core._FallbackException:
            try:
                return nccl_all_reduce_eager_fallback(input,
                                                      reduction=reduction,
                                                      num_devices=num_devices,
                                                      shared_name=shared_name,
                                                      name=name,
                                                      ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    reduction = _execute.make_str(reduction, "reduction")
    num_devices = _execute.make_int(num_devices, "num_devices")
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper("NcclAllReduce",
                                             input=input,
                                             reduction=reduction,
                                             num_devices=num_devices,
                                             shared_name=shared_name,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("reduction", _op.get_attr("reduction"), "T", _op.get_attr("T"),
              "num_devices", _op.get_attr("num_devices"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient("NcclAllReduce", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
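A sketch of a two-GPU all-reduce; the device strings and shared name are placeholders, and both ops must actually run for either to complete:

```python
import tensorflow as tf

sums = []
for i in range(2):
  with tf.device("/gpu:%d" % i):
    t = tf.ones([4]) * (i + 1)          # device 0 holds 1s, device 1 holds 2s
    sums.append(nccl_all_reduce(t, reduction="sum", num_devices=2,
                                shared_name="allreduce_0"))
# Evaluating both outputs in a single session.run yields [3., 3., 3., 3.]
# on each device.
```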
Example no. 35
def batch(in_tensors,
          num_batch_threads,
          max_batch_size,
          batch_timeout_micros,
          grad_timeout_micros,
          allowed_batch_sizes=[],
          container="",
          shared_name="",
          batching_queue="",
          name=None):
    r"""Batches all input tensors nondeterministically.

  When many instances of this Op are being run concurrently with the same
  container/shared_name in the same device, some will output zero-shaped Tensors
  and others will output Tensors of size up to max_batch_size.

  All Tensors in in_tensors are batched together (so, for example, labels and
  features should be batched with a single instance of this operation).

  Each invocation of batch emits an `id` scalar which will be used to identify
  this particular invocation when doing unbatch or its gradient.

  Each op which emits a non-empty batch will also emit a non-empty batch_index
  Tensor, which is a [K, 3] matrix where each row contains the invocation's id,
  start, and length of elements of each set of Tensors present in batched_tensors.

  Batched tensors are concatenated along the first dimension, and all tensors in
  in_tensors must have the first dimension of the same size.

  Args:
    in_tensors: A list of `Tensor` objects. The tensors to be batched.
    num_batch_threads: An `int`.
      Number of scheduling threads for processing batches of work.
      Determines the number of batches processed in parallel.
    max_batch_size: An `int`. Batch sizes will never be bigger than this.
    batch_timeout_micros: An `int`.
      Maximum number of microseconds to wait before outputting
      an incomplete batch.
    grad_timeout_micros: An `int`.
      The timeout to use for the gradient. See Unbatch.
    allowed_batch_sizes: An optional list of `ints`. Defaults to `[]`.
      Optional list of allowed batch sizes. If left empty, does
      nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
      batches up to one of those sizes. The entries must increase monotonically, and
      the final entry must equal max_batch_size.
    container: An optional `string`. Defaults to `""`.
      Controls the scope of sharing of this batch.
    shared_name: An optional `string`. Defaults to `""`.
      Concurrently running instances of batch in the same device with the
      same container and shared_name will batch their elements together. If left
      empty, the op name will be used as the shared name.
    batching_queue: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (batched_tensors, batch_index, id).

    batched_tensors: A list of `Tensor` objects. Has the same type as `in_tensors`. Either empty tensors or a batch of concatenated Tensors.
    batch_index: A `Tensor` of type `int64`. If out_tensors is non-empty, has information to invert it.
    id: A `Tensor` of type `int64`. always contains a scalar with a unique ID for this invocation of Batch.
  """
    num_batch_threads = _execute.make_int(num_batch_threads,
                                          "num_batch_threads")
    max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
    batch_timeout_micros = _execute.make_int(batch_timeout_micros,
                                             "batch_timeout_micros")
    grad_timeout_micros = _execute.make_int(grad_timeout_micros,
                                            "grad_timeout_micros")
    if allowed_batch_sizes is None:
        allowed_batch_sizes = []
    if not isinstance(allowed_batch_sizes, (list, tuple)):
        raise TypeError("Expected list for 'allowed_batch_sizes' argument to "
                        "'batch' Op, not %r." % allowed_batch_sizes)
    allowed_batch_sizes = [
        _execute.make_int(_i, "allowed_batch_sizes")
        for _i in allowed_batch_sizes
    ]
    if container is None:
        container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
        shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    if batching_queue is None:
        batching_queue = ""
    batching_queue = _execute.make_str(batching_queue, "batching_queue")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper(
            "Batch",
            in_tensors=in_tensors,
            num_batch_threads=num_batch_threads,
            max_batch_size=max_batch_size,
            batch_timeout_micros=batch_timeout_micros,
            grad_timeout_micros=grad_timeout_micros,
            allowed_batch_sizes=allowed_batch_sizes,
            container=container,
            shared_name=shared_name,
            batching_queue=batching_queue,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("num_batch_threads",
                  _op.get_attr("num_batch_threads"), "max_batch_size",
                  _op.get_attr("max_batch_size"), "batch_timeout_micros",
                  _op.get_attr("batch_timeout_micros"), "allowed_batch_sizes",
                  _op.get_attr("allowed_batch_sizes"), "grad_timeout_micros",
                  _op.get_attr("grad_timeout_micros"), "container",
                  _op.get_attr("container"), "shared_name",
                  _op.get_attr("shared_name"), "batching_queue",
                  _op.get_attr("batching_queue"), "T", _op.get_attr("T"))
    else:
        _attr_T, in_tensors = _execute.convert_to_mixed_eager_tensors(
            in_tensors, _ctx)
        _attr_T = [_t.as_datatype_enum for _t in _attr_T]
        _inputs_flat = list(in_tensors)
        _attrs = ("num_batch_threads", num_batch_threads, "max_batch_size",
                  max_batch_size, "batch_timeout_micros", batch_timeout_micros,
                  "allowed_batch_sizes", allowed_batch_sizes,
                  "grad_timeout_micros", grad_timeout_micros, "container",
                  container, "shared_name", shared_name, "batching_queue",
                  batching_queue, "T", _attr_T)
        _result = _execute.execute(b"Batch",
                                   len(in_tensors) + 2,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("Batch", _inputs_flat, _attrs, _result, name)
    _result = [_result[:len(in_tensors)]] + _result[len(in_tensors):]
    _result = _BatchOutput._make(_result)
    return _result
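A sketch of the padding behavior described above; all names are placeholders:

```python
import tensorflow as tf

# With allowed_batch_sizes=[4, 8], a batch that times out with 3 examples is
# padded up to size 4 before being emitted.
features = tf.placeholder(tf.float32, shape=[None, 16])
batched_tensors, batch_index, op_id = batch(
    in_tensors=[features], num_batch_threads=2, max_batch_size=8,
    batch_timeout_micros=5000, grad_timeout_micros=100000,
    allowed_batch_sizes=[4, 8], shared_name="serving_batch")
```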
Example no. 36
def batch_function(in_tensors, captured_tensors, f, num_batch_threads, max_batch_size, batch_timeout_micros, Tout, max_enqueued_batches=10, allowed_batch_sizes=[], container="", shared_name="", batching_queue="", name=None):
  r"""Batches all the inputs tensors to the computation done by the function.

  So, for example, in the following code

    ```python

    # This input will be captured.
    y = tf.placeholder_with_default(1.0, shape=[])

    @tf.Defun(tf.float32)
    def computation(a):
      return tf.matmul(a, a) + y

    a = tf.placeholder(tf.float32)  # the input to be batched
    b = gen_batch_ops.batch_function(
            f=computation,
            in_tensors=[a],
            captured_tensors=computation.captured_inputs,
            Tout=[o.type for o in computation.definition.signature.output_arg],
            num_batch_threads=1,
            max_batch_size=10,
            batch_timeout_micros=100000,  # 100ms
            allowed_batch_sizes=[3, 10],
            batching_queue="")
    ```

  If more than one session.run call is simultaneously trying to compute `b`,
  the values of `a` will be gathered, non-deterministically concatenated
  along the first axis, and only one thread will run the computation.

  Assumes that all arguments of the function are Tensors which will be batched
  along their first dimension.

  Arguments that are captured are not batched. The session.run call which does
  the concatenation will use the values of the captured tensors available to it.
  Therefore, typical uses of captured tensors should involve values which remain
  unchanged across session.run calls. Inference is a good example of this.

  SparseTensor is not supported. The return value of the decorated function
  must be a Tensor or a list/tuple of Tensors.

  Args:
    in_tensors: A list of `Tensor` objects. The tensors to be batched.
    captured_tensors: A list of `Tensor` objects.
      The tensors which are captured in the function, and don't need
      to be batched.
    f: A function decorated with @Defun.
    num_batch_threads: An `int`.
      Number of scheduling threads for processing batches of work.
      Determines the number of batches processed in parallel.
    max_batch_size: An `int`. Batch sizes will never be bigger than this.
    batch_timeout_micros: An `int`.
      Maximum number of microseconds to wait before outputting
      an incomplete batch.
    Tout: A list of `tf.DTypes` that has length `>= 1`.
      the types of the output tensors.
    max_enqueued_batches: An optional `int`. Defaults to `10`.
      Maximum number of batches enqueued. Default: 10.
    allowed_batch_sizes: An optional list of `ints`. Defaults to `[]`.
      Optional list of allowed batch sizes. If left empty, does
      nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
      batches up to one of those sizes. The entries must increase monotonically, and
      the final entry must equal max_batch_size.
    container: An optional `string`. Defaults to `""`.
      Controls the scope of sharing of this batch.
    shared_name: An optional `string`. Defaults to `""`.
      Concurrently running instances of batch in the same device with the
      same container and shared_name will batch their elements together. If left
      empty, the op name will be used as the shared name.
    batching_queue: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "BatchFunction", name, _ctx.post_execution_callbacks, in_tensors,
        captured_tensors, "f", f, "num_batch_threads", num_batch_threads,
        "max_batch_size", max_batch_size, "batch_timeout_micros",
        batch_timeout_micros, "max_enqueued_batches", max_enqueued_batches,
        "allowed_batch_sizes", allowed_batch_sizes, "container", container,
        "shared_name", shared_name, "batching_queue", batching_queue, "Tout",
        Tout)
      return _result
    except _core._FallbackException:
      try:
        return batch_function_eager_fallback(
            in_tensors, captured_tensors, f=f,
            num_batch_threads=num_batch_threads,
            max_batch_size=max_batch_size,
            batch_timeout_micros=batch_timeout_micros,
            max_enqueued_batches=max_enqueued_batches,
            allowed_batch_sizes=allowed_batch_sizes, container=container,
            shared_name=shared_name, batching_queue=batching_queue, Tout=Tout,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads")
  max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
  batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros")
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'batch_function' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  if max_enqueued_batches is None:
    max_enqueued_batches = 10
  max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches")
  if allowed_batch_sizes is None:
    allowed_batch_sizes = []
  if not isinstance(allowed_batch_sizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'allowed_batch_sizes' argument to "
        "'batch_function' Op, not %r." % allowed_batch_sizes)
  allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes]
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  if batching_queue is None:
    batching_queue = ""
  batching_queue = _execute.make_str(batching_queue, "batching_queue")
  _, _, _op = _op_def_lib._apply_op_helper(
        "BatchFunction", in_tensors=in_tensors,
                         captured_tensors=captured_tensors, f=f,
                         num_batch_threads=num_batch_threads,
                         max_batch_size=max_batch_size,
                         batch_timeout_micros=batch_timeout_micros, Tout=Tout,
                         max_enqueued_batches=max_enqueued_batches,
                         allowed_batch_sizes=allowed_batch_sizes,
                         container=container, shared_name=shared_name,
                         batching_queue=batching_queue, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("f", _op.get_attr("f"), "num_batch_threads",
            _op.get_attr("num_batch_threads"), "max_batch_size",
            _op.get_attr("max_batch_size"), "batch_timeout_micros",
            _op.get_attr("batch_timeout_micros"), "max_enqueued_batches",
            _op.get_attr("max_enqueued_batches"), "allowed_batch_sizes",
            _op.get_attr("allowed_batch_sizes"), "container",
            _op.get_attr("container"), "shared_name",
            _op.get_attr("shared_name"), "batching_queue",
            _op.get_attr("batching_queue"), "Tin", _op.get_attr("Tin"),
            "Tcaptured", _op.get_attr("Tcaptured"), "Tout",
            _op.get_attr("Tout"))
  _execute.record_gradient(
      "BatchFunction", _inputs_flat, _attrs, _result, name)
  return _result
Example no. 37
def center_tree_ensemble_bias(tree_ensemble_handle,
                              stamp_token,
                              next_stamp_token,
                              delta_updates,
                              learner_config,
                              centering_epsilon=0.01,
                              name=None):
    r"""Centers the tree ensemble bias before adding trees based on feature splits.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the ensemble variable.
    stamp_token: A `Tensor` of type `int64`.
      Stamp token for validating operation consistency.
    next_stamp_token: A `Tensor` of type `int64`.
      Stamp token to be used for the next iteration.
    delta_updates: A `Tensor` of type `float32`.
      Rank 1 Tensor containing delta updates per bias dimension.
    learner_config: A `string`.
      Config for the learner of type LearnerConfig proto.
    centering_epsilon: An optional `float`. Defaults to `0.01`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
    Scalar indicating whether more centering is needed.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "CenterTreeEnsembleBias", name, _ctx._post_execution_callbacks,
                tree_ensemble_handle, stamp_token, next_stamp_token,
                delta_updates, "learner_config", learner_config,
                "centering_epsilon", centering_epsilon)
            return _result
        except _core._FallbackException:
            try:
                return center_tree_ensemble_bias_eager_fallback(
                    tree_ensemble_handle,
                    stamp_token,
                    next_stamp_token,
                    delta_updates,
                    learner_config=learner_config,
                    centering_epsilon=centering_epsilon,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
            except (TypeError, ValueError):
                result = _dispatch.dispatch(
                    center_tree_ensemble_bias,
                    tree_ensemble_handle=tree_ensemble_handle,
                    stamp_token=stamp_token,
                    next_stamp_token=next_stamp_token,
                    delta_updates=delta_updates,
                    learner_config=learner_config,
                    centering_epsilon=centering_epsilon,
                    name=name)
                if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
                    return result
                raise
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    learner_config = _execute.make_str(learner_config, "learner_config")
    if centering_epsilon is None:
        centering_epsilon = 0.01
    centering_epsilon = _execute.make_float(centering_epsilon,
                                            "centering_epsilon")
    try:
        _, _, _op = _op_def_lib._apply_op_helper(
            "CenterTreeEnsembleBias",
            tree_ensemble_handle=tree_ensemble_handle,
            stamp_token=stamp_token,
            next_stamp_token=next_stamp_token,
            delta_updates=delta_updates,
            learner_config=learner_config,
            centering_epsilon=centering_epsilon,
            name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(center_tree_ensemble_bias,
                                    tree_ensemble_handle=tree_ensemble_handle,
                                    stamp_token=stamp_token,
                                    next_stamp_token=next_stamp_token,
                                    delta_updates=delta_updates,
                                    learner_config=learner_config,
                                    centering_epsilon=centering_epsilon,
                                    name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("learner_config", _op.get_attr("learner_config"),
              "centering_epsilon", _op.get_attr("centering_epsilon"))
    _execute.record_gradient("CenterTreeEnsembleBias", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
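A hedged sketch of calling the op above; the empty `learner_config` is for illustration only (a real call passes a serialized LearnerConfig proto), and the ensemble handle reuses the wrapper from Example no. 31:

```python
import tensorflow as tf

ensemble_handle = decision_tree_ensemble_resource_handle_op(
    shared_name="ensemble")                    # see Example no. 31 above
stamp = tf.constant(0, dtype=tf.int64)
deltas = tf.constant([0.1], dtype=tf.float32)  # one delta per bias dimension
more_centering = center_tree_ensemble_bias(
    tree_ensemble_handle=ensemble_handle,
    stamp_token=stamp,
    next_stamp_token=stamp + 1,
    delta_updates=deltas,
    learner_config="")  # placeholder; must be a serialized LearnerConfig
# `more_centering` is a scalar bool: whether another centering step is needed.
```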
Example no. 38
def batch(in_tensors, num_batch_threads, max_batch_size, batch_timeout_micros, grad_timeout_micros, max_enqueued_batches=10, allowed_batch_sizes=[], container="", shared_name="", batching_queue="", name=None):
  r"""Batches all input tensors nondeterministically.

  When many instances of this Op are being run concurrently with the same
  container/shared_name in the same device, some will output zero-shaped Tensors
  and others will output Tensors of size up to max_batch_size.

  All Tensors in in_tensors are batched together (so, for example, labels and
  features should be batched with a single instance of this operation).

  Each invocation of batch emits an `id` scalar which will be used to identify
  this particular invocation when doing unbatch or its gradient.

  Each op which emits a non-empty batch will also emit a non-empty batch_index
  Tensor, which is a [K, 3] matrix where each row contains the invocation's id,
  start, and length of elements of each set of Tensors present in batched_tensors.

  Batched tensors are concatenated along the first dimension, and all tensors in
  in_tensors must have the first dimension of the same size.

  in_tensors: The tensors to be batched.
  num_batch_threads: Number of scheduling threads for processing batches of work.
   Determines the number of batches processed in parallel.
  max_batch_size: Batch sizes will never be bigger than this.
  batch_timeout_micros: Maximum number of microseconds to wait before outputting
   an incomplete batch.
  allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does
   nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
   batches up to one of those sizes. The entries must increase monotonically, and
   the final entry must equal max_batch_size.
  grad_timeout_micros: The timeout to use for the gradient. See Unbatch.
  batched_tensors: Either empty tensors or a batch of concatenated Tensors.
  batch_index: If out_tensors is non-empty, has information to invert it.
  container: Controls the scope of sharing of this batch.
  id: always contains a scalar with a unique ID for this invocation of Batch.
  shared_name: Concurrently running instances of batch in the same device with the
   same container and shared_name will batch their elements together. If left
   empty, the op name will be used as the shared name.
  T: the types of tensors to be batched.

  Args:
    in_tensors: A list of `Tensor` objects.
    num_batch_threads: An `int`.
    max_batch_size: An `int`.
    batch_timeout_micros: An `int`.
    grad_timeout_micros: An `int`.
    max_enqueued_batches: An optional `int`. Defaults to `10`.
    allowed_batch_sizes: An optional list of `ints`. Defaults to `[]`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    batching_queue: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (batched_tensors, batch_index, id).

    batched_tensors: A list of `Tensor` objects. Has the same type as `in_tensors`.
    batch_index: A `Tensor` of type `int64`.
    id: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Batch",
        name, _ctx.post_execution_callbacks, in_tensors, "num_batch_threads",
        num_batch_threads, "max_batch_size", max_batch_size,
        "max_enqueued_batches", max_enqueued_batches, "batch_timeout_micros",
        batch_timeout_micros, "allowed_batch_sizes", allowed_batch_sizes,
        "grad_timeout_micros", grad_timeout_micros, "container", container,
        "shared_name", shared_name, "batching_queue", batching_queue)
      _result = _BatchOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return batch_eager_fallback(
            in_tensors, num_batch_threads=num_batch_threads,
            max_batch_size=max_batch_size,
            max_enqueued_batches=max_enqueued_batches,
            batch_timeout_micros=batch_timeout_micros,
            allowed_batch_sizes=allowed_batch_sizes,
            grad_timeout_micros=grad_timeout_micros, container=container,
            shared_name=shared_name, batching_queue=batching_queue, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  num_batch_threads = _execute.make_int(num_batch_threads, "num_batch_threads")
  max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
  batch_timeout_micros = _execute.make_int(batch_timeout_micros, "batch_timeout_micros")
  grad_timeout_micros = _execute.make_int(grad_timeout_micros, "grad_timeout_micros")
  if max_enqueued_batches is None:
    max_enqueued_batches = 10
  max_enqueued_batches = _execute.make_int(max_enqueued_batches, "max_enqueued_batches")
  if allowed_batch_sizes is None:
    allowed_batch_sizes = []
  if not isinstance(allowed_batch_sizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'allowed_batch_sizes' argument to "
        "'batch' Op, not %r." % allowed_batch_sizes)
  allowed_batch_sizes = [_execute.make_int(_i, "allowed_batch_sizes") for _i in allowed_batch_sizes]
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  if batching_queue is None:
    batching_queue = ""
  batching_queue = _execute.make_str(batching_queue, "batching_queue")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Batch", in_tensors=in_tensors, num_batch_threads=num_batch_threads,
                 max_batch_size=max_batch_size,
                 batch_timeout_micros=batch_timeout_micros,
                 grad_timeout_micros=grad_timeout_micros,
                 max_enqueued_batches=max_enqueued_batches,
                 allowed_batch_sizes=allowed_batch_sizes, container=container,
                 shared_name=shared_name, batching_queue=batching_queue,
                 name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("num_batch_threads", _op.get_attr("num_batch_threads"),
            "max_batch_size", _op.get_attr("max_batch_size"),
            "max_enqueued_batches", _op.get_attr("max_enqueued_batches"),
            "batch_timeout_micros", _op.get_attr("batch_timeout_micros"),
            "allowed_batch_sizes", _op.get_attr("allowed_batch_sizes"),
            "grad_timeout_micros", _op.get_attr("grad_timeout_micros"),
            "container", _op.get_attr("container"), "shared_name",
            _op.get_attr("shared_name"), "batching_queue",
            _op.get_attr("batching_queue"), "T", _op.get_attr("T"))
  _execute.record_gradient(
      "Batch", _inputs_flat, _attrs, _result, name)
  _result = [_result[:len(in_tensors)]] + _result[len(in_tensors):]
  _result = _BatchOutput._make(_result)
  return _result