def single_image_random_dot_stereograms_eager_fallback(depth_values, hidden_surface_removal=True, convergence_dots_size=8, dots_per_inch=72, eye_separation=2.5, mu=0.3333, normalize=True, normalize_max=-100, normalize_min=100, border_level=0, number_colors=256, output_image_shape=[1024, 768, 1], output_data_window=[1022, 757], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function single_image_random_dot_stereograms
  """
  _ctx = ctx if ctx else _context.context()
  if hidden_surface_removal is None:
    hidden_surface_removal = True
  hidden_surface_removal = _execute.make_bool(hidden_surface_removal, "hidden_surface_removal")
  if convergence_dots_size is None:
    convergence_dots_size = 8
  convergence_dots_size = _execute.make_int(convergence_dots_size, "convergence_dots_size")
  if dots_per_inch is None:
    dots_per_inch = 72
  dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
  if eye_separation is None:
    eye_separation = 2.5
  eye_separation = _execute.make_float(eye_separation, "eye_separation")
  if mu is None:
    mu = 0.3333
  mu = _execute.make_float(mu, "mu")
  if normalize is None:
    normalize = True
  normalize = _execute.make_bool(normalize, "normalize")
  if normalize_max is None:
    normalize_max = -100
  normalize_max = _execute.make_float(normalize_max, "normalize_max")
  if normalize_min is None:
    normalize_min = 100
  normalize_min = _execute.make_float(normalize_min, "normalize_min")
  if border_level is None:
    border_level = 0
  border_level = _execute.make_float(border_level, "border_level")
  if number_colors is None:
    number_colors = 256
  number_colors = _execute.make_int(number_colors, "number_colors")
  if output_image_shape is None:
    output_image_shape = [1024, 768, 1]
  output_image_shape = _execute.make_shape(output_image_shape, "output_image_shape")
  if output_data_window is None:
    output_data_window = [1022, 757]
  output_data_window = _execute.make_shape(output_data_window, "output_data_window")
  _attr_T, (depth_values,) = _execute.args_to_matching_eager([depth_values], _ctx)
  _inputs_flat = [depth_values]
  _attrs = ("T", _attr_T, "hidden_surface_removal", hidden_surface_removal,
  "convergence_dots_size", convergence_dots_size, "dots_per_inch",
  dots_per_inch, "eye_separation", eye_separation, "mu", mu, "normalize",
  normalize, "normalize_max", normalize_max, "normalize_min", normalize_min,
  "border_level", border_level, "number_colors", number_colors,
  "output_image_shape", output_image_shape, "output_data_window",
  output_data_window)
  _result = _execute.execute(b"SingleImageRandomDotStereograms", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "SingleImageRandomDotStereograms", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def grow_tree_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate, dropout_seed, max_tree_depth, weak_learner_type, partition_ids, gains, splits, learner_config, center_bias, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function grow_tree_ensemble
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(partition_ids, (list, tuple)):
    raise TypeError(
        "Expected list for 'partition_ids' argument to "
        "'grow_tree_ensemble' Op, not %r." % partition_ids)
  _attr_num_handlers = len(partition_ids)
  if not isinstance(gains, (list, tuple)):
    raise TypeError(
        "Expected list for 'gains' argument to "
        "'grow_tree_ensemble' Op, not %r." % gains)
  if len(gains) != _attr_num_handlers:
    raise ValueError(
        "List argument 'gains' to 'grow_tree_ensemble' Op with length %d "
        "must match length %d of argument 'partition_ids'." %
        (len(gains), _attr_num_handlers))
  if not isinstance(splits, (list, tuple)):
    raise TypeError(
        "Expected list for 'splits' argument to "
        "'grow_tree_ensemble' Op, not %r." % splits)
  if len(splits) != _attr_num_handlers:
    raise ValueError(
        "List argument 'splits' to 'grow_tree_ensemble' Op with length %d "
        "must match length %d of argument 'partition_ids'." %
        (len(splits), _attr_num_handlers))
  learner_config = _execute.make_str(learner_config, "learner_config")
  center_bias = _execute.make_bool(center_bias, "center_bias")
  tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
  stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
  next_stamp_token = _ops.convert_to_tensor(next_stamp_token, _dtypes.int64)
  learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32)
  dropout_seed = _ops.convert_to_tensor(dropout_seed, _dtypes.int64)
  max_tree_depth = _ops.convert_to_tensor(max_tree_depth, _dtypes.int32)
  weak_learner_type = _ops.convert_to_tensor(weak_learner_type, _dtypes.int32)
  partition_ids = _ops.convert_n_to_tensor(partition_ids, _dtypes.int32)
  gains = _ops.convert_n_to_tensor(gains, _dtypes.float32)
  splits = _ops.convert_n_to_tensor(splits, _dtypes.string)
  _inputs_flat = [tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate, dropout_seed, max_tree_depth, weak_learner_type] + list(partition_ids) + list(gains) + list(splits)
  _attrs = ("learner_config", learner_config, "num_handlers",
  _attr_num_handlers, "center_bias", center_bias)
  _result = _execute.execute(b"GrowTreeEnsemble", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _result = None
  return _result
def sparse_feature_cross_v2_eager_fallback(indices, values, shapes, dense, hashed_output, num_buckets, hash_key, out_type, internal_type, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function sparse_feature_cross_v2
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_feature_cross_v2' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  hashed_output = _execute.make_bool(hashed_output, "hashed_output")
  num_buckets = _execute.make_int(num_buckets, "num_buckets")
  hash_key = _execute.make_int(hash_key, "hash_key")
  out_type = _execute.make_type(out_type, "out_type")
  internal_type = _execute.make_type(internal_type, "internal_type")
  _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
  _attr_dense_types, dense = _execute.convert_to_mixed_eager_tensors(dense, _ctx)
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
  shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
  _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense)
  _attrs = ("N", _attr_N, "hashed_output", hashed_output, "num_buckets",
  num_buckets, "hash_key", hash_key, "sparse_types", _attr_sparse_types,
  "dense_types", _attr_dense_types, "out_type", out_type, "internal_type",
  internal_type)
  _result = _execute.execute(b"SparseFeatureCrossV2", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SparseFeatureCrossV2", _inputs_flat, _attrs, _result, name)
  _result = _SparseFeatureCrossV2Output._make(_result)
  return _result
def rpc(address,
        method,
        request,
        protocol="",
        fail_fast=True,
        timeout_in_ms=0,
        name=None):
    r"""TODO: add doc.

  Args:
    address: A `Tensor` of type `string`.
    method: A `Tensor` of type `string`.
    request: A `Tensor` of type `string`.
    protocol: An optional `string`. Defaults to `""`.
    fail_fast: An optional `bool`. Defaults to `True`.
    timeout_in_ms: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if protocol is None:
            protocol = ""
        protocol = _execute.make_str(protocol, "protocol")
        if fail_fast is None:
            fail_fast = True
        fail_fast = _execute.make_bool(fail_fast, "fail_fast")
        if timeout_in_ms is None:
            timeout_in_ms = 0
        timeout_in_ms = _execute.make_int(timeout_in_ms, "timeout_in_ms")
        _, _, _op = _op_def_lib._apply_op_helper("Rpc",
                                                 address=address,
                                                 method=method,
                                                 request=request,
                                                 protocol=protocol,
                                                 fail_fast=fail_fast,
                                                 timeout_in_ms=timeout_in_ms,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("protocol", _op.get_attr("protocol"), "fail_fast",
                  _op.get_attr("fail_fast"), "timeout_in_ms",
                  _op.get_attr("timeout_in_ms"))
        _execute.record_gradient("Rpc", _inputs_flat, _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name, "Rpc",
                name, _ctx._post_execution_callbacks, address, method, request,
                "protocol", protocol, "fail_fast", fail_fast, "timeout_in_ms",
                timeout_in_ms)
            return _result
        except _core._FallbackException:
            return rpc_eager_fallback(address,
                                      method,
                                      request,
                                      protocol=protocol,
                                      fail_fast=fail_fast,
                                      timeout_in_ms=timeout_in_ms,
                                      name=name,
                                      ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
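
# A minimal usage sketch for the `rpc` wrapper above. The address, method and
# timeout below are hypothetical placeholders, not values mandated by the op;
# `request` is expected to be a string tensor (or list) of serialized request protos.
def _example_rpc_call(request):
    return rpc(
        address="localhost:8500",                 # hypothetical server endpoint
        method="/my.package.MyService/MyMethod",  # hypothetical gRPC method
        request=request,
        protocol="grpc",
        fail_fast=True,
        timeout_in_ms=5000)
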
def decode_proto_v2(bytes, message_type, field_names, output_types, descriptor_source="local://", message_format="binary", sanitize=False, name=None):
  r"""The op extracts fields from a serialized protocol buffers message into tensors.

  The `decode_proto` op extracts fields from a serialized protocol buffers
  message into tensors.  The fields in `field_names` are decoded and converted
  to the corresponding `output_types` if possible.

  A `message_type` name must be provided to give context for the field
  names. The actual message descriptor can be looked up either in the
  linked-in descriptor pool or a filename provided by the caller using
  the `descriptor_source` attribute.

  Each output tensor is a dense tensor. This means that it is padded to
  hold the largest number of repeated elements seen in the input
  minibatch. (The shape is also padded by one to prevent zero-sized
  dimensions). The actual repeat counts for each example in the
  minibatch can be found in the `sizes` output. In many cases the output
  of `decode_proto` is fed immediately into tf.squeeze if missing values
  are not a concern. When using tf.squeeze, always pass the squeeze
  dimension explicitly to avoid surprises.

  For the most part, the mapping between Proto field types and
  TensorFlow dtypes is straightforward. However, there are a few
  special cases:

  - A proto field that contains a submessage or group can only be converted
  to `DT_STRING` (the serialized submessage). This is to reduce the
  complexity of the API. The resulting string can be used as input
  to another instance of the decode_proto op.

  - TensorFlow lacks support for unsigned integers. The ops represent uint64
  types as a `DT_INT64` with the same twos-complement bit pattern
  (the obvious way). Unsigned int32 values can be represented exactly by
  specifying type `DT_INT64`, or using twos-complement if the caller
  specifies `DT_INT32` in the `output_types` attribute.

  The `descriptor_source` attribute selects a source of protocol
  descriptors to consult when looking up `message_type`. This may be a
  filename containing a serialized `FileDescriptorSet` message,
  or the special value `local://`, in which case only descriptors linked
  into the code will be searched; the filename can be on any filesystem
  accessible to TensorFlow.

  You can build a `descriptor_source` file using the `--descriptor_set_out`
  and `--include_imports` options to the protocol compiler `protoc`.

  The `local://` database only covers descriptors linked into the
  code via C++ libraries, not Python imports. You can link in a proto descriptor
  by creating a cc_library target with alwayslink=1.

  Both binary and text proto serializations are supported, and can be
  chosen using the `message_format` attribute.

  Args:
    bytes: A `Tensor` of type `string`.
      Tensor of serialized protos with shape `batch_shape`.
    message_type: A `string`. Name of the proto message type to decode.
    field_names: A list of `strings`.
      List of strings containing proto field names. An extension field can be decoded
      by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME.
    output_types: A list of `tf.DTypes`.
      List of TF types to use for the respective field in field_names.
    descriptor_source: An optional `string`. Defaults to `"local://"`.
      Either the special value `local://` or a path to a file containing
      a serialized `FileDescriptorSet`.
    message_format: An optional `string`. Defaults to `"binary"`.
      Either `binary` or `text`.
    sanitize: An optional `bool`. Defaults to `False`.
      Whether to sanitize the result or not.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sizes, values).

    sizes: A `Tensor` of type `int32`.
    values: A list of `Tensor` objects of type `output_types`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "DecodeProtoV2", name, _ctx._post_execution_callbacks, bytes,
        "message_type", message_type, "field_names", field_names,
        "output_types", output_types, "descriptor_source", descriptor_source,
        "message_format", message_format, "sanitize", sanitize)
      _result = _DecodeProtoV2Output._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return decode_proto_v2_eager_fallback(
            bytes, message_type=message_type, field_names=field_names,
            output_types=output_types, descriptor_source=descriptor_source,
            message_format=message_format, sanitize=sanitize, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  message_type = _execute.make_str(message_type, "message_type")
  if not isinstance(field_names, (list, tuple)):
    raise TypeError(
        "Expected list for 'field_names' argument to "
        "'decode_proto_v2' Op, not %r." % field_names)
  field_names = [_execute.make_str(_s, "field_names") for _s in field_names]
  if not isinstance(output_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'output_types' argument to "
        "'decode_proto_v2' Op, not %r." % output_types)
  output_types = [_execute.make_type(_t, "output_types") for _t in output_types]
  if descriptor_source is None:
    descriptor_source = "local://"
  descriptor_source = _execute.make_str(descriptor_source, "descriptor_source")
  if message_format is None:
    message_format = "binary"
  message_format = _execute.make_str(message_format, "message_format")
  if sanitize is None:
    sanitize = False
  sanitize = _execute.make_bool(sanitize, "sanitize")
  _, _, _op = _op_def_lib._apply_op_helper(
        "DecodeProtoV2", bytes=bytes, message_type=message_type,
                         field_names=field_names, output_types=output_types,
                         descriptor_source=descriptor_source,
                         message_format=message_format, sanitize=sanitize,
                         name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("message_type", _op.get_attr("message_type"), "field_names",
            _op.get_attr("field_names"), "output_types",
            _op.get_attr("output_types"), "descriptor_source",
            _op.get_attr("descriptor_source"), "message_format",
            _op.get_attr("message_format"), "sanitize",
            _op.get_attr("sanitize"))
  _execute.record_gradient(
      "DecodeProtoV2", _inputs_flat, _attrs, _result, name)
  _result = _result[:1] + [_result[1:]]
  _result = _DecodeProtoV2Output._make(_result)
  return _result
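
# A minimal usage sketch for `decode_proto_v2` above. The message type and field
# name are hypothetical; they must match a descriptor that is either linked into
# the binary or supplied through `descriptor_source`.
def _example_decode_proto(serialized):
  sizes, values = decode_proto_v2(
      bytes=serialized,                     # string tensor of serialized protos
      message_type="my.package.MyMessage",  # hypothetical message type
      field_names=["my_field"],             # hypothetical field name
      output_types=[_dtypes.string])
  # `sizes` holds the per-example repeat count of each requested field; every
  # tensor in `values` is padded to the largest repeat count seen in the batch.
  return sizes, values
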
def sdca_optimizer_eager_fallback(sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data, loss_type, l1, l2, num_loss_partitions, num_inner_iterations, adaptative=False, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function sdca_optimizer
  """
  _ctx = _context.context()
  if not isinstance(sparse_example_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_example_indices' argument to "
        "'sdca_optimizer' Op, not %r." % sparse_example_indices)
  _attr_num_sparse_features = len(sparse_example_indices)
  if not isinstance(sparse_feature_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_feature_indices' argument to "
        "'sdca_optimizer' Op, not %r." % sparse_feature_indices)
  if len(sparse_feature_indices) != _attr_num_sparse_features:
    raise ValueError(
        "List argument 'sparse_feature_indices' to 'sdca_optimizer' Op with length %d "
        "must match length %d of argument 'sparse_example_indices'." %
        (len(sparse_feature_indices), _attr_num_sparse_features))
  if not isinstance(sparse_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_indices' argument to "
        "'sdca_optimizer' Op, not %r." % sparse_indices)
  if len(sparse_indices) != _attr_num_sparse_features:
    raise ValueError(
        "List argument 'sparse_indices' to 'sdca_optimizer' Op with length %d "
        "must match length %d of argument 'sparse_example_indices'." %
        (len(sparse_indices), _attr_num_sparse_features))
  if not isinstance(sparse_weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_weights' argument to "
        "'sdca_optimizer' Op, not %r." % sparse_weights)
  if len(sparse_weights) != _attr_num_sparse_features:
    raise ValueError(
        "List argument 'sparse_weights' to 'sdca_optimizer' Op with length %d "
        "must match length %d of argument 'sparse_example_indices'." %
        (len(sparse_weights), _attr_num_sparse_features))
  if not isinstance(sparse_feature_values, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_feature_values' argument to "
        "'sdca_optimizer' Op, not %r." % sparse_feature_values)
  _attr_num_sparse_features_with_values = len(sparse_feature_values)
  if not isinstance(dense_features, (list, tuple)):
    raise TypeError(
        "Expected list for 'dense_features' argument to "
        "'sdca_optimizer' Op, not %r." % dense_features)
  _attr_num_dense_features = len(dense_features)
  if not isinstance(dense_weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'dense_weights' argument to "
        "'sdca_optimizer' Op, not %r." % dense_weights)
  if len(dense_weights) != _attr_num_dense_features:
    raise ValueError(
        "List argument 'dense_weights' to 'sdca_optimizer' Op with length %d "
        "must match length %d of argument 'dense_features'." %
        (len(dense_weights), _attr_num_dense_features))
  loss_type = _execute.make_str(loss_type, "loss_type")
  l1 = _execute.make_float(l1, "l1")
  l2 = _execute.make_float(l2, "l2")
  num_loss_partitions = _execute.make_int(num_loss_partitions, "num_loss_partitions")
  num_inner_iterations = _execute.make_int(num_inner_iterations, "num_inner_iterations")
  if adaptative is None:
    adaptative = False
  adaptative = _execute.make_bool(adaptative, "adaptative")
  sparse_example_indices = _ops.convert_n_to_tensor(sparse_example_indices, _dtypes.int64)
  sparse_feature_indices = _ops.convert_n_to_tensor(sparse_feature_indices, _dtypes.int64)
  sparse_feature_values = _ops.convert_n_to_tensor(sparse_feature_values, _dtypes.float32)
  dense_features = _ops.convert_n_to_tensor(dense_features, _dtypes.float32)
  example_weights = _ops.convert_to_tensor(example_weights, _dtypes.float32)
  example_labels = _ops.convert_to_tensor(example_labels, _dtypes.float32)
  sparse_indices = _ops.convert_n_to_tensor(sparse_indices, _dtypes.int64)
  sparse_weights = _ops.convert_n_to_tensor(sparse_weights, _dtypes.float32)
  dense_weights = _ops.convert_n_to_tensor(dense_weights, _dtypes.float32)
  example_state_data = _ops.convert_to_tensor(example_state_data, _dtypes.float32)
  _inputs_flat = list(sparse_example_indices) + list(sparse_feature_indices) + list(sparse_feature_values) + list(dense_features) + [example_weights, example_labels] + list(sparse_indices) + list(sparse_weights) + list(dense_weights) + [example_state_data]
  _attrs = ("loss_type", loss_type, "adaptative", adaptative,
  "num_sparse_features", _attr_num_sparse_features,
  "num_sparse_features_with_values", _attr_num_sparse_features_with_values,
  "num_dense_features", _attr_num_dense_features, "l1", l1, "l2", l2,
  "num_loss_partitions", num_loss_partitions, "num_inner_iterations",
  num_inner_iterations)
  _result = _execute.execute(b"SdcaOptimizer", _attr_num_sparse_features +
                             _attr_num_dense_features + 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "SdcaOptimizer", _inputs_flat, _attrs, _result, name)
  _result = _result[:1] + [_result[1:1 + _attr_num_sparse_features]] + _result[1 + _attr_num_sparse_features:]
  _result = _result[:2] + [_result[2:]]
  _result = _SdcaOptimizerOutput._make(_result)
  return _result
def debug_numeric_summary(input,
                          device_name="",
                          tensor_name="",
                          debug_urls=[],
                          lower_bound=float('-inf'),
                          upper_bound=float('inf'),
                          mute_if_healthy=False,
                          gated_grpc=False,
                          name=None):
    r"""Debug Numeric Summary Op.

  Provide a basic summary of numeric value types, range and distribution.

  output: A double tensor of shape [14 + nDimensions], where nDimensions is the
    number of dimensions of the tensor's shape. The elements of output are:
    [0]: is initialized (1.0) or not (0.0).
    [1]: total number of elements
    [2]: NaN element count
    [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
      default.
    [4]: negative element count (excluding -inf), if lower_bound is the default
      -inf. Otherwise, this is the count of elements > lower_bound and < 0.
    [5]: zero element count
    [6]: positive element count (excluding +inf), if upper_bound is the default
      +inf. Otherwise, this is the count of elements < upper_bound and > 0.
    [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
      default.
  Output elements [1:8] are all zero if the tensor is uninitialized.
    [8]: minimum of all non-inf and non-NaN elements.
         If uninitialized or no such element exists: +inf.
    [9]: maximum of all non-inf and non-NaN elements.
         If uninitialized or no such element exists: -inf.
    [10]: mean of all non-inf and non-NaN elements.
          If uninitialized or no such element exists: NaN.
    [11]: variance of all non-inf and non-NaN elements.
          If uninitialized or no such element exists: NaN.
    [12]: Data type of the tensor encoded as an enum integer. See the DataType
          proto for more details.
    [13]: Number of dimensions of the tensor (ndims).
    [14+]: Sizes of the dimensions.

  Args:
    input: A `Tensor`. Input tensor, non-Reference type.
    device_name: An optional `string`. Defaults to `""`.
    tensor_name: An optional `string`. Defaults to `""`.
      Name of the input tensor.
    debug_urls: An optional list of `strings`. Defaults to `[]`.
      List of URLs to debug targets, e.g.,
        file:///foo/tfdbg_dump, grpc://localhost:11011.
    lower_bound: An optional `float`. Defaults to `float('-inf')`.
      (float) Elements less than or equal to this bound are counted in the
        generalized -inf count. Default: -inf.
    upper_bound: An optional `float`. Defaults to `float('inf')`.
      (float) Elements greater than or equal to this bound are counted in the
        generalized +inf count. Default: +inf.
    mute_if_healthy: An optional `bool`. Defaults to `False`.
      (bool) Do not send data to the debug URLs unless at least one
        of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and
        inf counts) is non-zero.
    gated_grpc: An optional `bool`. Defaults to `False`.
      Whether this op will be gated. If any of the debug_urls of this
        debug node is of the grpc:// scheme, when the value of this attribute is set
        to True, the data will not actually be sent via the grpc stream unless this
        debug op has been enabled at the debug_url. If all of the debug_urls of this
        debug node are of the grpc:// scheme and the debug op is enabled at none of
        them, the output will be an empty Tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float64`.
  """
    _ctx = _context._context or _context.context()
    tld = _ctx._thread_local_data
    if tld.is_eager:
        try:
            _result = pywrap_tfe.TFE_Py_FastPathExecute(
                _ctx._context_handle, tld.device_name, "DebugNumericSummary",
                name, tld.op_callbacks, input, "device_name", device_name,
                "tensor_name", tensor_name, "debug_urls", debug_urls,
                "lower_bound", lower_bound, "upper_bound", upper_bound,
                "mute_if_healthy", mute_if_healthy, "gated_grpc", gated_grpc)
            return _result
        except _core._NotOkStatusException as e:
            _ops.raise_from_not_ok_status(e, name)
        except _core._FallbackException:
            pass
        try:
            return debug_numeric_summary_eager_fallback(
                input,
                device_name=device_name,
                tensor_name=tensor_name,
                debug_urls=debug_urls,
                lower_bound=lower_bound,
                upper_bound=upper_bound,
                mute_if_healthy=mute_if_healthy,
                gated_grpc=gated_grpc,
                name=name,
                ctx=_ctx)
        except _core._SymbolicException:
            pass  # Add nodes to the TensorFlow graph.
    # Add nodes to the TensorFlow graph.
    if device_name is None:
        device_name = ""
    device_name = _execute.make_str(device_name, "device_name")
    if tensor_name is None:
        tensor_name = ""
    tensor_name = _execute.make_str(tensor_name, "tensor_name")
    if debug_urls is None:
        debug_urls = []
    if not isinstance(debug_urls, (list, tuple)):
        raise TypeError("Expected list for 'debug_urls' argument to "
                        "'debug_numeric_summary' Op, not %r." % debug_urls)
    debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
    if lower_bound is None:
        lower_bound = float('-inf')
    lower_bound = _execute.make_float(lower_bound, "lower_bound")
    if upper_bound is None:
        upper_bound = float('inf')
    upper_bound = _execute.make_float(upper_bound, "upper_bound")
    if mute_if_healthy is None:
        mute_if_healthy = False
    mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
    if gated_grpc is None:
        gated_grpc = False
    gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DebugNumericSummary",
        input=input,
        device_name=device_name,
        tensor_name=tensor_name,
        debug_urls=debug_urls,
        lower_bound=lower_bound,
        upper_bound=upper_bound,
        mute_if_healthy=mute_if_healthy,
        gated_grpc=gated_grpc,
        name=name)
    _result = _outputs[:]
    if _execute.must_record_gradient():
        _attrs = ("T", _op._get_attr_type("T"), "device_name",
                  _op.get_attr("device_name"), "tensor_name",
                  _op.get_attr("tensor_name"), "debug_urls",
                  _op.get_attr("debug_urls"), "lower_bound",
                  _op.get_attr("lower_bound"), "upper_bound",
                  _op.get_attr("upper_bound"), "mute_if_healthy",
                  _op._get_attr_bool("mute_if_healthy"), "gated_grpc",
                  _op._get_attr_bool("gated_grpc"))
        _inputs_flat = _op.inputs
        _execute.record_gradient("DebugNumericSummary", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
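
# A minimal usage sketch for `debug_numeric_summary` above. Per the docstring,
# element [2] of the result is the NaN count and elements [3]/[7] are the
# generalized -inf/+inf counts; the tensor name below is a hypothetical label.
def _example_debug_summary(tensor):
    return debug_numeric_summary(
        tensor,
        tensor_name="example_tensor",  # hypothetical name used in debug dumps
        mute_if_healthy=True)          # only emit to debug URLs on NaN/Inf
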
def sparse_feature_cross(indices,
                         values,
                         shapes,
                         dense,
                         hashed_output,
                         num_buckets,
                         out_type,
                         internal_type,
                         name=None):
    r"""Generates sparse cross form a list of sparse tensors.

  The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
  representing features of one feature column. It outputs a 2D `SparseTensor` with
  the batchwise crosses of these features.

  For example, if the inputs are

      inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

      inputs[2]: Tensor [["f"], ["g"]]

  then the output will be

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  if hashed_output=true then the output will be

      shape = [2, 2]
      [0, 0]: HashCombine(
                  Fingerprint64("f"), HashCombine(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    indices: A list of `Tensor` objects with type `int64`.
      2-D.  Indices of each input `SparseTensor`.
    values: A list of `Tensor` objects with types from: `int64`, `string`.
      1-D.   values of each `SparseTensor`.
    shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.
      1-D.   Shapes of each `SparseTensor`.
    dense: A list of `Tensor` objects with types from: `int64`, `string`.
      2-D.    Columns represented by dense `Tensor`.
    hashed_output: A `bool`.
    num_buckets: An `int` that is `>= 0`.
    out_type: A `tf.DType` from: `tf.int64, tf.string`.
    internal_type: A `tf.DType` from: `tf.int64, tf.string`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`. 2-D.  Indices of the concatenated `SparseTensor`.
    output_values: A `Tensor` of type `out_type`. 1-D.  Non-empty values of the concatenated or hashed
      `SparseTensor`.
    output_shape: A `Tensor` of type `int64`. 1-D.  Shape of the concatenated `SparseTensor`.
  """
    if not isinstance(indices, (list, tuple)):
        raise TypeError("Expected list for 'indices' argument to "
                        "'sparse_feature_cross' Op, not %r." % indices)
    _attr_N = len(indices)
    if not isinstance(shapes, (list, tuple)):
        raise TypeError("Expected list for 'shapes' argument to "
                        "'sparse_feature_cross' Op, not %r." % shapes)
    if len(shapes) != _attr_N:
        raise ValueError(
            "List argument 'shapes' to 'sparse_feature_cross' Op with length %d "
            "must match length %d of argument 'indices'." %
            (len(shapes), _attr_N))
    hashed_output = _execute.make_bool(hashed_output, "hashed_output")
    num_buckets = _execute.make_int(num_buckets, "num_buckets")
    out_type = _execute.make_type(out_type, "out_type")
    internal_type = _execute.make_type(internal_type, "internal_type")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("SparseFeatureCross",
                                                 indices=indices,
                                                 values=values,
                                                 shapes=shapes,
                                                 dense=dense,
                                                 hashed_output=hashed_output,
                                                 num_buckets=num_buckets,
                                                 out_type=out_type,
                                                 internal_type=internal_type,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("N", _op.get_attr("N"), "hashed_output",
                  _op.get_attr("hashed_output"), "num_buckets",
                  _op.get_attr("num_buckets"), "sparse_types",
                  _op.get_attr("sparse_types"), "dense_types",
                  _op.get_attr("dense_types"), "out_type",
                  _op.get_attr("out_type"), "internal_type",
                  _op.get_attr("internal_type"))
    else:
        _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(
            values, _ctx)
        _attr_sparse_types = [_t.as_datatype_enum for _t in _attr_sparse_types]
        _attr_dense_types, dense = _execute.convert_to_mixed_eager_tensors(
            dense, _ctx)
        _attr_dense_types = [_t.as_datatype_enum for _t in _attr_dense_types]
        indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
        shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
        _inputs_flat = list(indices) + list(values) + list(shapes) + list(
            dense)
        _attrs = ("N", _attr_N, "hashed_output", hashed_output, "num_buckets",
                  num_buckets, "sparse_types", _attr_sparse_types,
                  "dense_types", _attr_dense_types, "out_type", out_type,
                  "internal_type", internal_type)
        _result = _execute.execute(b"SparseFeatureCross",
                                   3,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("SparseFeatureCross", _inputs_flat, _attrs,
                             _result, name)
    _result = _SparseFeatureCrossOutput._make(_result)
    return _result
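
# A minimal usage sketch for `sparse_feature_cross` above, following the layout
# described in the docstring: each sparse feature column contributes parallel
# entries to the indices/values/shapes lists, and dense columns are passed as a
# list of 2-D tensors. The inputs are assumed to be string feature columns.
def _example_sparse_cross(sp_a, sp_b, dense_col):
    # sp_a and sp_b are SparseTensor feature columns; dense_col is a 2-D string tensor.
    return sparse_feature_cross(
        indices=[sp_a.indices, sp_b.indices],
        values=[sp_a.values, sp_b.values],
        shapes=[sp_a.dense_shape, sp_b.dense_shape],
        dense=[dense_col],
        hashed_output=False,
        num_buckets=0,
        out_type=_dtypes.string,
        internal_type=_dtypes.string)
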
def ctc_loss(inputs,
             labels_indices,
             labels_values,
             sequence_length,
             preprocess_collapse_repeated=False,
             ctc_merge_repeated=True,
             ignore_longer_outputs_than_inputs=False,
             name=None):
    r"""Calculates the CTC Loss (log probability) for each batch entry.  Also calculates

  the gradient.  This class performs the softmax operation for you, so inputs
  should be e.g. linear projections of outputs by an LSTM.

  Args:
    inputs: A `Tensor` of type `float32`.
      3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    labels_indices: A `Tensor` of type `int64`.
      The indices of a `SparseTensor<int32, 2>`.
      `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
      `(batch b, time t)`.
    labels_values: A `Tensor` of type `int32`.
      The values (labels) associated with the given batch and time.
    sequence_length: A `Tensor` of type `int32`.
      A vector containing sequence lengths (batch).
    preprocess_collapse_repeated: An optional `bool`. Defaults to `False`.
      Scalar, if true then repeated labels are
      collapsed prior to the CTC calculation.
    ctc_merge_repeated: An optional `bool`. Defaults to `True`.
      Scalar.  If set to false, *during* CTC calculation
      repeated non-blank labels will not be merged and are interpreted as
      individual labels.  This is a simplified version of CTC.
    ignore_longer_outputs_than_inputs: An optional `bool`. Defaults to `False`.
      Scalar. If set to true, during CTC
      calculation, items that have longer output sequences than input sequences
      are skipped: they don't contribute to the loss term and have zero-gradient.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (loss, gradient).

    loss: A `Tensor` of type `float32`.
    gradient: A `Tensor` of type `float32`.
  """
    _ctx = _context._context
    if _ctx is not None and _ctx._eager_context.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "CTCLoss", name, _ctx._post_execution_callbacks, inputs,
                labels_indices, labels_values, sequence_length,
                "preprocess_collapse_repeated", preprocess_collapse_repeated,
                "ctc_merge_repeated", ctc_merge_repeated,
                "ignore_longer_outputs_than_inputs",
                ignore_longer_outputs_than_inputs)
            _result = _CTCLossOutput._make(_result)
            return _result
        except _core._FallbackException:
            try:
                return ctc_loss_eager_fallback(
                    inputs,
                    labels_indices,
                    labels_values,
                    sequence_length,
                    preprocess_collapse_repeated=preprocess_collapse_repeated,
                    ctc_merge_repeated=ctc_merge_repeated,
                    ignore_longer_outputs_than_inputs=
                    ignore_longer_outputs_than_inputs,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if preprocess_collapse_repeated is None:
        preprocess_collapse_repeated = False
    preprocess_collapse_repeated = _execute.make_bool(
        preprocess_collapse_repeated, "preprocess_collapse_repeated")
    if ctc_merge_repeated is None:
        ctc_merge_repeated = True
    ctc_merge_repeated = _execute.make_bool(ctc_merge_repeated,
                                            "ctc_merge_repeated")
    if ignore_longer_outputs_than_inputs is None:
        ignore_longer_outputs_than_inputs = False
    ignore_longer_outputs_than_inputs = _execute.make_bool(
        ignore_longer_outputs_than_inputs, "ignore_longer_outputs_than_inputs")
    _, _, _op = _op_def_lib._apply_op_helper(
        "CTCLoss",
        inputs=inputs,
        labels_indices=labels_indices,
        labels_values=labels_values,
        sequence_length=sequence_length,
        preprocess_collapse_repeated=preprocess_collapse_repeated,
        ctc_merge_repeated=ctc_merge_repeated,
        ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("preprocess_collapse_repeated",
              _op.get_attr("preprocess_collapse_repeated"),
              "ctc_merge_repeated", _op.get_attr("ctc_merge_repeated"),
              "ignore_longer_outputs_than_inputs",
              _op.get_attr("ignore_longer_outputs_than_inputs"))
    _execute.record_gradient("CTCLoss", _inputs_flat, _attrs, _result, name)
    _result = _CTCLossOutput._make(_result)
    return _result
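
# A minimal usage sketch for `ctc_loss` above. `logits` is time-major with shape
# (max_time, batch_size, num_classes); the labels are supplied as the indices and
# values of a SparseTensor, plus a per-example sequence-length vector.
def _example_ctc_loss(logits, label_indices, label_values, seq_len):
    loss, gradient = ctc_loss(
        inputs=logits,
        labels_indices=label_indices,
        labels_values=label_values,
        sequence_length=seq_len,
        ctc_merge_repeated=True)
    return loss, gradient
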
def grow_tree_ensemble_eager_fallback(tree_ensemble_handle,
                                      stamp_token,
                                      next_stamp_token,
                                      learning_rate,
                                      dropout_seed,
                                      partition_ids,
                                      gains,
                                      splits,
                                      learner_config,
                                      center_bias,
                                      name=None,
                                      ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function grow_tree_ensemble
  """
    _ctx = ctx if ctx else _context.context()
    if not isinstance(partition_ids, (list, tuple)):
        raise TypeError("Expected list for 'partition_ids' argument to "
                        "'grow_tree_ensemble' Op, not %r." % partition_ids)
    _attr_num_handlers = len(partition_ids)
    if not isinstance(gains, (list, tuple)):
        raise TypeError("Expected list for 'gains' argument to "
                        "'grow_tree_ensemble' Op, not %r." % gains)
    if len(gains) != _attr_num_handlers:
        raise ValueError(
            "List argument 'gains' to 'grow_tree_ensemble' Op with length %d "
            "must match length %d of argument 'partition_ids'." %
            (len(gains), _attr_num_handlers))
    if not isinstance(splits, (list, tuple)):
        raise TypeError("Expected list for 'splits' argument to "
                        "'grow_tree_ensemble' Op, not %r." % splits)
    if len(splits) != _attr_num_handlers:
        raise ValueError(
            "List argument 'splits' to 'grow_tree_ensemble' Op with length %d "
            "must match length %d of argument 'partition_ids'." %
            (len(splits), _attr_num_handlers))
    learner_config = _execute.make_str(learner_config, "learner_config")
    center_bias = _execute.make_bool(center_bias, "center_bias")
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle,
                                                  _dtypes.resource)
    stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
    next_stamp_token = _ops.convert_to_tensor(next_stamp_token, _dtypes.int64)
    learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32)
    dropout_seed = _ops.convert_to_tensor(dropout_seed, _dtypes.int64)
    partition_ids = _ops.convert_n_to_tensor(partition_ids, _dtypes.int32)
    gains = _ops.convert_n_to_tensor(gains, _dtypes.float32)
    splits = _ops.convert_n_to_tensor(splits, _dtypes.string)
    _inputs_flat = [
        tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate,
        dropout_seed
    ] + list(partition_ids) + list(gains) + list(splits)
    _attrs = ("learner_config", learner_config, "num_handlers",
              _attr_num_handlers, "center_bias", center_bias)
    _result = _execute.execute(b"GrowTreeEnsemble",
                               0,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _result = None
    return _result
def _uniform_candidate_sampler(true_classes,
                               num_true,
                               num_sampled,
                               unique,
                               range_max,
                               seed=0,
                               seed2=0,
                               name=None):
    r"""Generates labels for candidate sampling with a uniform distribution.

  See explanations of candidate sampling and the data formats at
  go/candidate-sampling.

  For each batch, this op picks a single set of sampled candidate labels.

  The advantages of sampling candidates per-batch are simplicity and the
  possibility of efficient dense matrix multiplication. The disadvantage is that
  the sampled candidates must be chosen independently of the context and of the
  true labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      A batch_size * num_true matrix, in which each row contains the
      IDs of the num_true target_classes in the corresponding original label.
    num_true: An `int` that is `>= 1`. Number of true labels per context.
    num_sampled: An `int` that is `>= 1`.
      Number of candidates to randomly sample.
    unique: A `bool`.
      If unique is true, we sample with rejection, so that all sampled
      candidates in a batch are unique. This requires some approximation to
      estimate the post-rejection sampling probabilities.
    range_max: An `int` that is `>= 1`.
      The sampler will sample integers from the interval [0, range_max).
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sampled_candidates, true_expected_count, sampled_expected_count).

    sampled_candidates: A `Tensor` of type `int64`. A vector of length num_sampled, in which each element is
      the ID of a sampled candidate.
    true_expected_count: A `Tensor` of type `float32`. A batch_size * num_true matrix, representing
      the number of times each candidate is expected to occur in a batch
      of sampled candidates. If unique=true, then this is a probability.
    sampled_expected_count: A `Tensor` of type `float32`. A vector of length num_sampled, for each sampled
      candidate representing the number of times the candidate is expected
      to occur in a batch of sampled candidates.  If unique=true, then this is a
      probability.
  """
    num_true = _execute.make_int(num_true, "num_true")
    num_sampled = _execute.make_int(num_sampled, "num_sampled")
    unique = _execute.make_bool(unique, "unique")
    range_max = _execute.make_int(range_max, "range_max")
    if seed is None:
        seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
        seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("UniformCandidateSampler",
                                                 true_classes=true_classes,
                                                 num_true=num_true,
                                                 num_sampled=num_sampled,
                                                 unique=unique,
                                                 range_max=range_max,
                                                 seed=seed,
                                                 seed2=seed2,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("num_true", _op.get_attr("num_true"), "num_sampled",
                  _op.get_attr("num_sampled"),
                  "unique", _op.get_attr("unique"), "range_max",
                  _op.get_attr("range_max"), "seed", _op.get_attr("seed"),
                  "seed2", _op.get_attr("seed2"))
    else:
        true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
        _inputs_flat = [true_classes]
        _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
                  unique, "range_max", range_max, "seed", seed, "seed2", seed2)
        _result = _execute.execute(b"UniformCandidateSampler",
                                   3,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("UniformCandidateSampler", _inputs_flat, _attrs,
                             _result, name)
    _result = _UniformCandidateSamplerOutput._make(_result)
    return _result
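
# A minimal usage sketch for `_uniform_candidate_sampler` above: draw 5 unique
# candidate IDs uniformly from [0, 1000) for a batch with one true class per row.
# The counts 1/5/1000 are illustrative only.
def _example_uniform_sampling(true_classes):
    sampled, true_expected, sampled_expected = _uniform_candidate_sampler(
        true_classes=true_classes,  # int64 matrix of shape [batch_size, num_true]
        num_true=1,
        num_sampled=5,
        unique=True,
        range_max=1000)
    return sampled, true_expected, sampled_expected
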
def _fixed_unigram_candidate_sampler(true_classes,
                                     num_true,
                                     num_sampled,
                                     unique,
                                     range_max,
                                     vocab_file="",
                                     distortion=1,
                                     num_reserved_ids=0,
                                     num_shards=1,
                                     shard=0,
                                     unigrams=[],
                                     seed=0,
                                     seed2=0,
                                     name=None):
    r"""Generates labels for candidate sampling with a learned unigram distribution.

  A unigram sampler could use a fixed unigram distribution read from a
  file or passed in as an in-memory array instead of building up the distribution
  from data on the fly. There is also an option to skew the distribution by
  applying a distortion power to the weights.

  The vocabulary file should be in CSV-like format, with the last field
  being the weight associated with the word.

  For each batch, this op picks a single set of sampled candidate labels.

  The advantages of sampling candidates per-batch are simplicity and the
  possibility of efficient dense matrix multiplication. The disadvantage is that
  the sampled candidates must be chosen independently of the context and of the
  true labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      A batch_size * num_true matrix, in which each row contains the
      IDs of the num_true target_classes in the corresponding original label.
    num_true: An `int` that is `>= 1`. Number of true labels per context.
    num_sampled: An `int` that is `>= 1`.
      Number of candidates to randomly sample.
    unique: A `bool`.
      If unique is true, we sample with rejection, so that all sampled
      candidates in a batch are unique. This requires some approximation to
      estimate the post-rejection sampling probabilities.
    range_max: An `int` that is `>= 1`.
      The sampler will sample integers from the interval [0, range_max).
    vocab_file: An optional `string`. Defaults to `""`.
      Each valid line in this file (which should have a CSV-like format)
      corresponds to a valid word ID. IDs are in sequential order, starting from
      num_reserved_ids. The last entry in each line is expected to be a value
      corresponding to the count or relative probability. Exactly one of vocab_file
      and unigrams needs to be passed to this op.
    distortion: An optional `float`. Defaults to `1`.
      The distortion is used to skew the unigram probability distribution.
      Each weight is first raised to the distortion's power before adding to the
      internal unigram distribution. As a result, distortion = 1.0 gives regular
      unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
      a uniform distribution.
    num_reserved_ids: An optional `int`. Defaults to `0`.
      Optionally some reserved IDs can be added in the range [0,
      ..., num_reserved_ids) by the users. One use case is that a special unknown
      word token is used as ID 0. These IDs will have a sampling probability of 0.
    num_shards: An optional `int` that is `>= 1`. Defaults to `1`.
      A sampler can be used to sample from a subset of the original range
      in order to speed up the whole computation through parallelism. This parameter
      (together with 'shard') indicates the number of partitions that are being
      used in the overall computation.
    shard: An optional `int` that is `>= 0`. Defaults to `0`.
      A sampler can be used to sample from a subset of the original range
      in order to speed up the whole computation through parallelism. This parameter
      (together with 'num_shards') indicates the particular partition number of a
      sampler op, when partitioning is being used.
    unigrams: An optional list of `floats`. Defaults to `[]`.
      A list of unigram counts or probabilities, one per ID in sequential
      order. Exactly one of vocab_file and unigrams should be passed to this op.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sampled_candidates, true_expected_count, sampled_expected_count).

    sampled_candidates: A `Tensor` of type `int64`. A vector of length num_sampled, in which each element is
      the ID of a sampled candidate.
    true_expected_count: A `Tensor` of type `float32`. A batch_size * num_true matrix, representing
      the number of times each candidate is expected to occur in a batch
      of sampled candidates. If unique=true, then this is a probability.
    sampled_expected_count: A `Tensor` of type `float32`. A vector of length num_sampled, for each sampled
      candidate representing the number of times the candidate is expected
      to occur in a batch of sampled candidates.  If unique=true, then this is a
      probability.
  """
    num_true = _execute.make_int(num_true, "num_true")
    num_sampled = _execute.make_int(num_sampled, "num_sampled")
    unique = _execute.make_bool(unique, "unique")
    range_max = _execute.make_int(range_max, "range_max")
    if vocab_file is None:
        vocab_file = ""
    vocab_file = _execute.make_str(vocab_file, "vocab_file")
    if distortion is None:
        distortion = 1
    distortion = _execute.make_float(distortion, "distortion")
    if num_reserved_ids is None:
        num_reserved_ids = 0
    num_reserved_ids = _execute.make_int(num_reserved_ids, "num_reserved_ids")
    if num_shards is None:
        num_shards = 1
    num_shards = _execute.make_int(num_shards, "num_shards")
    if shard is None:
        shard = 0
    shard = _execute.make_int(shard, "shard")
    if unigrams is None:
        unigrams = []
    if not isinstance(unigrams, (list, tuple)):
        raise TypeError("Expected list for 'unigrams' argument to "
                        "'fixed_unigram_candidate_sampler' Op, not %r." %
                        unigrams)
    unigrams = [_execute.make_float(_f, "unigrams") for _f in unigrams]
    if seed is None:
        seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
        seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper(
            "FixedUnigramCandidateSampler",
            true_classes=true_classes,
            num_true=num_true,
            num_sampled=num_sampled,
            unique=unique,
            range_max=range_max,
            vocab_file=vocab_file,
            distortion=distortion,
            num_reserved_ids=num_reserved_ids,
            num_shards=num_shards,
            shard=shard,
            unigrams=unigrams,
            seed=seed,
            seed2=seed2,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("num_true", _op.get_attr("num_true"), "num_sampled",
                  _op.get_attr("num_sampled"),
                  "unique", _op.get_attr("unique"), "range_max",
                  _op.get_attr("range_max"), "vocab_file",
                  _op.get_attr("vocab_file"), "distortion",
                  _op.get_attr("distortion"), "num_reserved_ids",
                  _op.get_attr("num_reserved_ids"), "num_shards",
                  _op.get_attr("num_shards"), "shard", _op.get_attr("shard"),
                  "unigrams", _op.get_attr("unigrams"), "seed",
                  _op.get_attr("seed"), "seed2", _op.get_attr("seed2"))
    else:
        true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
        _inputs_flat = [true_classes]
        _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
                  unique, "range_max", range_max, "vocab_file", vocab_file,
                  "distortion", distortion, "num_reserved_ids",
                  num_reserved_ids, "num_shards", num_shards, "shard", shard,
                  "unigrams", unigrams, "seed", seed, "seed2", seed2)
        _result = _execute.execute(b"FixedUnigramCandidateSampler",
                                   3,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("FixedUnigramCandidateSampler", _inputs_flat,
                             _attrs, _result, name)
    _result = _FixedUnigramCandidateSamplerOutput._make(_result)
    return _result
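
As a usage sketch (not part of the generated module): the public wrapper `tf.random.fixed_unigram_candidate_sampler` dispatches to this op. The unigram counts below are made-up illustrative values, and the snippet assumes TensorFlow 2.x in eager mode.

import tensorflow as tf

# Illustrative unigram counts for a 5-ID vocabulary (assumed values).
unigrams = [10.0, 5.0, 3.0, 1.0, 1.0]

# Each row holds the positive class IDs for one training example.
true_classes = tf.constant([[0], [2]], dtype=tf.int64)

sampled, true_expected, sampled_expected = tf.random.fixed_unigram_candidate_sampler(
    true_classes=true_classes,
    num_true=1,
    num_sampled=3,
    unique=True,
    range_max=5,
    unigrams=unigrams)

print(sampled)           # int64 vector of length num_sampled
print(true_expected)     # float32 matrix of shape [batch_size, num_true]
print(sampled_expected)  # float32 vector of length num_sampled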
Example n. 13
def set_size(set_indices,
             set_values,
             set_shape,
             validate_indices=True,
             name=None):
    r"""Number of unique elements along last dimension of input `set`.

  Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
  and `set_shape`. The last dimension contains values in a set, duplicates are
  allowed but ignored.

  If `validate_indices` is `True`, this op validates the order and range of `set`
  indices.

  Args:
    set_indices: A `Tensor` of type `int64`.
      2D `Tensor`, indices of a `SparseTensor`.
    set_values: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
      1D `Tensor`, values of a `SparseTensor`.
    set_shape: A `Tensor` of type `int64`.
      1D `Tensor`, shape of a `SparseTensor`.
    validate_indices: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if validate_indices is None:
            validate_indices = True
        validate_indices = _execute.make_bool(validate_indices,
                                              "validate_indices")
        _, _, _op = _op_def_lib._apply_op_helper(
            "SetSize",
            set_indices=set_indices,
            set_values=set_values,
            set_shape=set_shape,
            validate_indices=validate_indices,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("validate_indices", _op.get_attr("validate_indices"), "T",
                  _op.get_attr("T"))
        _execute.record_gradient("SetSize", _inputs_flat, _attrs, _result,
                                 name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "SetSize", name, _ctx._post_execution_callbacks, set_indices,
                set_values, set_shape, "validate_indices", validate_indices)
            return _result
        except _core._FallbackException:
            return set_size_eager_fallback(set_indices,
                                           set_values,
                                           set_shape,
                                           validate_indices=validate_indices,
                                           name=name,
                                           ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
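
For reference, a minimal sketch of the public wrapper `tf.sets.size`, which routes through this generated op (assumes TensorFlow 2.x, eager mode):

import tensorflow as tf

# Dense rows; zeros act as padding and are dropped by tf.sparse.from_dense.
dense = tf.constant([[1, 2, 2, 0],
                     [3, 0, 0, 0]], dtype=tf.int64)
sparse = tf.sparse.from_dense(dense)

# Number of unique elements along the last dimension of each row.
sizes = tf.sets.size(sparse, validate_indices=True)
print(sizes)  # expected: [2 1]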
Example n. 14
def recv(tensor_type,
         tensor_name,
         send_device,
         send_device_incarnation,
         recv_device,
         client_terminated=False,
         name=None):
    r"""Receives the named tensor from send_device on recv_device.

  Args:
    tensor_type: A `tf.DType`.
    tensor_name: A `string`. The name of the tensor to receive.
    send_device: A `string`. The name of the device sending the tensor.
    send_device_incarnation: An `int`. The current incarnation of send_device.
    recv_device: A `string`. The name of the device receiving the tensor.
    client_terminated: An optional `bool`. Defaults to `False`.
      If set to true, this indicates that the node was added
      to the graph as a result of a client-side feed or fetch of Tensor data,
      in which case the corresponding send or recv is expected to be managed
      locally by the caller.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `tensor_type`.
  """
    _ctx = _context._context or _context.context()
    tld = _ctx._thread_local_data
    if tld.is_eager:
        try:
            _result = pywrap_tfe.TFE_Py_FastPathExecute(
                _ctx._context_handle, tld.device_name, "Recv", name,
                tld.op_callbacks, "tensor_type", tensor_type, "tensor_name",
                tensor_name, "send_device", send_device,
                "send_device_incarnation", send_device_incarnation,
                "recv_device", recv_device, "client_terminated",
                client_terminated)
            return _result
        except _core._FallbackException:
            try:
                return recv_eager_fallback(
                    tensor_type=tensor_type,
                    tensor_name=tensor_name,
                    send_device=send_device,
                    send_device_incarnation=send_device_incarnation,
                    recv_device=recv_device,
                    client_terminated=client_terminated,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            _ops.raise_from_not_ok_status(e, name)
    # Add nodes to the TensorFlow graph.
    tensor_type = _execute.make_type(tensor_type, "tensor_type")
    tensor_name = _execute.make_str(tensor_name, "tensor_name")
    send_device = _execute.make_str(send_device, "send_device")
    send_device_incarnation = _execute.make_int(send_device_incarnation,
                                                "send_device_incarnation")
    recv_device = _execute.make_str(recv_device, "recv_device")
    if client_terminated is None:
        client_terminated = False
    client_terminated = _execute.make_bool(client_terminated,
                                           "client_terminated")
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Recv",
        tensor_type=tensor_type,
        tensor_name=tensor_name,
        send_device=send_device,
        send_device_incarnation=send_device_incarnation,
        recv_device=recv_device,
        client_terminated=client_terminated,
        name=name)
    _result = _outputs[:]
    if _execute.must_record_gradient():
        _attrs = ("tensor_type", _op._get_attr_type("tensor_type"),
                  "tensor_name", _op.get_attr("tensor_name"), "send_device",
                  _op.get_attr("send_device"), "send_device_incarnation",
                  _op._get_attr_int("send_device_incarnation"), "recv_device",
                  _op.get_attr("recv_device"), "client_terminated",
                  _op._get_attr_bool("client_terminated"))
        _inputs_flat = _op.inputs
        _execute.record_gradient("Recv", _inputs_flat, _attrs, _result)
    _result, = _result
    return _result
Example n. 15
def debug_nan_count(input,
                    device_name="",
                    tensor_name="",
                    debug_urls=[],
                    gated_grpc=False,
                    name=None):
    r"""Debug NaN Value Counter Op

  Counts number of NaNs in the input tensor, for debugging.

  Args:
    input: A `Tensor`. Input tensor, non-Reference type.
    device_name: An optional `string`. Defaults to `""`.
    tensor_name: An optional `string`. Defaults to `""`.
      Name of the input tensor.
    debug_urls: An optional list of `strings`. Defaults to `[]`.
      List of URLs to debug targets, e.g.,
      file:///foo/tfdbg_dump, grpc://localhost:11011.
    gated_grpc: An optional `bool`. Defaults to `False`.
      Whether this op will be gated. If any of the debug_urls of this
      debug node is of the grpc:// scheme, when the value of this attribute is set
      to True, the data will not actually be sent via the grpc stream unless this
      debug op has been enabled at the debug_url. If all of the debug_urls of this
      debug node are of the grpc:// scheme and the debug op is enabled at none of
      them, the output will be an empty Tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`.
    An integer output tensor that is the number of NaNs in the input.
  """
    _ctx = _context.context()
    if not _ctx.executing_eagerly():
        if device_name is None:
            device_name = ""
        device_name = _execute.make_str(device_name, "device_name")
        if tensor_name is None:
            tensor_name = ""
        tensor_name = _execute.make_str(tensor_name, "tensor_name")
        if debug_urls is None:
            debug_urls = []
        if not isinstance(debug_urls, (list, tuple)):
            raise TypeError("Expected list for 'debug_urls' argument to "
                            "'debug_nan_count' Op, not %r." % debug_urls)
        debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
        if gated_grpc is None:
            gated_grpc = False
        gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
        _, _, _op = _op_def_lib._apply_op_helper("DebugNanCount",
                                                 input=input,
                                                 device_name=device_name,
                                                 tensor_name=tensor_name,
                                                 debug_urls=debug_urls,
                                                 gated_grpc=gated_grpc,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("T", _op.get_attr("T"), "device_name",
                  _op.get_attr("device_name"), "tensor_name",
                  _op.get_attr("tensor_name"), "debug_urls",
                  _op.get_attr("debug_urls"), "gated_grpc",
                  _op.get_attr("gated_grpc"))
        _execute.record_gradient("DebugNanCount", _inputs_flat, _attrs,
                                 _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name, "DebugNanCount", name,
                _ctx._post_execution_callbacks, input, "device_name",
                device_name, "tensor_name", tensor_name, "debug_urls",
                debug_urls, "gated_grpc", gated_grpc)
            return _result
        except _core._FallbackException:
            return debug_nan_count_eager_fallback(input,
                                                  device_name=device_name,
                                                  tensor_name=tensor_name,
                                                  debug_urls=debug_urls,
                                                  gated_grpc=gated_grpc,
                                                  name=name)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
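
There is no simple public wrapper for this debug op, but the same count can be reproduced with ordinary public ops; a rough equivalent sketch (TensorFlow 2.x, eager):

import tensorflow as tf

x = tf.constant([1.0, float("nan"), 3.0, float("nan")])

# Count NaN entries as an int64 scalar, mirroring what DebugNanCount reports.
nan_count = tf.reduce_sum(tf.cast(tf.math.is_nan(x), tf.int64))
print(nan_count)  # expected: 2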
Example n. 16
def ragged_cross(ragged_values,
                 ragged_row_splits,
                 sparse_indices,
                 sparse_values,
                 sparse_shape,
                 dense_inputs,
                 input_order,
                 hashed_output,
                 num_buckets,
                 hash_key,
                 out_values_type,
                 out_row_splits_type,
                 name=None):
    r"""Generates a feature cross from a list of tensors, and returns it as a
RaggedTensor.  See `tf.ragged.cross` for more details.

  Args:
    ragged_values: A list of `Tensor` objects with types from: `int64`, `string`.
      The values tensor for each RaggedTensor input.
    ragged_row_splits: A list of `Tensor` objects with types from: `int32`, `int64`.
      The row_splits tensor for each RaggedTensor input.
    sparse_indices: A list of `Tensor` objects with type `int64`.
      The indices tensor for each SparseTensor input.
    sparse_values: A list of `Tensor` objects with types from: `int64`, `string`.
      The values tensor for each SparseTensor input.
    sparse_shape: A list with the same length as `sparse_indices` of `Tensor` objects with type `int64`.
      The dense_shape tensor for each SparseTensor input.
    dense_inputs: A list of `Tensor` objects with types from: `int64`, `string`.
      The tf.Tensor inputs.
    input_order: A `string`.
      String specifying the tensor type for each input.  The `i`th character in
      this string specifies the type of the `i`th input, and is one of: 'R' (ragged),
      'D' (dense), or 'S' (sparse).  This attr is used to ensure that the crossed
      values are combined in the order of the inputs from the call to tf.ragged.cross.
    hashed_output: A `bool`.
    num_buckets: An `int` that is `>= 0`.
    hash_key: An `int`.
    out_values_type: A `tf.DType` from: `tf.int64, tf.string`.
    out_row_splits_type: A `tf.DType` from: `tf.int32, tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_values, output_row_splits).

    output_values: A `Tensor` of type `out_values_type`.
    output_row_splits: A `Tensor` of type `out_row_splits_type`.
  """
    _ctx = _context._context or _context.context()
    tld = _ctx._thread_local_data
    if tld.is_eager:
        try:
            _result = pywrap_tfe.TFE_Py_FastPathExecute(
                _ctx, "RaggedCross", name, ragged_values, ragged_row_splits,
                sparse_indices, sparse_values, sparse_shape, dense_inputs,
                "input_order", input_order, "hashed_output", hashed_output,
                "num_buckets", num_buckets, "hash_key", hash_key,
                "out_values_type", out_values_type, "out_row_splits_type",
                out_row_splits_type)
            _result = _RaggedCrossOutput._make(_result)
            return _result
        except _core._NotOkStatusException as e:
            _ops.raise_from_not_ok_status(e, name)
        except _core._FallbackException:
            pass
        try:
            return ragged_cross_eager_fallback(
                ragged_values,
                ragged_row_splits,
                sparse_indices,
                sparse_values,
                sparse_shape,
                dense_inputs,
                input_order=input_order,
                hashed_output=hashed_output,
                num_buckets=num_buckets,
                hash_key=hash_key,
                out_values_type=out_values_type,
                out_row_splits_type=out_row_splits_type,
                name=name,
                ctx=_ctx)
        except _core._SymbolicException:
            pass  # Add nodes to the TensorFlow graph.
    # Add nodes to the TensorFlow graph.
    if not isinstance(sparse_indices, (list, tuple)):
        raise TypeError("Expected list for 'sparse_indices' argument to "
                        "'ragged_cross' Op, not %r." % sparse_indices)
    _attr_Nsparse = len(sparse_indices)
    if not isinstance(sparse_shape, (list, tuple)):
        raise TypeError("Expected list for 'sparse_shape' argument to "
                        "'ragged_cross' Op, not %r." % sparse_shape)
    if len(sparse_shape) != _attr_Nsparse:
        raise ValueError(
            "List argument 'sparse_shape' to 'ragged_cross' Op with length %d "
            "must match length %d of argument 'sparse_indices'." %
            (len(sparse_shape), _attr_Nsparse))
    input_order = _execute.make_str(input_order, "input_order")
    hashed_output = _execute.make_bool(hashed_output, "hashed_output")
    num_buckets = _execute.make_int(num_buckets, "num_buckets")
    hash_key = _execute.make_int(hash_key, "hash_key")
    out_values_type = _execute.make_type(out_values_type, "out_values_type")
    out_row_splits_type = _execute.make_type(out_row_splits_type,
                                             "out_row_splits_type")
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RaggedCross",
        ragged_values=ragged_values,
        ragged_row_splits=ragged_row_splits,
        sparse_indices=sparse_indices,
        sparse_values=sparse_values,
        sparse_shape=sparse_shape,
        dense_inputs=dense_inputs,
        input_order=input_order,
        hashed_output=hashed_output,
        num_buckets=num_buckets,
        hash_key=hash_key,
        out_values_type=out_values_type,
        out_row_splits_type=out_row_splits_type,
        name=name)
    _result = _outputs[:]
    if _execute.must_record_gradient():
        _attrs = ("Nsparse", _op._get_attr_int("Nsparse"), "input_order",
                  _op.get_attr("input_order"), "hashed_output",
                  _op._get_attr_bool("hashed_output"), "num_buckets",
                  _op._get_attr_int("num_buckets"), "hash_key",
                  _op._get_attr_int("hash_key"), "ragged_values_types",
                  _op.get_attr("ragged_values_types"), "ragged_splits_types",
                  _op.get_attr("ragged_splits_types"), "sparse_values_types",
                  _op.get_attr("sparse_values_types"), "dense_types",
                  _op.get_attr("dense_types"), "out_values_type",
                  _op._get_attr_type("out_values_type"), "out_row_splits_type",
                  _op._get_attr_type("out_row_splits_type"))
        _inputs_flat = _op.inputs
        _execute.record_gradient("RaggedCross", _inputs_flat, _attrs, _result)
    _result = _RaggedCrossOutput._make(_result)
    return _result
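
As the docstring notes, the public entry point is `tf.ragged.cross`; a minimal sketch with illustrative inputs (TensorFlow 2.x, eager):

import tensorflow as tf

ragged = tf.ragged.constant([["a"], ["b", "c"]])
dense = tf.constant([["d"], ["e"]])

# Row-wise feature cross; string values are joined with "_X_".
crossed = tf.ragged.cross([ragged, dense])
print(crossed)  # e.g. [[b'a_X_d'], [b'b_X_e', b'c_X_e']]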
def enter(data,
          frame_name,
          is_constant=False,
          parallel_iterations=10,
          name=None):
    r"""Creates or finds a child frame, and makes `data` available to the child frame.

  This op is used together with `Exit` to create loops in the graph.

  The unique `frame_name` is used by the `Executor` to identify frames. If
  `is_constant` is true, `output` is a constant in the child frame; otherwise
  it may be changed in the child frame. At most `parallel_iterations` iterations
  are run in parallel in the child frame.

  Args:
    data: A `Tensor`. The tensor to be made available to the child frame.
    frame_name: A `string`. The name of the child frame.
    is_constant: An optional `bool`. Defaults to `False`.
      If true, the output is constant within the child frame.
    parallel_iterations: An optional `int`. Defaults to `10`.
      The number of iterations allowed to run in parallel.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`. The same tensor as `data`.
  """
    frame_name = _execute.make_str(frame_name, "frame_name")
    if is_constant is None:
        is_constant = False
    is_constant = _execute.make_bool(is_constant, "is_constant")
    if parallel_iterations is None:
        parallel_iterations = 10
    parallel_iterations = _execute.make_int(parallel_iterations,
                                            "parallel_iterations")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper(
            "Enter",
            data=data,
            frame_name=frame_name,
            is_constant=is_constant,
            parallel_iterations=parallel_iterations,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("T", _op.get_attr("T"), "frame_name",
                  _op.get_attr("frame_name"), "is_constant",
                  _op.get_attr("is_constant"), "parallel_iterations",
                  _op.get_attr("parallel_iterations"))
    else:
        _attr_T, (data, ) = _execute.args_to_matching_eager([data], _ctx)
        _inputs_flat = [data]
        _attrs = ("T", _attr_T, "frame_name", frame_name, "is_constant",
                  is_constant, "parallel_iterations", parallel_iterations)
        _result = _execute.execute(b"Enter",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("Enter", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
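
Enter/Exit frames are not used directly in user code; in the v1 control-flow lowering they are emitted by `tf.while_loop`. A hedged sketch of that public construct (runs eagerly in TensorFlow 2.x):

import tensorflow as tf

i = tf.constant(0)

# With v1 graph-mode control flow, this loop is lowered to
# Enter/Merge/Switch/NextIteration/Exit ops such as the one above.
result = tf.while_loop(cond=lambda i: i < 10,
                       body=lambda i: i + 1,
                       loop_vars=[i])
print(result)  # [<tf.Tensor: ... numpy=10>]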
Example n. 18
def grow_tree_ensemble(tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate, dropout_seed, max_tree_depth, weak_learner_type, partition_ids, gains, splits, learner_config, center_bias, name=None):
  r"""Grows the tree ensemble by either adding a layer to the last tree being grown

  or by starting a new tree.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the ensemble variable.
    stamp_token: A `Tensor` of type `int64`.
      Stamp token for validating operation consistency.
    next_stamp_token: A `Tensor` of type `int64`.
      Stamp token to be used for the next iteration.
    learning_rate: A `Tensor` of type `float32`. Scalar learning rate.
    dropout_seed: A `Tensor` of type `int64`.
    max_tree_depth: A `Tensor` of type `int32`.
    weak_learner_type: A `Tensor` of type `int32`.
      The type of weak learner to use.
    partition_ids: A list of `Tensor` objects with type `int32`.
      List of Rank 1 Tensors containing partition Id per candidate.
    gains: A list with the same length as `partition_ids` of `Tensor` objects with type `float32`.
      List of Rank 1 Tensors containing gains per candidate.
    splits: A list with the same length as `partition_ids` of `Tensor` objects with type `string`.
      List of Rank 1 Tensors containing serialized SplitInfo protos per candidate.
    learner_config: A `string`.
      Config for the learner of type LearnerConfig proto.
    center_bias: A `bool`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(partition_ids, (list, tuple)):
      raise TypeError(
          "Expected list for 'partition_ids' argument to "
          "'grow_tree_ensemble' Op, not %r." % partition_ids)
    _attr_num_handlers = len(partition_ids)
    if not isinstance(gains, (list, tuple)):
      raise TypeError(
          "Expected list for 'gains' argument to "
          "'grow_tree_ensemble' Op, not %r." % gains)
    if len(gains) != _attr_num_handlers:
      raise ValueError(
          "List argument 'gains' to 'grow_tree_ensemble' Op with length %d "
          "must match length %d of argument 'partition_ids'." %
          (len(gains), _attr_num_handlers))
    if not isinstance(splits, (list, tuple)):
      raise TypeError(
          "Expected list for 'splits' argument to "
          "'grow_tree_ensemble' Op, not %r." % splits)
    if len(splits) != _attr_num_handlers:
      raise ValueError(
          "List argument 'splits' to 'grow_tree_ensemble' Op with length %d "
          "must match length %d of argument 'partition_ids'." %
          (len(splits), _attr_num_handlers))
    learner_config = _execute.make_str(learner_config, "learner_config")
    center_bias = _execute.make_bool(center_bias, "center_bias")
    _, _, _op = _op_def_lib._apply_op_helper(
        "GrowTreeEnsemble", tree_ensemble_handle=tree_ensemble_handle,
        stamp_token=stamp_token, next_stamp_token=next_stamp_token,
        learning_rate=learning_rate, dropout_seed=dropout_seed,
        max_tree_depth=max_tree_depth, weak_learner_type=weak_learner_type,
        partition_ids=partition_ids, gains=gains, splits=splits,
        learner_config=learner_config, center_bias=center_bias, name=name)
    return _op
    _result = None
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "GrowTreeEnsemble", name, _ctx._post_execution_callbacks,
        tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate,
        dropout_seed, max_tree_depth, weak_learner_type, partition_ids, gains,
        splits, "learner_config", learner_config, "center_bias", center_bias)
      return _result
    except _core._FallbackException:
      return grow_tree_ensemble_eager_fallback(
          tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate,
          dropout_seed, max_tree_depth, weak_learner_type, partition_ids,
          gains, splits, learner_config=learner_config,
          center_bias=center_bias, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_feature_cross(indices, values, shapes, dense, hashed_output, num_buckets, out_type, internal_type, name=None):
  r"""Generates sparse cross form a list of sparse tensors.

  The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
  representing features of one feature column. It outputs a 2D `SparseTensor` with
  the batchwise crosses of these features.

  For example, if the inputs are

      inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

      inputs[2]: Tensor [["f"], ["g"]]

  then the output will be

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  if hashed_output=true then the output will be

      shape = [2, 2]
      [0, 0]: HashCombine(
                  Fingerprint64("f"), HashCombine(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    indices: A list of `Tensor` objects with type `int64`.
      2-D.  Indices of each input `SparseTensor`.
    values: A list of `Tensor` objects with types from: `int64`, `string`.
      1-D.   values of each `SparseTensor`.
    shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.
      1-D.   Shapes of each `SparseTensor`.
    dense: A list of `Tensor` objects with types from: `int64`, `string`.
      2-D.    Columns represented by dense `Tensor`.
    hashed_output: A `bool`.
    num_buckets: An `int` that is `>= 0`.
    out_type: A `tf.DType` from: `tf.int64, tf.string`.
    internal_type: A `tf.DType` from: `tf.int64, tf.string`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`. 2-D.  Indices of the concatenated `SparseTensor`.
    output_values: A `Tensor` of type `out_type`. 1-D.  Non-empty values of the concatenated or hashed
      `SparseTensor`.
    output_shape: A `Tensor` of type `int64`. 1-D.  Shape of the concatenated `SparseTensor`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(indices, (list, tuple)):
      raise TypeError(
          "Expected list for 'indices' argument to "
          "'sparse_feature_cross' Op, not %r." % indices)
    _attr_N = len(indices)
    if not isinstance(shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'shapes' argument to "
          "'sparse_feature_cross' Op, not %r." % shapes)
    if len(shapes) != _attr_N:
      raise ValueError(
          "List argument 'shapes' to 'sparse_feature_cross' Op with length %d "
          "must match length %d of argument 'indices'." %
          (len(shapes), _attr_N))
    hashed_output = _execute.make_bool(hashed_output, "hashed_output")
    num_buckets = _execute.make_int(num_buckets, "num_buckets")
    out_type = _execute.make_type(out_type, "out_type")
    internal_type = _execute.make_type(internal_type, "internal_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseFeatureCross", indices=indices, values=values, shapes=shapes,
        dense=dense, hashed_output=hashed_output, num_buckets=num_buckets,
        out_type=out_type, internal_type=internal_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "hashed_output",
              _op.get_attr("hashed_output"), "num_buckets",
              _op.get_attr("num_buckets"), "sparse_types",
              _op.get_attr("sparse_types"), "dense_types",
              _op.get_attr("dense_types"), "out_type",
              _op.get_attr("out_type"), "internal_type",
              _op.get_attr("internal_type"))
    _execute.record_gradient(
      "SparseFeatureCross", _inputs_flat, _attrs, _result, name)
    _result = _SparseFeatureCrossOutput._make(_result)
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseFeatureCross", name, _ctx._post_execution_callbacks, indices,
        values, shapes, dense, "hashed_output", hashed_output, "num_buckets",
        num_buckets, "out_type", out_type, "internal_type", internal_type)
      _result = _SparseFeatureCrossOutput._make(_result)
      return _result
    except _core._FallbackException:
      return sparse_feature_cross_eager_fallback(
          indices, values, shapes, dense, hashed_output=hashed_output,
          num_buckets=num_buckets, out_type=out_type,
          internal_type=internal_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
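
The closest public counterpart to this contrib op is `tf.sparse.cross` (and `tf.sparse.cross_hashed`); a small sketch of the non-hashed variant with the same kind of inputs as the docstring example (TensorFlow 2.x, eager):

import tensorflow as tf

# Empty strings are treated as missing entries by tf.sparse.from_dense.
a = tf.sparse.from_dense(tf.constant([["a", ""], ["b", "c"]]))
d = tf.constant([["d"], ["e"]])

# Batchwise string cross; values are joined with "_X_".
crossed = tf.sparse.cross([a, d])
print(tf.sparse.to_dense(crossed))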
Example n. 20
def try_rpc(address, method, request, protocol="", fail_fast=True, timeout_in_ms=0, name=None):
  r"""TODO: add doc.

  Args:
    address: A `Tensor` of type `string`.
    method: A `Tensor` of type `string`.
    request: A `Tensor` of type `string`.
    protocol: An optional `string`. Defaults to `""`.
    fail_fast: An optional `bool`. Defaults to `True`.
    timeout_in_ms: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (response, status_code, status_message).

    response: A `Tensor` of type `string`.
    status_code: A `Tensor` of type `int32`.
    status_message: A `Tensor` of type `string`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "TryRpc", name,
        _ctx._post_execution_callbacks, address, method, request, "protocol",
        protocol, "fail_fast", fail_fast, "timeout_in_ms", timeout_in_ms)
      _result = _TryRpcOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return try_rpc_eager_fallback(
            address, method, request, protocol=protocol, fail_fast=fail_fast,
            timeout_in_ms=timeout_in_ms, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              try_rpc, address=address, method=method, request=request,
                       protocol=protocol, fail_fast=fail_fast,
                       timeout_in_ms=timeout_in_ms, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if protocol is None:
    protocol = ""
  protocol = _execute.make_str(protocol, "protocol")
  if fail_fast is None:
    fail_fast = True
  fail_fast = _execute.make_bool(fail_fast, "fail_fast")
  if timeout_in_ms is None:
    timeout_in_ms = 0
  timeout_in_ms = _execute.make_int(timeout_in_ms, "timeout_in_ms")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TryRpc", address=address, method=method, request=request,
                  protocol=protocol, fail_fast=fail_fast,
                  timeout_in_ms=timeout_in_ms, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          try_rpc, address=address, method=method, request=request,
                   protocol=protocol, fail_fast=fail_fast,
                   timeout_in_ms=timeout_in_ms, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("protocol", _op.get_attr("protocol"), "fail_fast",
            _op.get_attr("fail_fast"), "timeout_in_ms",
            _op.get_attr("timeout_in_ms"))
  _execute.record_gradient(
      "TryRpc", _inputs_flat, _attrs, _result, name)
  _result = _TryRpcOutput._make(_result)
  return _result
Example n. 21
def ctc_greedy_decoder(inputs,
                       sequence_length,
                       merge_repeated=False,
                       name=None):
    r"""Performs greedy decoding on the logits given in inputs.

  A note about the attribute merge_repeated: if enabled, when
  consecutive logits' maximum indices are the same, only the first of
  these is emitted.  Labeling the blank '*', the sequence "A B B * B B"
  becomes "A B B" if merge_repeated = True and "A B B B B" if
  merge_repeated = False.

  Regardless of the value of merge_repeated, if the maximum index of a given
  time and batch corresponds to the blank, index `(num_classes - 1)`, no new
  element is emitted.

  Args:
    inputs: A `Tensor` of type `float32`.
      3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    sequence_length: A `Tensor` of type `int32`.
      A vector containing sequence lengths, size `(batch_size)`.
    merge_repeated: An optional `bool`. Defaults to `False`.
      If True, merge repeated classes in output.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (decoded_indices, decoded_values, decoded_shape, log_probability).

    decoded_indices: A `Tensor` of type `int64`.
    decoded_values: A `Tensor` of type `int64`.
    decoded_shape: A `Tensor` of type `int64`.
    log_probability: A `Tensor` of type `float32`.
  """
    _ctx = _context._context
    if _ctx is not None and _ctx._eager_context.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "CTCGreedyDecoder", name, _ctx._post_execution_callbacks,
                inputs, sequence_length, "merge_repeated", merge_repeated)
            _result = _CTCGreedyDecoderOutput._make(_result)
            return _result
        except _core._FallbackException:
            try:
                return ctc_greedy_decoder_eager_fallback(
                    inputs,
                    sequence_length,
                    merge_repeated=merge_repeated,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if merge_repeated is None:
        merge_repeated = False
    merge_repeated = _execute.make_bool(merge_repeated, "merge_repeated")
    _, _, _op = _op_def_lib._apply_op_helper("CTCGreedyDecoder",
                                             inputs=inputs,
                                             sequence_length=sequence_length,
                                             merge_repeated=merge_repeated,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("merge_repeated", _op.get_attr("merge_repeated"))
    _execute.record_gradient("CTCGreedyDecoder", _inputs_flat, _attrs, _result,
                             name)
    _result = _CTCGreedyDecoderOutput._make(_result)
    return _result
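
A minimal usage sketch via the public wrapper `tf.nn.ctc_greedy_decoder`; the logits below are random placeholders (TensorFlow 2.x, eager):

import tensorflow as tf

max_time, batch_size, num_classes = 10, 2, 5
logits = tf.random.normal([max_time, batch_size, num_classes])
seq_len = tf.fill([batch_size], max_time)

# Returns a one-element list of SparseTensors plus negative-sum log-probabilities.
(decoded,), neg_sum_logits = tf.nn.ctc_greedy_decoder(
    logits, seq_len, merge_repeated=True)
print(tf.sparse.to_dense(decoded))
print(neg_sum_logits)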
Example n. 22
def as_string(input, precision=-1, scientific=False, shortest=False, width=-1, fill="", name=None):
  r"""Converts each entry in the given tensor to strings.  Supports many numeric

  types and boolean.

  Args:
    input: A `Tensor`. Must be one of the following types: `int32`, `int64`, `complex64`, `float32`, `float64`, `bool`, `int8`.
    precision: An optional `int`. Defaults to `-1`.
      The post-decimal precision to use for floating point numbers.
      Only used if precision > -1.
    scientific: An optional `bool`. Defaults to `False`.
      Use scientific notation for floating point numbers.
    shortest: An optional `bool`. Defaults to `False`.
      Use shortest representation (either scientific or standard) for
      floating point numbers.
    width: An optional `int`. Defaults to `-1`.
      Pad pre-decimal numbers to this width.
      Applies to both floating point and integer numbers.
      Only used if width > -1.
    fill: An optional `string`. Defaults to `""`.
      The value to pad if width > -1.  If empty, pads with spaces.
      Another typical value is '0'.  String cannot be longer than 1 character.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if precision is None:
      precision = -1
    precision = _execute.make_int(precision, "precision")
    if scientific is None:
      scientific = False
    scientific = _execute.make_bool(scientific, "scientific")
    if shortest is None:
      shortest = False
    shortest = _execute.make_bool(shortest, "shortest")
    if width is None:
      width = -1
    width = _execute.make_int(width, "width")
    if fill is None:
      fill = ""
    fill = _execute.make_str(fill, "fill")
    _, _, _op = _op_def_lib._apply_op_helper(
        "AsString", input=input, precision=precision, scientific=scientific,
        shortest=shortest, width=width, fill=fill, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "precision", _op.get_attr("precision"),
              "scientific", _op.get_attr("scientific"), "shortest",
              _op.get_attr("shortest"), "width", _op.get_attr("width"),
              "fill", _op.get_attr("fill"))
    _execute.record_gradient(
      "AsString", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "AsString", name,
        _ctx._post_execution_callbacks, input, "precision", precision,
        "scientific", scientific, "shortest", shortest, "width", width,
        "fill", fill)
      return _result
    except _core._FallbackException:
      return as_string_eager_fallback(
          input, precision=precision, scientific=scientific,
          shortest=shortest, width=width, fill=fill, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
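
A quick sketch of the public wrapper `tf.strings.as_string` (TensorFlow 2.x, eager):

import tensorflow as tf

x = tf.constant([3.14159, 2.71828])

# Two decimal places, padded to width 8 with '0' characters.
s = tf.strings.as_string(x, precision=2, width=8, fill="0")
print(s)  # e.g. [b'00003.14' b'00002.72']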
Example n. 23
def ctc_beam_search_decoder(inputs,
                            sequence_length,
                            beam_width,
                            top_paths,
                            merge_repeated=True,
                            name=None):
    r"""Performs beam search decoding on the logits given in input.

  A note about the attribute merge_repeated: For the beam search decoder,
  this means that if consecutive entries in a beam are the same, only
  the first of these is emitted.  That is, when the top path is "A B B B B",
  "A B" is returned if merge_repeated = True but "A B B B B" is
  returned if merge_repeated = False.

  Args:
    inputs: A `Tensor` of type `float32`.
      3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    sequence_length: A `Tensor` of type `int32`.
      A vector containing sequence lengths, size `(batch)`.
    beam_width: An `int` that is `>= 1`.
      A scalar >= 0 (beam search beam width).
    top_paths: An `int` that is `>= 1`.
      A scalar >= 0, <= beam_width (controls output size).
    merge_repeated: An optional `bool`. Defaults to `True`.
      If true, merge repeated classes in output.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (decoded_indices, decoded_values, decoded_shape, log_probability).

    decoded_indices: A list of `top_paths` `Tensor` objects with type `int64`.
    decoded_values: A list of `top_paths` `Tensor` objects with type `int64`.
    decoded_shape: A list of `top_paths` `Tensor` objects with type `int64`.
    log_probability: A `Tensor` of type `float32`.
  """
    _ctx = _context._context
    if _ctx is not None and _ctx._eager_context.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "CTCBeamSearchDecoder", name, _ctx._post_execution_callbacks,
                inputs, sequence_length, "beam_width", beam_width, "top_paths",
                top_paths, "merge_repeated", merge_repeated)
            _result = _CTCBeamSearchDecoderOutput._make(_result)
            return _result
        except _core._FallbackException:
            try:
                return ctc_beam_search_decoder_eager_fallback(
                    inputs,
                    sequence_length,
                    beam_width=beam_width,
                    top_paths=top_paths,
                    merge_repeated=merge_repeated,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    beam_width = _execute.make_int(beam_width, "beam_width")
    top_paths = _execute.make_int(top_paths, "top_paths")
    if merge_repeated is None:
        merge_repeated = True
    merge_repeated = _execute.make_bool(merge_repeated, "merge_repeated")
    _, _, _op = _op_def_lib._apply_op_helper("CTCBeamSearchDecoder",
                                             inputs=inputs,
                                             sequence_length=sequence_length,
                                             beam_width=beam_width,
                                             top_paths=top_paths,
                                             merge_repeated=merge_repeated,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("beam_width", _op.get_attr("beam_width"), "top_paths",
              _op.get_attr("top_paths"), "merge_repeated",
              _op.get_attr("merge_repeated"))
    _execute.record_gradient("CTCBeamSearchDecoder", _inputs_flat, _attrs,
                             _result, name)
    _result = [_result[:top_paths]] + _result[top_paths:]
    _result = _result[:1] + [_result[1:1 + top_paths]
                             ] + _result[1 + top_paths:]
    _result = _result[:2] + [_result[2:2 + top_paths]
                             ] + _result[2 + top_paths:]
    _result = _CTCBeamSearchDecoderOutput._make(_result)
    return _result
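
The corresponding public wrapper is `tf.nn.ctc_beam_search_decoder`; a brief sketch with random logits (TensorFlow 2.x, eager):

import tensorflow as tf

max_time, batch_size, num_classes = 10, 2, 5
logits = tf.random.normal([max_time, batch_size, num_classes])
seq_len = tf.fill([batch_size], max_time)

# Returns `top_paths` SparseTensors plus log-probabilities of shape [batch_size, top_paths].
decoded, log_probs = tf.nn.ctc_beam_search_decoder(
    logits, seq_len, beam_width=4, top_paths=2)
for path in decoded:
    print(tf.sparse.to_dense(path))
print(log_probs)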
Example n. 24
def reduce_join(inputs, reduction_indices, keep_dims=False, separator="", name=None):
  r"""Joins a string Tensor across the given dimensions.

  Computes the string join across dimensions in the given string Tensor of shape
  `[d_0, d_1, ..., d_n-1]`.  Returns a new Tensor created by joining the input
  strings with the given separator (default: empty string).  Negative indices are
  counted backwards from the end, with `-1` being equivalent to `n - 1`.
  
  For example:
  
  ```python
  # tensor `a` is [["a", "b"], ["c", "d"]]
  tf.reduce_join(a, 0) ==> ["ac", "bd"]
  tf.reduce_join(a, 1) ==> ["ab", "cd"]
  tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
  tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
  tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
  tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
  tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
  tf.reduce_join(a, [0, 1]) ==> ["acbd"]
  tf.reduce_join(a, [1, 0]) ==> ["abcd"]
  tf.reduce_join(a, []) ==> ["abcd"]
  ```

  Args:
    inputs: A `Tensor` of type `string`.
      The input to be joined.  All reduced indices must have non-zero size.
    reduction_indices: A `Tensor` of type `int32`.
      The dimensions to reduce over.  Dimensions are reduced in the
      order specified.  Omitting `reduction_indices` is equivalent to passing
      `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.
    keep_dims: An optional `bool`. Defaults to `False`.
      If `True`, retain reduced dimensions with length `1`.
    separator: An optional `string`. Defaults to `""`.
      The separator to use when joining.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    if separator is None:
      separator = ""
    separator = _execute.make_str(separator, "separator")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ReduceJoin", inputs=inputs, reduction_indices=reduction_indices,
        keep_dims=keep_dims, separator=separator, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "separator",
              _op.get_attr("separator"))
    _execute.record_gradient(
      "ReduceJoin", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "ReduceJoin", name,
        _ctx._post_execution_callbacks, inputs, reduction_indices,
        "keep_dims", keep_dims, "separator", separator)
      return _result
    except _core._FallbackException:
      return reduce_join_eager_fallback(
          inputs, reduction_indices, keep_dims=keep_dims, separator=separator,
          name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
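
In TensorFlow 2.x the same op is exposed as `tf.strings.reduce_join`, with `axis`/`keepdims` in place of the legacy `reduction_indices`/`keep_dims` names; a tiny sketch:

import tensorflow as tf

a = tf.constant([["a", "b"], ["c", "d"]])

print(tf.strings.reduce_join(a, axis=0))                 # [b'ac' b'bd']
print(tf.strings.reduce_join(a, axis=1, separator="."))  # [b'a.b' b'c.d']
print(tf.strings.reduce_join(a, axis=[0, 1]))            # b'acbd'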
Example n. 25
def debug_nan_count(input,
                    device_name="",
                    tensor_name="",
                    debug_urls=[],
                    gated_grpc=False,
                    name=None):
    r"""Debug NaN Value Counter Op.

  Counts number of NaNs in the input tensor, for debugging.

  Args:
    input: A `Tensor`. Input tensor, non-Reference type.
    device_name: An optional `string`. Defaults to `""`.
    tensor_name: An optional `string`. Defaults to `""`.
      Name of the input tensor.
    debug_urls: An optional list of `strings`. Defaults to `[]`.
      List of URLs to debug targets, e.g.,
      file:///foo/tfdbg_dump, grpc://localhost:11011.
    gated_grpc: An optional `bool`. Defaults to `False`.
      Whether this op will be gated. If any of the debug_urls of this
      debug node is of the grpc:// scheme, when the value of this attribute is set
      to True, the data will not actually be sent via the grpc stream unless this
      debug op has been enabled at the debug_url. If all of the debug_urls of this
      debug node are of the grpc:// scheme and the debug op is enabled at none of
      them, the output will be an empty Tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`.
  """
    _ctx = _context._context or _context.context()
    tld = _ctx._thread_local_data
    if tld.is_eager:
        try:
            _result = pywrap_tfe.TFE_Py_FastPathExecute(
                _ctx._context_handle, tld.device_name, "DebugNanCount", name,
                tld.op_callbacks, input, "device_name", device_name,
                "tensor_name", tensor_name, "debug_urls", debug_urls,
                "gated_grpc", gated_grpc)
            return _result
        except _core._NotOkStatusException as e:
            _ops.raise_from_not_ok_status(e, name)
        except _core._FallbackException:
            pass
        try:
            return debug_nan_count_eager_fallback(input,
                                                  device_name=device_name,
                                                  tensor_name=tensor_name,
                                                  debug_urls=debug_urls,
                                                  gated_grpc=gated_grpc,
                                                  name=name,
                                                  ctx=_ctx)
        except _core._SymbolicException:
            pass  # Add nodes to the TensorFlow graph.
    # Add nodes to the TensorFlow graph.
    if device_name is None:
        device_name = ""
    device_name = _execute.make_str(device_name, "device_name")
    if tensor_name is None:
        tensor_name = ""
    tensor_name = _execute.make_str(tensor_name, "tensor_name")
    if debug_urls is None:
        debug_urls = []
    if not isinstance(debug_urls, (list, tuple)):
        raise TypeError("Expected list for 'debug_urls' argument to "
                        "'debug_nan_count' Op, not %r." % debug_urls)
    debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
    if gated_grpc is None:
        gated_grpc = False
    gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DebugNanCount",
        input=input,
        device_name=device_name,
        tensor_name=tensor_name,
        debug_urls=debug_urls,
        gated_grpc=gated_grpc,
        name=name)
    _result = _outputs[:]
    if _execute.must_record_gradient():
        _attrs = ("T", _op._get_attr_type("T"), "device_name",
                  _op.get_attr("device_name"), "tensor_name",
                  _op.get_attr("tensor_name"), "debug_urls",
                  _op.get_attr("debug_urls"), "gated_grpc",
                  _op._get_attr_bool("gated_grpc"))
        _inputs_flat = _op.inputs
        _execute.record_gradient("DebugNanCount", _inputs_flat, _attrs,
                                 _result)
    _result, = _result
    return _result
Example n. 26
def string_split(input, delimiter, skip_empty=True, name=None):
  r"""Split elements of `input` based on `delimiter` into a `SparseTensor`.

  Let N be the size of source (typically N will be the batch size). Split each
  element of `input` based on `delimiter` and return a `SparseTensor`
  containing the split tokens. Empty tokens are ignored.
  
  `delimiter` can be empty, or a string of split characters. If `delimiter` is an
   empty string, each element of `input` is split into individual single-byte
   character strings, including splitting of UTF-8 multibyte sequences. Otherwise
   every character of `delimiter` is a potential split point.
  
  For example:
    N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
    will be
  
    indices = [0, 0;
               0, 1;
               1, 0;
               1, 1;
               1, 2]
    shape = [2, 3]
    values = ['hello', 'world', 'a', 'b', 'c']

  Args:
    input: A `Tensor` of type `string`. 1-D. Strings to split.
    delimiter: A `Tensor` of type `string`.
      0-D. Delimiter characters (bytes), or empty string.
    skip_empty: An optional `bool`. Defaults to `True`.
      A `bool`. If `True`, skip the empty strings from the result.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (indices, values, shape).

    indices: A `Tensor` of type `int64`.
    values: A `Tensor` of type `string`.
    shape: A `Tensor` of type `int64`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if skip_empty is None:
      skip_empty = True
    skip_empty = _execute.make_bool(skip_empty, "skip_empty")
    _, _, _op = _op_def_lib._apply_op_helper(
        "StringSplit", input=input, delimiter=delimiter,
        skip_empty=skip_empty, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("skip_empty", _op.get_attr("skip_empty"))
    _execute.record_gradient(
      "StringSplit", _inputs_flat, _attrs, _result, name)
    _result = _StringSplitOutput._make(_result)
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "StringSplit", name,
        _ctx._post_execution_callbacks, input, delimiter, "skip_empty",
        skip_empty)
      _result = _StringSplitOutput._make(_result)
      return _result
    except _core._FallbackException:
      return string_split_eager_fallback(
          input, delimiter, skip_empty=skip_empty, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
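
The modern public API is `tf.strings.split`, which returns a `RaggedTensor` rather than the `SparseTensor` produced by this older op; a brief sketch (TensorFlow 2.x, eager):

import tensorflow as tf

lines = tf.constant(["hello world", "a b c"])

# Returns a RaggedTensor; call .to_sparse() if SparseTensor output is needed.
tokens = tf.strings.split(lines, sep=" ")
print(tokens)              # [[b'hello', b'world'], [b'a', b'b', b'c']]
print(tokens.to_sparse())  # indices / values / dense_shape, as in the docstring example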
Example n. 27
def audio_spectrogram(input, window_size, stride, magnitude_squared=False, name=None):
  r"""Produces a visualization of audio data over time.

  Spectrograms are a standard way of representing audio information as a series of
  slices of frequency information, one slice for each window of time. By joining
  these together into a sequence, they form a distinctive fingerprint of the sound
  over time.

  This op expects to receive audio data as an input, stored as floats in the range
  -1 to 1, together with a window width in samples, and a stride specifying how
  far to move the window between slices. From this it generates a
  three-dimensional output. The first dimension is for the channels in the input,
  so a stereo audio input would have two here for example. The second dimension is
  time, with successive frequency slices. The third dimension has an amplitude
  value for each frequency during that time slice.

  This means the layout when converted and saved as an image is rotated 90 degrees
  clockwise from a typical spectrogram. Time is descending down the Y axis, and
  the frequency decreases from left to right.

  Each value in the result represents the square root of the sum of the squares of
  the real and imaginary parts of an FFT on the current window of samples. In this
  way, the lowest dimension represents the power of each frequency in the current
  window, and adjacent windows are concatenated in the next dimension.

  To get a more intuitive and visual look at what this operation does, you can run
  tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
  resulting spectrogram as a PNG image.

  Args:
    input: A `Tensor` of type `float32`. Float representation of audio data.
    window_size: An `int`.
      How wide the input window is in samples. For the highest efficiency
      this should be a power of two, but other values are accepted.
    stride: An `int`.
      How widely apart the center of adjacent sample windows should be.
    magnitude_squared: An optional `bool`. Defaults to `False`.
      Whether to return the squared magnitude or just the
      magnitude. Using squared magnitude can avoid extra calculations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "AudioSpectrogram", name,
        tld.op_callbacks, input, "window_size", window_size, "stride", stride,
        "magnitude_squared", magnitude_squared)
      return _result
    except _core._FallbackException:
      try:
        return audio_spectrogram_eager_fallback(
            input, window_size=window_size, stride=stride,
            magnitude_squared=magnitude_squared, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  window_size = _execute.make_int(window_size, "window_size")
  stride = _execute.make_int(stride, "stride")
  if magnitude_squared is None:
    magnitude_squared = False
  magnitude_squared = _execute.make_bool(magnitude_squared, "magnitude_squared")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AudioSpectrogram", input=input, window_size=window_size,
                            stride=stride,
                            magnitude_squared=magnitude_squared, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("window_size", _op._get_attr_int("window_size"), "stride",
              _op._get_attr_int("stride"), "magnitude_squared",
              _op._get_attr_bool("magnitude_squared"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AudioSpectrogram", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
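To make the window/stride parameters concrete, here is a minimal sketch; it assumes the input is a float32 `[samples, channels]` tensor in the range -1 to 1 (as produced by e.g. `DecodeWav`), and the 400/160 sample sizes are illustrative only:

```python
import numpy as np
import tensorflow as tf

# One second of a 440 Hz sine at 16 kHz, mono: shape [samples, channels].
t = np.arange(16000, dtype=np.float32) / 16000.0
audio = tf.constant(np.sin(2.0 * np.pi * 440.0 * t)[:, np.newaxis])

# 25 ms windows (400 samples) hopped every 10 ms (160 samples).
spectrogram = audio_spectrogram(audio, window_size=400, stride=160,
                                magnitude_squared=False)
# spectrogram[0] holds one row of frequency magnitudes per window for channel 0.
```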
def sparse_feature_cross(indices, values, shapes, dense, hashed_output, num_buckets, out_type, internal_type, name=None):
  r"""Generates sparse cross form a list of sparse tensors.

  The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
  representing features of one feature column. It outputs a 2D `SparseTensor` with
  the batchwise crosses of these features.

  For example, if the inputs are

      inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

      inputs[2]: Tensor [["f"], ["g"]]

  then the output will be

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  If hashed_output=true, then the output will be

      shape = [2, 2]
      [0, 0]: HashCombine(
                  Fingerprint64("f"), HashCombine(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    indices: A list of `Tensor` objects with type `int64`.
      2-D.  Indices of each input `SparseTensor`.
    values: A list of `Tensor` objects with types from: `int64`, `string`.
      1-D.   values of each `SparseTensor`.
    shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.
      1-D.   Shapes of each `SparseTensor`.
    dense: A list of `Tensor` objects with types from: `int64`, `string`.
      2-D.    Columns represented by dense `Tensor`.
    hashed_output: A `bool`.
    num_buckets: An `int` that is `>= 0`.
    out_type: A `tf.DType` from: `tf.int64, tf.string`.
    internal_type: A `tf.DType` from: `tf.int64, tf.string`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`. 2-D.  Indices of the concatenated `SparseTensor`.
    output_values: A `Tensor` of type `out_type`. 1-D.  Non-empty values of the concatenated or hashed
      `SparseTensor`.
    output_shape: A `Tensor` of type `int64`. 1-D.  Shape of the concatenated `SparseTensor`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "SparseFeatureCross", name, _ctx._post_execution_callbacks, indices,
        values, shapes, dense, "hashed_output", hashed_output, "num_buckets",
        num_buckets, "out_type", out_type, "internal_type", internal_type)
      _result = _SparseFeatureCrossOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return sparse_feature_cross_eager_fallback(
            indices, values, shapes, dense, hashed_output=hashed_output,
            num_buckets=num_buckets, out_type=out_type,
            internal_type=internal_type, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              sparse_feature_cross, indices=indices, values=values,
                                    shapes=shapes, dense=dense,
                                    hashed_output=hashed_output,
                                    num_buckets=num_buckets,
                                    out_type=out_type,
                                    internal_type=internal_type, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_feature_cross' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_feature_cross' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_feature_cross' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  hashed_output = _execute.make_bool(hashed_output, "hashed_output")
  num_buckets = _execute.make_int(num_buckets, "num_buckets")
  out_type = _execute.make_type(out_type, "out_type")
  internal_type = _execute.make_type(internal_type, "internal_type")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseFeatureCross", indices=indices, values=values, shapes=shapes,
                              dense=dense, hashed_output=hashed_output,
                              num_buckets=num_buckets, out_type=out_type,
                              internal_type=internal_type, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          sparse_feature_cross, indices=indices, values=values, shapes=shapes,
                                dense=dense, hashed_output=hashed_output,
                                num_buckets=num_buckets, out_type=out_type,
                                internal_type=internal_type, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("N", _op.get_attr("N"), "hashed_output",
            _op.get_attr("hashed_output"), "num_buckets",
            _op.get_attr("num_buckets"), "sparse_types",
            _op.get_attr("sparse_types"), "dense_types",
            _op.get_attr("dense_types"), "out_type", _op.get_attr("out_type"),
            "internal_type", _op.get_attr("internal_type"))
  _execute.record_gradient(
      "SparseFeatureCross", _inputs_flat, _attrs, _result, name)
  _result = _SparseFeatureCrossOutput._make(_result)
  return _result
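The docstring example above can be fed through the wrapper by passing the components of each `SparseTensor` plus the dense column. The sketch below assumes TF 1.x-style graph construction; the `out_type`/`internal_type` choice shown is just one combination that matches string inputs:

```python
import tensorflow as tf

sp_a = tf.SparseTensor(indices=[[0, 0], [1, 0], [1, 1]],
                       values=["a", "b", "c"], dense_shape=[2, 2])
sp_d = tf.SparseTensor(indices=[[0, 0], [1, 0]],
                       values=["d", "e"], dense_shape=[2, 1])
dense_f = tf.constant([["f"], ["g"]])

out_indices, out_values, out_shape = sparse_feature_cross(
    indices=[sp_a.indices, sp_d.indices],
    values=[sp_a.values, sp_d.values],
    shapes=[sp_a.dense_shape, sp_d.dense_shape],
    dense=[dense_f],
    hashed_output=False, num_buckets=0,
    out_type=tf.string, internal_type=tf.string)
# out_values should contain "a_X_d_X_f", "b_X_e_X_g" and "c_X_e_X_g".
```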
def sdca_optimizer(sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data, loss_type, l1, l2, num_loss_partitions, num_inner_iterations, adaptative=False, name=None):
  r"""Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for

  linear models with L1 + L2 regularization. As the global optimization objective
  is strongly convex, the optimizer optimizes the dual objective at each step. The
  optimizer applies each update one example at a time. Examples are sampled
  uniformly, the optimizer is learning-rate free, and it enjoys a linear
  convergence rate.
  
  [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
  Shai Shalev-Shwartz, Tong Zhang. 2012
  
  $$\text{Loss Objective} = \sum_{i} f_{i}(w x_{i}) + \frac{l_2}{2}\,\|w\|_2^2 + l_1\,\|w\|_1$$
  
  [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
  Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
  Peter Richtarik, Martin Takac. 2015
  
  [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
  Dominik Csiba, Zheng Qu, Peter Richtarik. 2015

  Args:
    sparse_example_indices: A list of `Tensor` objects with type `int64`.
      a list of vectors which contain example indices.
    sparse_feature_indices: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `int64`.
      a list of vectors which contain feature indices.
    sparse_feature_values: A list of `Tensor` objects with type `float32`.
      a list of vectors which contains feature value
      associated with each feature group.
    dense_features: A list of `Tensor` objects with type `float32`.
      a list of matrices which contains the dense feature values.
    example_weights: A `Tensor` of type `float32`.
      a vector which contains the weight associated with each
      example.
    example_labels: A `Tensor` of type `float32`.
      a vector which contains the label/target associated with each
      example.
    sparse_indices: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `int64`.
      a list of vectors where each value is the index that has a corresponding
      weight in sparse_weights. This field may be omitted for the dense approach.
    sparse_weights: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `float32`.
      a list of vectors where each value is the weight associated with
      a sparse feature group.
    dense_weights: A list with the same length as `dense_features` of `Tensor` objects with type `float32`.
      a list of vectors where the values are the weights associated
      with a dense feature group.
    example_state_data: A `Tensor` of type `float32`.
      a list of vectors containing the example state data.
    loss_type: A `string` from: `"logistic_loss", "squared_loss", "hinge_loss", "smooth_hinge_loss"`.
      Type of the primal loss. Currently SdcaSolver supports logistic,
      squared, hinge, and smooth hinge losses.
    l1: A `float`. Symmetric l1 regularization strength.
    l2: A `float`. Symmetric l2 regularization strength.
    num_loss_partitions: An `int` that is `>= 1`.
      Number of partitions of the global loss function.
    num_inner_iterations: An `int` that is `>= 1`.
      Number of iterations per mini-batch.
    adaptative: An optional `bool`. Defaults to `False`.
      Whether to use Adaptive SDCA for the inner loop.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights).

    out_example_state_data: A `Tensor` of type `float32`.
    out_delta_sparse_weights: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `float32`.
    out_delta_dense_weights: A list with the same length as `dense_features` of `Tensor` objects with type `float32`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if not isinstance(sparse_example_indices, (list, tuple)):
      raise TypeError(
          "Expected list for 'sparse_example_indices' argument to "
          "'sdca_optimizer' Op, not %r." % sparse_example_indices)
    _attr_num_sparse_features = len(sparse_example_indices)
    if not isinstance(sparse_feature_indices, (list, tuple)):
      raise TypeError(
          "Expected list for 'sparse_feature_indices' argument to "
          "'sdca_optimizer' Op, not %r." % sparse_feature_indices)
    if len(sparse_feature_indices) != _attr_num_sparse_features:
      raise ValueError(
          "List argument 'sparse_feature_indices' to 'sdca_optimizer' Op with length %d "
          "must match length %d of argument 'sparse_example_indices'." %
          (len(sparse_feature_indices), _attr_num_sparse_features))
    if not isinstance(sparse_indices, (list, tuple)):
      raise TypeError(
          "Expected list for 'sparse_indices' argument to "
          "'sdca_optimizer' Op, not %r." % sparse_indices)
    if len(sparse_indices) != _attr_num_sparse_features:
      raise ValueError(
          "List argument 'sparse_indices' to 'sdca_optimizer' Op with length %d "
          "must match length %d of argument 'sparse_example_indices'." %
          (len(sparse_indices), _attr_num_sparse_features))
    if not isinstance(sparse_weights, (list, tuple)):
      raise TypeError(
          "Expected list for 'sparse_weights' argument to "
          "'sdca_optimizer' Op, not %r." % sparse_weights)
    if len(sparse_weights) != _attr_num_sparse_features:
      raise ValueError(
          "List argument 'sparse_weights' to 'sdca_optimizer' Op with length %d "
          "must match length %d of argument 'sparse_example_indices'." %
          (len(sparse_weights), _attr_num_sparse_features))
    if not isinstance(sparse_feature_values, (list, tuple)):
      raise TypeError(
          "Expected list for 'sparse_feature_values' argument to "
          "'sdca_optimizer' Op, not %r." % sparse_feature_values)
    _attr_num_sparse_features_with_values = len(sparse_feature_values)
    if not isinstance(dense_features, (list, tuple)):
      raise TypeError(
          "Expected list for 'dense_features' argument to "
          "'sdca_optimizer' Op, not %r." % dense_features)
    _attr_num_dense_features = len(dense_features)
    if not isinstance(dense_weights, (list, tuple)):
      raise TypeError(
          "Expected list for 'dense_weights' argument to "
          "'sdca_optimizer' Op, not %r." % dense_weights)
    if len(dense_weights) != _attr_num_dense_features:
      raise ValueError(
          "List argument 'dense_weights' to 'sdca_optimizer' Op with length %d "
          "must match length %d of argument 'dense_features'." %
          (len(dense_weights), _attr_num_dense_features))
    loss_type = _execute.make_str(loss_type, "loss_type")
    l1 = _execute.make_float(l1, "l1")
    l2 = _execute.make_float(l2, "l2")
    num_loss_partitions = _execute.make_int(num_loss_partitions, "num_loss_partitions")
    num_inner_iterations = _execute.make_int(num_inner_iterations, "num_inner_iterations")
    if adaptative is None:
      adaptative = False
    adaptative = _execute.make_bool(adaptative, "adaptative")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SdcaOptimizer", sparse_example_indices=sparse_example_indices,
        sparse_feature_indices=sparse_feature_indices,
        sparse_feature_values=sparse_feature_values,
        dense_features=dense_features, example_weights=example_weights,
        example_labels=example_labels, sparse_indices=sparse_indices,
        sparse_weights=sparse_weights, dense_weights=dense_weights,
        example_state_data=example_state_data, loss_type=loss_type, l1=l1,
        l2=l2, num_loss_partitions=num_loss_partitions,
        num_inner_iterations=num_inner_iterations, adaptative=adaptative,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("loss_type", _op.get_attr("loss_type"), "adaptative",
              _op.get_attr("adaptative"), "num_sparse_features",
              _op.get_attr("num_sparse_features"),
              "num_sparse_features_with_values",
              _op.get_attr("num_sparse_features_with_values"),
              "num_dense_features", _op.get_attr("num_dense_features"), "l1",
              _op.get_attr("l1"), "l2", _op.get_attr("l2"),
              "num_loss_partitions", _op.get_attr("num_loss_partitions"),
              "num_inner_iterations", _op.get_attr("num_inner_iterations"))
    _execute.record_gradient(
      "SdcaOptimizer", _inputs_flat, _attrs, _result, name)
    _result = _result[:1] + [_result[1:1 + _attr_num_sparse_features]] + _result[1 + _attr_num_sparse_features:]
    _result = _result[:2] + [_result[2:]]
    _result = _SdcaOptimizerOutput._make(_result)
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "SdcaOptimizer", name,
        _ctx._post_execution_callbacks, sparse_example_indices,
        sparse_feature_indices, sparse_feature_values, dense_features,
        example_weights, example_labels, sparse_indices, sparse_weights,
        dense_weights, example_state_data, "loss_type", loss_type,
        "adaptative", adaptative, "l1", l1, "l2", l2, "num_loss_partitions",
        num_loss_partitions, "num_inner_iterations", num_inner_iterations)
      _result = _SdcaOptimizerOutput._make(_result)
      return _result
    except _core._FallbackException:
      return sdca_optimizer_eager_fallback(
          sparse_example_indices, sparse_feature_indices,
          sparse_feature_values, dense_features, example_weights,
          example_labels, sparse_indices, sparse_weights, dense_weights,
          example_state_data, loss_type=loss_type, adaptative=adaptative,
          l1=l1, l2=l2, num_loss_partitions=num_loss_partitions,
          num_inner_iterations=num_inner_iterations, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
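The call below is a minimal, untested sketch of a single SDCA step on one dense feature group and no sparse feature groups, meant only to show how the list-valued arguments line up. The `[num_examples, 4]` state shape and the acceptance of empty sparse lists are assumptions about this op version, not guarantees:

```python
import tensorflow as tf

num_examples = 4
dense_x = tf.constant([[1.0], [2.0], [3.0], [4.0]])   # one dense feature group
labels = tf.constant([0.0, 0.0, 1.0, 1.0])
weights = tf.ones([num_examples])
state = tf.zeros([num_examples, 4])                   # per-example dual state (assumed shape)
dense_w = tf.zeros([1])                               # current weights of the dense group

out_state, delta_sparse_w, delta_dense_w = sdca_optimizer(
    sparse_example_indices=[], sparse_feature_indices=[], sparse_feature_values=[],
    dense_features=[dense_x], example_weights=weights, example_labels=labels,
    sparse_indices=[], sparse_weights=[], dense_weights=[dense_w],
    example_state_data=state, loss_type="logistic_loss",
    l1=0.0, l2=1.0, num_loss_partitions=1, num_inner_iterations=1)
```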
def single_image_random_dot_stereograms_eager_fallback(
        depth_values,
        hidden_surface_removal=True,
        convergence_dots_size=8,
        dots_per_inch=72,
        eye_separation=2.5,
        mu=0.3333,
        normalize=True,
        normalize_max=-100,
        normalize_min=100,
        border_level=0,
        number_colors=256,
        output_image_shape=[1024, 768, 1],
        output_data_window=[1022, 757],
        name=None,
        ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function single_image_random_dot_stereograms
  """
    _ctx = ctx if ctx else _context.context()
    if hidden_surface_removal is None:
        hidden_surface_removal = True
    hidden_surface_removal = _execute.make_bool(hidden_surface_removal,
                                                "hidden_surface_removal")
    if convergence_dots_size is None:
        convergence_dots_size = 8
    convergence_dots_size = _execute.make_int(convergence_dots_size,
                                              "convergence_dots_size")
    if dots_per_inch is None:
        dots_per_inch = 72
    dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
    if eye_separation is None:
        eye_separation = 2.5
    eye_separation = _execute.make_float(eye_separation, "eye_separation")
    if mu is None:
        mu = 0.3333
    mu = _execute.make_float(mu, "mu")
    if normalize is None:
        normalize = True
    normalize = _execute.make_bool(normalize, "normalize")
    if normalize_max is None:
        normalize_max = -100
    normalize_max = _execute.make_float(normalize_max, "normalize_max")
    if normalize_min is None:
        normalize_min = 100
    normalize_min = _execute.make_float(normalize_min, "normalize_min")
    if border_level is None:
        border_level = 0
    border_level = _execute.make_float(border_level, "border_level")
    if number_colors is None:
        number_colors = 256
    number_colors = _execute.make_int(number_colors, "number_colors")
    if output_image_shape is None:
        output_image_shape = [1024, 768, 1]
    output_image_shape = _execute.make_shape(output_image_shape,
                                             "output_image_shape")
    if output_data_window is None:
        output_data_window = [1022, 757]
    output_data_window = _execute.make_shape(output_data_window,
                                             "output_data_window")
    _attr_T, (depth_values, ) = _execute.args_to_matching_eager([depth_values],
                                                                _ctx)
    _inputs_flat = [depth_values]
    _attrs = ("T", _attr_T, "hidden_surface_removal", hidden_surface_removal,
              "convergence_dots_size", convergence_dots_size, "dots_per_inch",
              dots_per_inch, "eye_separation", eye_separation, "mu", mu,
              "normalize", normalize, "normalize_max", normalize_max,
              "normalize_min", normalize_min, "border_level", border_level,
              "number_colors", number_colors, "output_image_shape",
              output_image_shape, "output_data_window", output_data_window)
    _result = _execute.execute(b"SingleImageRandomDotStereograms",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("SingleImageRandomDotStereograms", _inputs_flat,
                             _attrs, _result, name)
    _result, = _result
    return _result
def ragged_tensor_to_variant(rt_nested_splits,
                             rt_dense_values,
                             batched_input,
                             name=None):
    r"""Encodes a `RaggedTensor` into a `variant` Tensor.


  

  Encodes the given `RaggedTensor` and returns a `variant` Tensor. If

  `batched_input` is True, then input `RaggedTensor` is unbatched along the

  zero-th dimension, each component `RaggedTensor` is encoded into a scalar

  `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.

  If `batched_input` is False, then the input `RaggedTensor` is encoded as is and

  a scalar `variant` Tensor is returned. A `RaggedTensor` is encoded by first

  creating a 1-D `variant` Tensor with `ragged_rank + 1` elements, containing the

  splits and values Tensors of the `RaggedTensor`. Then the 1-D `variant` Tensor

  is wrapped in a scalar `variant` Tensor. See `RaggedTensorFromVariant` for the

  corresponding decoding logic.

  Args:
    rt_nested_splits: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`.
      A list of one or more Tensors representing the splits of the input
      `RaggedTensor`.
    rt_dense_values: A `Tensor`.
      A Tensor representing the values of the input `RaggedTensor`.
    batched_input: A `bool`.
      A `bool` denoting whether the input is a batched `RaggedTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "RaggedTensorToVariant", name, _ctx._post_execution_callbacks,
                rt_nested_splits, rt_dense_values, "batched_input",
                batched_input)
            return _result
        except _core._FallbackException:
            try:
                return ragged_tensor_to_variant_eager_fallback(
                    rt_nested_splits,
                    rt_dense_values,
                    batched_input=batched_input,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if not isinstance(rt_nested_splits, (list, tuple)):
        raise TypeError("Expected list for 'rt_nested_splits' argument to "
                        "'ragged_tensor_to_variant' Op, not %r." %
                        rt_nested_splits)
    _attr_RAGGED_RANK = len(rt_nested_splits)
    batched_input = _execute.make_bool(batched_input, "batched_input")
    _, _, _op = _op_def_lib._apply_op_helper("RaggedTensorToVariant",
                                             rt_nested_splits=rt_nested_splits,
                                             rt_dense_values=rt_dense_values,
                                             batched_input=batched_input,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("RAGGED_RANK", _op.get_attr("RAGGED_RANK"), "Tvalues",
              _op.get_attr("Tvalues"), "Tsplits", _op.get_attr("Tsplits"),
              "batched_input", _op.get_attr("batched_input"))
    _execute.record_gradient("RaggedTensorToVariant", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
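A small sketch of the encode path described above, assuming a TensorFlow build where `tf.ragged` and this generated wrapper coexist; the splits and values are taken directly from a `RaggedTensor`:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
# One splits tensor per ragged dimension, plus the flat values.
encoded = ragged_tensor_to_variant(rt_nested_splits=[rt.row_splits],
                                   rt_dense_values=rt.flat_values,
                                   batched_input=True)
# With batched_input=True the result is a 1-D variant Tensor, one element per row.
```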
def single_image_random_dot_stereograms(depth_values,
                                        hidden_surface_removal=True,
                                        convergence_dots_size=8,
                                        dots_per_inch=72,
                                        eye_separation=2.5,
                                        mu=0.3333,
                                        normalize=True,
                                        normalize_max=-100,
                                        normalize_min=100,
                                        border_level=0,
                                        number_colors=256,
                                        output_image_shape=[1024, 768, 1],
                                        output_data_window=[1022, 757],
                                        name=None):
    r"""Outputs a single image random dot stereogram for export via encode_PNG/JPG OP.

  Given the 2-D tensor 'depth_values' with encoded Z values, this operation will
  encode 3-D data into a 2-D image.  The output of this Op is suitable for the
  encode_PNG/JPG ops.  Be careful with image compression, as it may corrupt the
  encoded 3-D data within the image.

  This Op is based upon:
  'http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper'

  Example use which outputs a SIRDS image as picture_out.png:
  ```python
  img=[[1,2,3,3,2,1],
       [1,2,3,4,5,2],
       [1,2,3,4,5,3],
       [1,2,3,4,5,4],
       [6,5,4,4,5,5]]

  session = tf.InteractiveSession()

  sirds = single_image_random_dot_stereograms(img,convergence_dots_size=8,number_colors=256,normalize=True)

  out = sirds.eval()

  png = tf.image.encode_png(out).eval()

  with open('picture_out.png', 'wb') as f:
      f.write(png)
  ```

  Args:
    depth_values: A `Tensor`. Must be one of the following types: `float64`, `float32`, `int64`, `int32`.
      Z values of data to encode into 'output_data_window' window,
      lower values are further away {0.0 floor(far), 1.0 ceiling(near) after normalization}, must be 2-D tensor
    hidden_surface_removal: An optional `bool`. Defaults to `True`.
      Activate hidden surface removal
    convergence_dots_size: An optional `int`. Defaults to `8`.
      Black dot size in pixels to help the viewer converge the image, drawn on the bottom of the image
    dots_per_inch: An optional `int`. Defaults to `72`.
      Output device in dots/inch
    eye_separation: An optional `float`. Defaults to `2.5`.
      Separation between eyes in inches
    mu: An optional `float`. Defaults to `0.3333`.
      Depth of field, fraction of viewing distance (e.g. 1/3 = 0.3333)
    normalize: An optional `bool`. Defaults to `True`.
      Normalize input data to [0.0, 1.0]
    normalize_max: An optional `float`. Defaults to `-100`.
      Fix MAX value for Normalization - if < MIN, autoscale
    normalize_min: An optional `float`. Defaults to `100`.
      Fix MIN value for Normalization - if > MAX, autoscale
    border_level: An optional `float`. Defaults to `0`.
      Value of border depth 0.0 {far} to 1.0 {near}
    number_colors: An optional `int`. Defaults to `256`.
      2 (Black & White),256 (grayscale), and Numbers > 256 (Full Color) are all that are supported currently
    output_image_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1024, 768, 1]`.
      Output size of returned image in X,Y, Channels 1-grayscale, 3 color (1024, 768, 1),
      channels will be updated to 3 if 'number_colors' > 256
    output_data_window: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1022, 757]`.
      Size of "DATA" window, must be equal to or smaller than 'output_image_shape', will be centered
      and use 'convergence_dots_size' for best fit to avoid overlap if possible
    name: A name for the operation (optional).

  Returns:
    A tensor of size 'output_image_shape' with the encoded 'depth_values'
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "SingleImageRandomDotStereograms", name,
                _ctx._post_execution_callbacks, depth_values,
                "hidden_surface_removal", hidden_surface_removal,
                "convergence_dots_size", convergence_dots_size,
                "dots_per_inch", dots_per_inch, "eye_separation",
                eye_separation, "mu", mu, "normalize", normalize,
                "normalize_max", normalize_max, "normalize_min", normalize_min,
                "border_level", border_level, "number_colors", number_colors,
                "output_image_shape", output_image_shape, "output_data_window",
                output_data_window)
            return _result
        except _core._FallbackException:
            try:
                return single_image_random_dot_stereograms_eager_fallback(
                    depth_values,
                    hidden_surface_removal=hidden_surface_removal,
                    convergence_dots_size=convergence_dots_size,
                    dots_per_inch=dots_per_inch,
                    eye_separation=eye_separation,
                    mu=mu,
                    normalize=normalize,
                    normalize_max=normalize_max,
                    normalize_min=normalize_min,
                    border_level=border_level,
                    number_colors=number_colors,
                    output_image_shape=output_image_shape,
                    output_data_window=output_data_window,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
            except (TypeError, ValueError):
                result = _dispatch.dispatch(
                    single_image_random_dot_stereograms,
                    depth_values=depth_values,
                    hidden_surface_removal=hidden_surface_removal,
                    convergence_dots_size=convergence_dots_size,
                    dots_per_inch=dots_per_inch,
                    eye_separation=eye_separation,
                    mu=mu,
                    normalize=normalize,
                    normalize_max=normalize_max,
                    normalize_min=normalize_min,
                    border_level=border_level,
                    number_colors=number_colors,
                    output_image_shape=output_image_shape,
                    output_data_window=output_data_window,
                    name=name)
                if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
                    return result
                raise
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if hidden_surface_removal is None:
        hidden_surface_removal = True
    hidden_surface_removal = _execute.make_bool(hidden_surface_removal,
                                                "hidden_surface_removal")
    if convergence_dots_size is None:
        convergence_dots_size = 8
    convergence_dots_size = _execute.make_int(convergence_dots_size,
                                              "convergence_dots_size")
    if dots_per_inch is None:
        dots_per_inch = 72
    dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
    if eye_separation is None:
        eye_separation = 2.5
    eye_separation = _execute.make_float(eye_separation, "eye_separation")
    if mu is None:
        mu = 0.3333
    mu = _execute.make_float(mu, "mu")
    if normalize is None:
        normalize = True
    normalize = _execute.make_bool(normalize, "normalize")
    if normalize_max is None:
        normalize_max = -100
    normalize_max = _execute.make_float(normalize_max, "normalize_max")
    if normalize_min is None:
        normalize_min = 100
    normalize_min = _execute.make_float(normalize_min, "normalize_min")
    if border_level is None:
        border_level = 0
    border_level = _execute.make_float(border_level, "border_level")
    if number_colors is None:
        number_colors = 256
    number_colors = _execute.make_int(number_colors, "number_colors")
    if output_image_shape is None:
        output_image_shape = [1024, 768, 1]
    output_image_shape = _execute.make_shape(output_image_shape,
                                             "output_image_shape")
    if output_data_window is None:
        output_data_window = [1022, 757]
    output_data_window = _execute.make_shape(output_data_window,
                                             "output_data_window")
    try:
        _, _, _op = _op_def_lib._apply_op_helper(
            "SingleImageRandomDotStereograms",
            depth_values=depth_values,
            hidden_surface_removal=hidden_surface_removal,
            convergence_dots_size=convergence_dots_size,
            dots_per_inch=dots_per_inch,
            eye_separation=eye_separation,
            mu=mu,
            normalize=normalize,
            normalize_max=normalize_max,
            normalize_min=normalize_min,
            border_level=border_level,
            number_colors=number_colors,
            output_image_shape=output_image_shape,
            output_data_window=output_data_window,
            name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(
            single_image_random_dot_stereograms,
            depth_values=depth_values,
            hidden_surface_removal=hidden_surface_removal,
            convergence_dots_size=convergence_dots_size,
            dots_per_inch=dots_per_inch,
            eye_separation=eye_separation,
            mu=mu,
            normalize=normalize,
            normalize_max=normalize_max,
            normalize_min=normalize_min,
            border_level=border_level,
            number_colors=number_colors,
            output_image_shape=output_image_shape,
            output_data_window=output_data_window,
            name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "hidden_surface_removal",
              _op.get_attr("hidden_surface_removal"), "convergence_dots_size",
              _op.get_attr("convergence_dots_size"), "dots_per_inch",
              _op.get_attr("dots_per_inch"), "eye_separation",
              _op.get_attr("eye_separation"), "mu", _op.get_attr("mu"),
              "normalize", _op.get_attr("normalize"), "normalize_max",
              _op.get_attr("normalize_max"), "normalize_min",
              _op.get_attr("normalize_min"), "border_level",
              _op.get_attr("border_level"), "number_colors",
              _op.get_attr("number_colors"), "output_image_shape",
              _op.get_attr("output_image_shape"), "output_data_window",
              _op.get_attr("output_data_window"))
    _execute.record_gradient("SingleImageRandomDotStereograms", _inputs_flat,
                             _attrs, _result, name)
    _result, = _result
    return _result
def dense_to_dense_set_operation(set1, set2, set_operation, validate_indices=True, name=None):
  r"""Applies set operation along last dimension of 2 `Tensor` inputs.

  See SetOperationOp::SetOperationFromContext for values of `set_operation`.
  
  Output `result` is a `SparseTensor` represented by `result_indices`,
  `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
  has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
  dimension contains the result of `set_operation` applied to the corresponding
  `[0...n-1]` dimension of `set`.

  Args:
    set1: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
      `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
      Dimension `n` contains values in a set, duplicates are allowed but ignored.
    set2: A `Tensor`. Must have the same type as `set1`.
      `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
      Dimension `n` contains values in a set, duplicates are allowed but ignored.
    set_operation: A `string`.
    validate_indices: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (result_indices, result_values, result_shape).

    result_indices: A `Tensor` of type `int64`.
    result_values: A `Tensor`. Has the same type as `set1`.
    result_shape: A `Tensor` of type `int64`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    set_operation = _execute.make_str(set_operation, "set_operation")
    if validate_indices is None:
      validate_indices = True
    validate_indices = _execute.make_bool(validate_indices, "validate_indices")
    _, _, _op = _op_def_lib._apply_op_helper(
        "DenseToDenseSetOperation", set1=set1, set2=set2,
        set_operation=set_operation, validate_indices=validate_indices,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("set_operation", _op.get_attr("set_operation"),
              "validate_indices", _op.get_attr("validate_indices"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "DenseToDenseSetOperation", _inputs_flat, _attrs, _result, name)
    _result = _DenseToDenseSetOperationOutput._make(_result)
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "DenseToDenseSetOperation", name,
        _ctx._post_execution_callbacks, set1, set2, "set_operation",
        set_operation, "validate_indices", validate_indices)
      _result = _DenseToDenseSetOperationOutput._make(_result)
      return _result
    except _core._FallbackException:
      return dense_to_dense_set_operation_eager_fallback(
          set1, set2, set_operation=set_operation,
          validate_indices=validate_indices, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
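For instance, intersecting the sets stored along the last dimension of two rank-2 tensors could look like the sketch below; `"intersection"` is one accepted `set_operation` value (others such as `"union"`, `"a-b"` and `"b-a"` are assumed from the set-ops family):

```python
import tensorflow as tf

set1 = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64)
set2 = tf.constant([[2, 3, 7], [5, 8, 9]], dtype=tf.int64)

idx, vals, shape = dense_to_dense_set_operation(set1, set2,
                                                set_operation="intersection")
# Row 0 intersects to {2, 3} and row 1 to {5}; the outputs are the components
# of a SparseTensor (result_indices, result_values, result_shape).
```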
def single_image_random_dot_stereograms(depth_values, hidden_surface_removal=True, convergence_dots_size=8, dots_per_inch=72, eye_separation=2.5, mu=0.3333, normalize=True, normalize_max=-100, normalize_min=100, border_level=0, number_colors=256, output_image_shape=[1024, 768, 1], output_data_window=[1022, 757], name=None):
  r"""Outputs a single image random dot stereogram for export via encode_PNG/JPG OP.

  Given the 2-D tensor 'depth_values' with encoded Z values, this operation will
  encode 3-D data into a 2-D image.  The output of this Op is suitable for the
  encode_PNG/JPG ops.  Be careful with image compression, as it may corrupt the
  encoded 3-D data within the image.

  This Op is based upon:
  'http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper'

  Example use which outputs a SIRDS image as picture_out.png:
  ```python
  img=[[1,2,3,3,2,1],
       [1,2,3,4,5,2],
       [1,2,3,4,5,3],
       [1,2,3,4,5,4],
       [6,5,4,4,5,5]]

  session = tf.InteractiveSession()

  sirds = single_image_random_dot_stereograms(img,convergence_dots_size=8,number_colors=256,normalize=True)

  out = sirds.eval()

  png = tf.image.encode_png(out).eval()

  with open('picture_out.png', 'wb') as f:
      f.write(png)
  ```

  Args:
    depth_values: A `Tensor`. Must be one of the following types: `float64`, `float32`, `int64`, `int32`.
      Z values of data to encode into 'output_data_window' window,
      lower values are further away {0.0 floor(far), 1.0 ceiling(near) after normalization}, must be 2-D tensor
    hidden_surface_removal: An optional `bool`. Defaults to `True`.
      Activate hidden surface removal
    convergence_dots_size: An optional `int`. Defaults to `8`.
      Black dot size in pixels to help the viewer converge the image, drawn on the bottom of the image
    dots_per_inch: An optional `int`. Defaults to `72`.
      Output device in dots/inch
    eye_separation: An optional `float`. Defaults to `2.5`.
      Separation between eyes in inches
    mu: An optional `float`. Defaults to `0.3333`.
      Depth of field, fraction of viewing distance (e.g. 1/3 = 0.3333)
    normalize: An optional `bool`. Defaults to `True`.
      Normalize input data to [0.0, 1.0]
    normalize_max: An optional `float`. Defaults to `-100`.
      Fix MAX value for Normalization - if < MIN, autoscale
    normalize_min: An optional `float`. Defaults to `100`.
      Fix MIN value for Normalization - if > MAX, autoscale
    border_level: An optional `float`. Defaults to `0`.
      Value of border depth 0.0 {far} to 1.0 {near}
    number_colors: An optional `int`. Defaults to `256`.
      2 (Black & White),256 (grayscale), and Numbers > 256 (Full Color) are all that are supported currently
    output_image_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1024, 768, 1]`.
      Output size of returned image in X,Y, Channels 1-grayscale, 3 color (1024, 768, 1),
      channels will be updated to 3 if 'number_colors' > 256
    output_data_window: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1022, 757]`.
      Size of "DATA" window, must be equal to or smaller than 'output_image_shape', will be centered
      and use 'convergence_dots_size' for best fit to avoid overlap if possible
    name: A name for the operation (optional).

  Returns:
    A tensor of size 'output_image_shape' with the encoded 'depth_values'
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if hidden_surface_removal is None:
      hidden_surface_removal = True
    hidden_surface_removal = _execute.make_bool(hidden_surface_removal, "hidden_surface_removal")
    if convergence_dots_size is None:
      convergence_dots_size = 8
    convergence_dots_size = _execute.make_int(convergence_dots_size, "convergence_dots_size")
    if dots_per_inch is None:
      dots_per_inch = 72
    dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
    if eye_separation is None:
      eye_separation = 2.5
    eye_separation = _execute.make_float(eye_separation, "eye_separation")
    if mu is None:
      mu = 0.3333
    mu = _execute.make_float(mu, "mu")
    if normalize is None:
      normalize = True
    normalize = _execute.make_bool(normalize, "normalize")
    if normalize_max is None:
      normalize_max = -100
    normalize_max = _execute.make_float(normalize_max, "normalize_max")
    if normalize_min is None:
      normalize_min = 100
    normalize_min = _execute.make_float(normalize_min, "normalize_min")
    if border_level is None:
      border_level = 0
    border_level = _execute.make_float(border_level, "border_level")
    if number_colors is None:
      number_colors = 256
    number_colors = _execute.make_int(number_colors, "number_colors")
    if output_image_shape is None:
      output_image_shape = [1024, 768, 1]
    output_image_shape = _execute.make_shape(output_image_shape, "output_image_shape")
    if output_data_window is None:
      output_data_window = [1022, 757]
    output_data_window = _execute.make_shape(output_data_window, "output_data_window")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SingleImageRandomDotStereograms", depth_values=depth_values,
        hidden_surface_removal=hidden_surface_removal,
        convergence_dots_size=convergence_dots_size,
        dots_per_inch=dots_per_inch, eye_separation=eye_separation, mu=mu,
        normalize=normalize, normalize_max=normalize_max,
        normalize_min=normalize_min, border_level=border_level,
        number_colors=number_colors, output_image_shape=output_image_shape,
        output_data_window=output_data_window, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "hidden_surface_removal",
              _op.get_attr("hidden_surface_removal"), "convergence_dots_size",
              _op.get_attr("convergence_dots_size"), "dots_per_inch",
              _op.get_attr("dots_per_inch"), "eye_separation",
              _op.get_attr("eye_separation"), "mu", _op.get_attr("mu"),
              "normalize", _op.get_attr("normalize"), "normalize_max",
              _op.get_attr("normalize_max"), "normalize_min",
              _op.get_attr("normalize_min"), "border_level",
              _op.get_attr("border_level"), "number_colors",
              _op.get_attr("number_colors"), "output_image_shape",
              _op.get_attr("output_image_shape"), "output_data_window",
              _op.get_attr("output_data_window"))
    _execute.record_gradient(
      "SingleImageRandomDotStereograms", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SingleImageRandomDotStereograms", name,
        _ctx._post_execution_callbacks, depth_values,
        "hidden_surface_removal", hidden_surface_removal,
        "convergence_dots_size", convergence_dots_size, "dots_per_inch",
        dots_per_inch, "eye_separation", eye_separation, "mu", mu,
        "normalize", normalize, "normalize_max", normalize_max,
        "normalize_min", normalize_min, "border_level", border_level,
        "number_colors", number_colors, "output_image_shape",
        output_image_shape, "output_data_window", output_data_window)
      return _result
    except _core._FallbackException:
      return single_image_random_dot_stereograms_eager_fallback(
          depth_values, hidden_surface_removal=hidden_surface_removal,
          convergence_dots_size=convergence_dots_size,
          dots_per_inch=dots_per_inch, eye_separation=eye_separation, mu=mu,
          normalize=normalize, normalize_max=normalize_max,
          normalize_min=normalize_min, border_level=border_level,
          number_colors=number_colors, output_image_shape=output_image_shape,
          output_data_window=output_data_window, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
Example no. 35
def enter(data,
          frame_name,
          is_constant=False,
          parallel_iterations=10,
          name=None):
    r"""Creates or finds a child frame, and makes `data` available to the child frame.

  This op is used together with `Exit` to create loops in the graph.
  The unique `frame_name` is used by the `Executor` to identify frames. If
  `is_constant` is true, `output` is a constant in the child frame; otherwise
  it may be changed in the child frame. At most `parallel_iterations` iterations
  are run in parallel in the child frame.

  Args:
    data: A `Tensor`. The tensor to be made available to the child frame.
    frame_name: A `string`. The name of the child frame.
    is_constant: An optional `bool`. Defaults to `False`.
      If true, the output is constant within the child frame.
    parallel_iterations: An optional `int`. Defaults to `10`.
      The number of iterations allowed to run in parallel.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "Enter", name, _ctx.post_execution_callbacks, data,
                "frame_name", frame_name, "is_constant", is_constant,
                "parallel_iterations", parallel_iterations)
            return _result
        except _core._FallbackException:
            try:
                return enter_eager_fallback(
                    data,
                    frame_name=frame_name,
                    is_constant=is_constant,
                    parallel_iterations=parallel_iterations,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    frame_name = _execute.make_str(frame_name, "frame_name")
    if is_constant is None:
        is_constant = False
    is_constant = _execute.make_bool(is_constant, "is_constant")
    if parallel_iterations is None:
        parallel_iterations = 10
    parallel_iterations = _execute.make_int(parallel_iterations,
                                            "parallel_iterations")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Enter",
        data=data,
        frame_name=frame_name,
        is_constant=is_constant,
        parallel_iterations=parallel_iterations,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op._get_attr_type("T"), "frame_name",
              _op.get_attr("frame_name"), "is_constant",
              _op.get_attr("is_constant"), "parallel_iterations",
              _op.get_attr("parallel_iterations"))
    _execute.record_gradient("Enter", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
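`Enter` is a low-level control-flow primitive that `tf.while_loop` normally emits on your behalf; the sketch below only builds the op in a TF 1.x-style graph (running it outside an executor-managed frame is not meaningful), and the frame name is purely illustrative:

```python
import tensorflow as tf  # assumes a TF 1.x-style (graph mode) runtime, matching this wrapper

x = tf.constant(1.0)
# Make `x` available inside a (hypothetical) child frame named "demo_frame".
entered = enter(x, frame_name="demo_frame", is_constant=False,
                parallel_iterations=10)
```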
Example no. 36
def grow_tree_ensemble(tree_ensemble_handle,
                       stamp_token,
                       next_stamp_token,
                       learning_rate,
                       dropout_seed,
                       partition_ids,
                       gains,
                       splits,
                       learner_config,
                       center_bias,
                       name=None):
    r"""Grows the tree ensemble by either adding a layer to the last tree being grown

  or by starting a new tree.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the ensemble variable.
    stamp_token: A `Tensor` of type `int64`.
      Stamp token for validating operation consistency.
    next_stamp_token: A `Tensor` of type `int64`.
      Stamp token to be used for the next iteration.
    learning_rate: A `Tensor` of type `float32`. Scalar learning rate.
    dropout_seed: A `Tensor` of type `int64`.
    partition_ids: A list of `Tensor` objects with type `int32`.
      List of Rank 1 Tensors containing partition Id per candidate.
    gains: A list with the same length as `partition_ids` of `Tensor` objects with type `float32`.
      List of Rank 1 Tensors containing gains per candidate.
    splits: A list with the same length as `partition_ids` of `Tensor` objects with type `string`.
      List of Rank 1 Tensors containing serialized SplitInfo protos per candidate.
    learner_config: A `string`.
      Config for the learner of type LearnerConfig proto.
    center_bias: A `bool`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if not isinstance(partition_ids, (list, tuple)):
            raise TypeError("Expected list for 'partition_ids' argument to "
                            "'grow_tree_ensemble' Op, not %r." % partition_ids)
        _attr_num_handlers = len(partition_ids)
        if not isinstance(gains, (list, tuple)):
            raise TypeError("Expected list for 'gains' argument to "
                            "'grow_tree_ensemble' Op, not %r." % gains)
        if len(gains) != _attr_num_handlers:
            raise ValueError(
                "List argument 'gains' to 'grow_tree_ensemble' Op with length %d "
                "must match length %d of argument 'partition_ids'." %
                (len(gains), _attr_num_handlers))
        if not isinstance(splits, (list, tuple)):
            raise TypeError("Expected list for 'splits' argument to "
                            "'grow_tree_ensemble' Op, not %r." % splits)
        if len(splits) != _attr_num_handlers:
            raise ValueError(
                "List argument 'splits' to 'grow_tree_ensemble' Op with length %d "
                "must match length %d of argument 'partition_ids'." %
                (len(splits), _attr_num_handlers))
        learner_config = _execute.make_str(learner_config, "learner_config")
        center_bias = _execute.make_bool(center_bias, "center_bias")
        _, _, _op = _op_def_lib._apply_op_helper(
            "GrowTreeEnsemble",
            tree_ensemble_handle=tree_ensemble_handle,
            stamp_token=stamp_token,
            next_stamp_token=next_stamp_token,
            learning_rate=learning_rate,
            dropout_seed=dropout_seed,
            partition_ids=partition_ids,
            gains=gains,
            splits=splits,
            learner_config=learner_config,
            center_bias=center_bias,
            name=name)
        return _op

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "GrowTreeEnsemble", name, _ctx._post_execution_callbacks,
                tree_ensemble_handle, stamp_token, next_stamp_token,
                learning_rate, dropout_seed, partition_ids, gains, splits,
                "learner_config", learner_config, "center_bias", center_bias)
            return _result
        except _core._FallbackException:
            return grow_tree_ensemble_eager_fallback(
                tree_ensemble_handle,
                stamp_token,
                next_stamp_token,
                learning_rate,
                dropout_seed,
                partition_ids,
                gains,
                splits,
                learner_config=learner_config,
                center_bias=center_bias,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)