def single_image_random_dot_stereograms_eager_fallback(depth_values, hidden_surface_removal=True, convergence_dots_size=8, dots_per_inch=72, eye_separation=2.5, mu=0.3333, normalize=True, normalize_max=-100, normalize_min=100, border_level=0, number_colors=256, output_image_shape=[1024, 768, 1], output_data_window=[1022, 757], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function single_image_random_dot_stereograms
  """
  _ctx = ctx if ctx else _context.context()
  if hidden_surface_removal is None:
    hidden_surface_removal = True
  hidden_surface_removal = _execute.make_bool(hidden_surface_removal, "hidden_surface_removal")
  if convergence_dots_size is None:
    convergence_dots_size = 8
  convergence_dots_size = _execute.make_int(convergence_dots_size, "convergence_dots_size")
  if dots_per_inch is None:
    dots_per_inch = 72
  dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
  if eye_separation is None:
    eye_separation = 2.5
  eye_separation = _execute.make_float(eye_separation, "eye_separation")
  if mu is None:
    mu = 0.3333
  mu = _execute.make_float(mu, "mu")
  if normalize is None:
    normalize = True
  normalize = _execute.make_bool(normalize, "normalize")
  if normalize_max is None:
    normalize_max = -100
  normalize_max = _execute.make_float(normalize_max, "normalize_max")
  if normalize_min is None:
    normalize_min = 100
  normalize_min = _execute.make_float(normalize_min, "normalize_min")
  if border_level is None:
    border_level = 0
  border_level = _execute.make_float(border_level, "border_level")
  if number_colors is None:
    number_colors = 256
  number_colors = _execute.make_int(number_colors, "number_colors")
  if output_image_shape is None:
    output_image_shape = [1024, 768, 1]
  output_image_shape = _execute.make_shape(output_image_shape, "output_image_shape")
  if output_data_window is None:
    output_data_window = [1022, 757]
  output_data_window = _execute.make_shape(output_data_window, "output_data_window")
  _attr_T, (depth_values,) = _execute.args_to_matching_eager([depth_values], _ctx)
  _inputs_flat = [depth_values]
  _attrs = ("T", _attr_T, "hidden_surface_removal", hidden_surface_removal,
  "convergence_dots_size", convergence_dots_size, "dots_per_inch",
  dots_per_inch, "eye_separation", eye_separation, "mu", mu, "normalize",
  normalize, "normalize_max", normalize_max, "normalize_min", normalize_min,
  "border_level", border_level, "number_colors", number_colors,
  "output_image_shape", output_image_shape, "output_data_window",
  output_data_window)
  _result = _execute.execute(b"SingleImageRandomDotStereograms", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "SingleImageRandomDotStereograms", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example #2
def range_decode(encoded, shape, cdf, precision, name=None):
  r"""Decodes a range-coded `code` into an int32 tensor of shape `shape`.

  This is the reverse op of RangeEncode. The shape of the tensor that was encoded
  should be known by the caller.

  Implementation notes:

  - If the wrong input is given (e.g., a corrupt `encoded` string, or a `cdf` or
  `precision` that does not match the encoder), the decode is unsuccessful. Because
  of potential performance issues, the decoder does not return an error status.

  Args:
    encoded: A `Tensor` of type `string`.
      A scalar string tensor from RangeEncode.
    shape: A `Tensor` of type `int32`.
      An int32 1-D tensor representing the shape of the data encoded by
      RangeEncode.
    cdf: A `Tensor` of type `int32`.
    precision: An `int` that is `>= 1`.
      The number of bits for probability quantization. Must be <= 16, and
      must match the precision used by RangeEncode that produced `encoded`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int16`. An int16 tensor with shape equal to `shape`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    precision = _execute.make_int(precision, "precision")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RangeDecode", encoded=encoded, shape=shape, cdf=cdf,
        precision=precision, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("precision", _op.get_attr("precision"))
    _execute.record_gradient(
      "RangeDecode", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "RangeDecode",
        name, _ctx._post_execution_callbacks, encoded, shape, cdf,
        "precision", precision)
      return _result
    except _core._FallbackException:
      return range_decode_eager_fallback(
          encoded, shape, cdf, precision=precision, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
Example #3
def tree_ensemble_used_handlers(tree_ensemble_handle, stamp_token, num_all_handlers, name=None):
  r"""Returns the mask of used handlers along with the number of non-zero elements in

  this mask. Used in feature selection.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the tree ensemble.
    stamp_token: A `Tensor` of type `int64`.
      Token to use as the new value of the resource stamp.
    num_all_handlers: An `int` that is `>= 0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (num_used_handlers, used_handlers_mask).

    num_used_handlers: A `Tensor` of type `int64`. Number of feature column handlers used in the model.
    used_handlers_mask: A `Tensor` of type `bool`. A boolean vector showing which handlers are used in the
      model.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    num_all_handlers = _execute.make_int(num_all_handlers, "num_all_handlers")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TreeEnsembleUsedHandlers", tree_ensemble_handle=tree_ensemble_handle,
        stamp_token=stamp_token, num_all_handlers=num_all_handlers, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("num_all_handlers", _op.get_attr("num_all_handlers"))
    _execute.record_gradient(
      "TreeEnsembleUsedHandlers", _inputs_flat, _attrs, _result, name)
    _result = _TreeEnsembleUsedHandlersOutput._make(_result)
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "TreeEnsembleUsedHandlers", name, _ctx._post_execution_callbacks,
        tree_ensemble_handle, stamp_token, "num_all_handlers",
        num_all_handlers)
      _result = _TreeEnsembleUsedHandlersOutput._make(_result)
      return _result
    except _core._FallbackException:
      return tree_ensemble_used_handlers_eager_fallback(
          tree_ensemble_handle, stamp_token,
          num_all_handlers=num_all_handlers, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
Example #4
def pmf_to_quantized_cdf(pmf, precision, name=None):
  r"""Converts PMF to quantized CDF. This op uses floating-point operations

  internally. Therefore the quantized output may not be consistent across multiple
  platforms. For entropy encoders and decoders to have the same quantized CDF on
  different platforms, the quantized CDF should be produced once and saved, then
  the saved quantized CDF should be used everywhere.

  After quantization, if PMF does not sum to 2^precision, then some values of PMF
  are increased or decreased to adjust the sum to equal to 2^precision.

  Note that the input PMF is pre-quantization. The input PMF is not normalized
  by this op prior to quantization. Therefore the user is responsible for
  normalizing PMF if necessary.

  Args:
    pmf: A `Tensor` of type `float32`.
    precision: An `int` that is `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    precision = _execute.make_int(precision, "precision")
    _, _, _op = _op_def_lib._apply_op_helper(
        "PmfToQuantizedCdf", pmf=pmf, precision=precision, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("precision", _op.get_attr("precision"))
    _execute.record_gradient(
      "PmfToQuantizedCdf", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "PmfToQuantizedCdf", name, _ctx._post_execution_callbacks, pmf,
        "precision", precision)
      return _result
    except _core._FallbackException:
      return pmf_to_quantized_cdf_eager_fallback(
          pmf, precision=precision, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
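
The sum-adjustment step described in the docstring can be illustrated with a small NumPy sketch. This is a simplified stand-in for the concept only; the actual PmfToQuantizedCdf kernel's rounding and redistribution strategy may differ, and the helper name below is made up for illustration.

import numpy as np

def quantize_pmf_to_cdf(pmf, precision):
    # Scale the PMF so the quantized mass sums (approximately) to 2**precision,
    # keeping every symbol at least one count so it stays decodable.
    total = 1 << precision
    quantized = np.maximum(1, np.round(np.asarray(pmf, np.float64) * total)).astype(np.int64)
    # Adjust the largest entry so the sum is exactly 2**precision.
    quantized[np.argmax(quantized)] += total - quantized.sum()
    # The CDF starts at 0 and ends at 2**precision.
    return np.concatenate([[0], np.cumsum(quantized)])

print(quantize_pmf_to_cdf([0.1, 0.2, 0.3, 0.4], precision=8))
# [  0  26  77 154 256]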
def sparse_feature_cross_v2_eager_fallback(indices, values, shapes, dense, hashed_output, num_buckets, hash_key, out_type, internal_type, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function sparse_feature_cross_v2
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_feature_cross_v2' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  hashed_output = _execute.make_bool(hashed_output, "hashed_output")
  num_buckets = _execute.make_int(num_buckets, "num_buckets")
  hash_key = _execute.make_int(hash_key, "hash_key")
  out_type = _execute.make_type(out_type, "out_type")
  internal_type = _execute.make_type(internal_type, "internal_type")
  _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
  _attr_dense_types, dense = _execute.convert_to_mixed_eager_tensors(dense, _ctx)
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
  shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
  _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense)
  _attrs = ("N", _attr_N, "hashed_output", hashed_output, "num_buckets",
  num_buckets, "hash_key", hash_key, "sparse_types", _attr_sparse_types,
  "dense_types", _attr_dense_types, "out_type", out_type, "internal_type",
  internal_type)
  _result = _execute.execute(b"SparseFeatureCrossV2", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SparseFeatureCrossV2", _inputs_flat, _attrs, _result, name)
  _result = _SparseFeatureCrossV2Output._make(_result)
  return _result
Example #6
def pmf_to_quantized_cdf_eager_fallback(pmf, precision, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function pmf_to_quantized_cdf
  """
  _ctx = ctx if ctx else _context.context()
  precision = _execute.make_int(precision, "precision")
  pmf = _ops.convert_to_tensor(pmf, _dtypes.float32)
  _inputs_flat = [pmf]
  _attrs = ("precision", precision)
  _result = _execute.execute(b"PmfToQuantizedCdf", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "PmfToQuantizedCdf", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example #7
def range_encode_eager_fallback(data, cdf, precision, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function range_encode
  """
  _ctx = ctx if ctx else _context.context()
  precision = _execute.make_int(precision, "precision")
  data = _ops.convert_to_tensor(data, _dtypes.int16)
  cdf = _ops.convert_to_tensor(cdf, _dtypes.int32)
  _inputs_flat = [data, cdf]
  _attrs = ("precision", precision)
  _result = _execute.execute(b"RangeEncode", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "RangeEncode", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example #8
def tree_ensemble_used_handlers_eager_fallback(tree_ensemble_handle, stamp_token, num_all_handlers, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tree_ensemble_used_handlers
  """
  _ctx = ctx if ctx else _context.context()
  num_all_handlers = _execute.make_int(num_all_handlers, "num_all_handlers")
  tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
  stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
  _inputs_flat = [tree_ensemble_handle, stamp_token]
  _attrs = ("num_all_handlers", num_all_handlers)
  _result = _execute.execute(b"TreeEnsembleUsedHandlers", 2,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "TreeEnsembleUsedHandlers", _inputs_flat, _attrs, _result, name)
  _result = _TreeEnsembleUsedHandlersOutput._make(_result)
  return _result
Example #9
def bipartite_match_eager_fallback(distance_mat, num_valid_rows, top_k=-1, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function bipartite_match
  """
  _ctx = ctx if ctx else _context.context()
  if top_k is None:
    top_k = -1
  top_k = _execute.make_int(top_k, "top_k")
  distance_mat = _ops.convert_to_tensor(distance_mat, _dtypes.float32)
  num_valid_rows = _ops.convert_to_tensor(num_valid_rows, _dtypes.float32)
  _inputs_flat = [distance_mat, num_valid_rows]
  _attrs = ("top_k", top_k)
  _result = _execute.execute(b"BipartiteMatch", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BipartiteMatch", _inputs_flat, _attrs, _result, name)
  _result = _BipartiteMatchOutput._make(_result)
  return _result
Example #10
def rpc(address, method, request, protocol="", fail_fast=True, timeout_in_ms=0, name=None):
  r"""TODO: add doc.

  Args:
    address: A `Tensor` of type `string`.
    method: A `Tensor` of type `string`.
    request: A `Tensor` of type `string`.
    protocol: An optional `string`. Defaults to `""`.
    fail_fast: An optional `bool`. Defaults to `True`.
    timeout_in_ms: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Rpc", name,
        _ctx._post_execution_callbacks, address, method, request, "protocol",
        protocol, "fail_fast", fail_fast, "timeout_in_ms", timeout_in_ms)
      return _result
    except _core._FallbackException:
      try:
        return rpc_eager_fallback(
            address, method, request, protocol=protocol, fail_fast=fail_fast,
            timeout_in_ms=timeout_in_ms, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              rpc, address=address, method=method, request=request,
                   protocol=protocol, fail_fast=fail_fast,
                   timeout_in_ms=timeout_in_ms, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if protocol is None:
    protocol = ""
  protocol = _execute.make_str(protocol, "protocol")
  if fail_fast is None:
    fail_fast = True
  fail_fast = _execute.make_bool(fail_fast, "fail_fast")
  if timeout_in_ms is None:
    timeout_in_ms = 0
  timeout_in_ms = _execute.make_int(timeout_in_ms, "timeout_in_ms")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Rpc", address=address, method=method, request=request,
               protocol=protocol, fail_fast=fail_fast,
               timeout_in_ms=timeout_in_ms, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          rpc, address=address, method=method, request=request,
               protocol=protocol, fail_fast=fail_fast,
               timeout_in_ms=timeout_in_ms, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("protocol", _op.get_attr("protocol"), "fail_fast",
            _op.get_attr("fail_fast"), "timeout_in_ms",
            _op.get_attr("timeout_in_ms"))
  _execute.record_gradient(
      "Rpc", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Example #11
def collective_reduce(input,
                      group_size,
                      group_key,
                      instance_key,
                      merge_op,
                      final_op,
                      subdiv_offsets,
                      name=None):
    r"""Mutually reduces multiple tensors of identical type and shape.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `half`, `float64`, `int32`, `int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    merge_op: A `string` from: `"Min", "Max", "Mul", "Add"`.
    final_op: A `string` from: `"Id", "Div"`.
    subdiv_offsets: A list of `ints`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
    _ctx = _context._context
    if _ctx is not None and _ctx._eager_context.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "CollectiveReduce", name, _ctx._post_execution_callbacks,
                input, "group_size", group_size, "group_key", group_key,
                "instance_key", instance_key, "merge_op", merge_op, "final_op",
                final_op, "subdiv_offsets", subdiv_offsets)
            return _result
        except _core._FallbackException:
            try:
                return collective_reduce_eager_fallback(
                    input,
                    group_size=group_size,
                    group_key=group_key,
                    instance_key=instance_key,
                    merge_op=merge_op,
                    final_op=final_op,
                    subdiv_offsets=subdiv_offsets,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    group_size = _execute.make_int(group_size, "group_size")
    group_key = _execute.make_int(group_key, "group_key")
    instance_key = _execute.make_int(instance_key, "instance_key")
    merge_op = _execute.make_str(merge_op, "merge_op")
    final_op = _execute.make_str(final_op, "final_op")
    if not isinstance(subdiv_offsets, (list, tuple)):
        raise TypeError("Expected list for 'subdiv_offsets' argument to "
                        "'collective_reduce' Op, not %r." % subdiv_offsets)
    subdiv_offsets = [
        _execute.make_int(_i, "subdiv_offsets") for _i in subdiv_offsets
    ]
    _, _, _op = _op_def_lib._apply_op_helper("CollectiveReduce",
                                             input=input,
                                             group_size=group_size,
                                             group_key=group_key,
                                             instance_key=instance_key,
                                             merge_op=merge_op,
                                             final_op=final_op,
                                             subdiv_offsets=subdiv_offsets,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "group_size", _op.get_attr("group_size"),
              "group_key", _op.get_attr("group_key"), "instance_key",
              _op.get_attr("instance_key"),
              "merge_op", _op.get_attr("merge_op"), "final_op",
              _op.get_attr("final_op"), "subdiv_offsets",
              _op.get_attr("subdiv_offsets"))
    _execute.record_gradient("CollectiveReduce", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
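
A hedged usage sketch of the wrapper above: averaging a tensor across a group of size two by merging with "Add" and finalizing with "Div". The group/instance keys, device placement, and the two-CPU session config are illustrative assumptions, not values required by the op.

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    outputs = []
    for i in range(2):
        with tf.device("/cpu:%d" % i):  # assumes two local CPU devices are configured
            t = tf.constant([1.0 * (i + 1), 2.0 * (i + 1)])
            outputs.append(
                collective_reduce(t, group_size=2, group_key=1, instance_key=1,
                                  merge_op="Add", final_op="Div",
                                  subdiv_offsets=[0]))
config = tf.ConfigProto(device_count={"CPU": 2})
with tf.Session(graph=graph, config=config) as sess:
    print(sess.run(outputs))  # both group members receive the mean [1.5, 3.0]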
Example #12
def mfcc(spectrogram,
         sample_rate,
         upper_frequency_limit=4000,
         lower_frequency_limit=20,
         filterbank_channel_count=40,
         dct_coefficient_count=13,
         name=None):
    r"""Transforms a spectrogram into a form that's useful for speech recognition.

  Mel Frequency Cepstral Coefficients are a way of representing audio data that's

  been effective as an input feature for machine learning. They are created by

  taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the

  higher frequencies that are less significant to the human ear. They have a long

  history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum

  is a good resource to learn more.

  Args:
    spectrogram: A `Tensor` of type `float32`.
      Typically produced by the Spectrogram op, with magnitude_squared
      set to true.
    sample_rate: A `Tensor` of type `int32`.
      How many samples per second the source audio used.
    upper_frequency_limit: An optional `float`. Defaults to `4000`.
      The highest frequency to use when calculating the cepstrum.
    lower_frequency_limit: An optional `float`. Defaults to `20`.
      The lowest frequency to use when calculating the cepstrum.
    filterbank_channel_count: An optional `int`. Defaults to `40`.
      Resolution of the Mel bank used internally.
    dct_coefficient_count: An optional `int`. Defaults to `13`.
      How many output channels to produce per time slice.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
    _ctx = _context.context()
    if not _ctx.executing_eagerly():
        if upper_frequency_limit is None:
            upper_frequency_limit = 4000
        upper_frequency_limit = _execute.make_float(upper_frequency_limit,
                                                    "upper_frequency_limit")
        if lower_frequency_limit is None:
            lower_frequency_limit = 20
        lower_frequency_limit = _execute.make_float(lower_frequency_limit,
                                                    "lower_frequency_limit")
        if filterbank_channel_count is None:
            filterbank_channel_count = 40
        filterbank_channel_count = _execute.make_int(
            filterbank_channel_count, "filterbank_channel_count")
        if dct_coefficient_count is None:
            dct_coefficient_count = 13
        dct_coefficient_count = _execute.make_int(dct_coefficient_count,
                                                  "dct_coefficient_count")
        _, _, _op = _op_def_lib._apply_op_helper(
            "Mfcc",
            spectrogram=spectrogram,
            sample_rate=sample_rate,
            upper_frequency_limit=upper_frequency_limit,
            lower_frequency_limit=lower_frequency_limit,
            filterbank_channel_count=filterbank_channel_count,
            dct_coefficient_count=dct_coefficient_count,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("upper_frequency_limit",
                  _op.get_attr("upper_frequency_limit"),
                  "lower_frequency_limit",
                  _op.get_attr("lower_frequency_limit"),
                  "filterbank_channel_count",
                  _op.get_attr("filterbank_channel_count"),
                  "dct_coefficient_count",
                  _op.get_attr("dct_coefficient_count"))
        _execute.record_gradient("Mfcc", _inputs_flat, _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name, "Mfcc", name,
                _ctx._post_execution_callbacks, spectrogram, sample_rate,
                "upper_frequency_limit", upper_frequency_limit,
                "lower_frequency_limit", lower_frequency_limit,
                "filterbank_channel_count", filterbank_channel_count,
                "dct_coefficient_count", dct_coefficient_count)
            return _result
        except _core._FallbackException:
            return mfcc_eager_fallback(
                spectrogram,
                sample_rate,
                upper_frequency_limit=upper_frequency_limit,
                lower_frequency_limit=lower_frequency_limit,
                filterbank_channel_count=filterbank_channel_count,
                dct_coefficient_count=dct_coefficient_count,
                name=name)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
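
A minimal sketch of calling the wrapper above on a dummy spectrogram. The [channels, frames, fft_bins] layout and the expected output shape follow the Spectrogram-then-Mfcc pipeline described in the docstring, but the concrete sizes here are assumptions for illustration.

import numpy as np
import tensorflow as tf

dummy_spectrogram = tf.constant(
    np.random.rand(1, 49, 257).astype(np.float32))  # 1 channel, 49 frames (assumed sizes)
coeffs = mfcc(dummy_spectrogram, sample_rate=tf.constant(16000, tf.int32),
              dct_coefficient_count=13)
with tf.Session() as sess:
    print(sess.run(coeffs).shape)  # expected: (1, 49, 13)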
Example #13
def decode_wav(contents, desired_channels=-1, desired_samples=-1, name=None):
    r"""Decode a 16-bit PCM WAV file to a float tensor.

  The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.

  

  When desired_channels is set, if the input contains fewer channels than this

  then the last channel will be duplicated to give the requested number, else if

  the input has more channels than requested then the additional channels will be

  ignored.

  

  If desired_samples is set, then the audio will be cropped or padded with zeroes

  to the requested length.

  

  The first output contains a Tensor with the content of the audio samples. The

  lowest dimension will be the number of channels, and the second will be the

  number of samples. For example, a ten-sample-long stereo WAV file should give an

  output shape of [10, 2].

  Args:
    contents: A `Tensor` of type `string`.
      The WAV-encoded audio, usually from a file.
    desired_channels: An optional `int`. Defaults to `-1`.
      Number of sample channels wanted.
    desired_samples: An optional `int`. Defaults to `-1`.
      Length of audio requested.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (audio, sample_rate).

    audio: A `Tensor` of type `float32`.
    sample_rate: A `Tensor` of type `int32`.
  """
    _ctx = _context.context()
    if not _ctx.executing_eagerly():
        if desired_channels is None:
            desired_channels = -1
        desired_channels = _execute.make_int(desired_channels,
                                             "desired_channels")
        if desired_samples is None:
            desired_samples = -1
        desired_samples = _execute.make_int(desired_samples, "desired_samples")
        _, _, _op = _op_def_lib._apply_op_helper(
            "DecodeWav",
            contents=contents,
            desired_channels=desired_channels,
            desired_samples=desired_samples,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("desired_channels", _op.get_attr("desired_channels"),
                  "desired_samples", _op.get_attr("desired_samples"))
        _execute.record_gradient("DecodeWav", _inputs_flat, _attrs, _result,
                                 name)
        _result = _DecodeWavOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name, "DecodeWav", name,
                _ctx._post_execution_callbacks, contents, "desired_channels",
                desired_channels, "desired_samples", desired_samples)
            _result = _DecodeWavOutput._make(_result)
            return _result
        except _core._FallbackException:
            return decode_wav_eager_fallback(contents,
                                             desired_channels=desired_channels,
                                             desired_samples=desired_samples,
                                             name=name)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
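
A minimal sketch of decoding a WAV file with the wrapper above; "speech.wav" is a placeholder path, and any 16-bit PCM WAV file would do.

import tensorflow as tf

wav_bytes = tf.read_file("speech.wav")  # placeholder path
audio, sample_rate = decode_wav(wav_bytes, desired_channels=1)
with tf.Session() as sess:
    samples, rate = sess.run([audio, sample_rate])
    # Samples are floats in [-1.0, 1.0] with shape [num_samples, num_channels],
    # e.g. (16000, 1) and rate 16000 for one second of mono 16 kHz audio.
    print(samples.shape, rate)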
Example #14
def sparse_feature_cross(indices,
                         values,
                         shapes,
                         dense,
                         hashed_output,
                         num_buckets,
                         out_type,
                         internal_type,
                         name=None):
    r"""Generates sparse cross form a list of sparse tensors.

  The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
  representing features of one feature column. It outputs a 2D `SparseTensor` with
  the batchwise crosses of these features.

  For example, if the inputs are

      inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

      inputs[2]: Tensor [["f"], ["g"]]

  then the output will be

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  if hashed_output=true then the output will be

      shape = [2, 2]
      [0, 0]: HashCombine(
                  Fingerprint64("f"), HashCombine(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    indices: A list of `Tensor` objects with type `int64`.
      2-D.  Indices of each input `SparseTensor`.
    values: A list of `Tensor` objects with types from: `int64`, `string`.
      1-D.   values of each `SparseTensor`.
    shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.
      1-D.   Shapes of each `SparseTensor`.
    dense: A list of `Tensor` objects with types from: `int64`, `string`.
      2-D.    Columns represented by dense `Tensor`.
    hashed_output: A `bool`.
    num_buckets: An `int` that is `>= 0`.
    out_type: A `tf.DType` from: `tf.int64, tf.string`.
    internal_type: A `tf.DType` from: `tf.int64, tf.string`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`. 2-D.  Indices of the concatenated `SparseTensor`.
    output_values: A `Tensor` of type `out_type`. 1-D.  Non-empty values of the concatenated or hashed
      `SparseTensor`.
    output_shape: A `Tensor` of type `int64`. 1-D.  Shape of the concatenated `SparseTensor`.
  """
    if not isinstance(indices, (list, tuple)):
        raise TypeError("Expected list for 'indices' argument to "
                        "'sparse_feature_cross' Op, not %r." % indices)
    _attr_N = len(indices)
    if not isinstance(shapes, (list, tuple)):
        raise TypeError("Expected list for 'shapes' argument to "
                        "'sparse_feature_cross' Op, not %r." % shapes)
    if len(shapes) != _attr_N:
        raise ValueError(
            "List argument 'shapes' to 'sparse_feature_cross' Op with length %d "
            "must match length %d of argument 'indices'." %
            (len(shapes), _attr_N))
    hashed_output = _execute.make_bool(hashed_output, "hashed_output")
    num_buckets = _execute.make_int(num_buckets, "num_buckets")
    out_type = _execute.make_type(out_type, "out_type")
    internal_type = _execute.make_type(internal_type, "internal_type")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("SparseFeatureCross",
                                                 indices=indices,
                                                 values=values,
                                                 shapes=shapes,
                                                 dense=dense,
                                                 hashed_output=hashed_output,
                                                 num_buckets=num_buckets,
                                                 out_type=out_type,
                                                 internal_type=internal_type,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("N", _op.get_attr("N"), "hashed_output",
                  _op.get_attr("hashed_output"), "num_buckets",
                  _op.get_attr("num_buckets"), "sparse_types",
                  _op.get_attr("sparse_types"), "dense_types",
                  _op.get_attr("dense_types"), "out_type",
                  _op.get_attr("out_type"), "internal_type",
                  _op.get_attr("internal_type"))
    else:
        _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(
            values, _ctx)
        _attr_dense_types, dense = _execute.convert_to_mixed_eager_tensors(
            dense, _ctx)
        indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
        shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
        _inputs_flat = list(indices) + list(values) + list(shapes) + list(
            dense)
        _attrs = ("N", _attr_N, "hashed_output", hashed_output, "num_buckets",
                  num_buckets, "sparse_types", _attr_sparse_types,
                  "dense_types", _attr_dense_types, "out_type", out_type,
                  "internal_type", internal_type)
        _result = _execute.execute(b"SparseFeatureCross",
                                   3,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("SparseFeatureCross", _inputs_flat, _attrs,
                             _result, name)
    _result = _SparseFeatureCrossOutput._make(_result)
    return _result
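
A hedged sketch reproducing the string-cross example from the docstring above by passing the SparseTensor components directly to the wrapper. The out_type/internal_type choices are assumptions consistent with string inputs.

import tensorflow as tf

indices = [tf.constant([[0, 0], [1, 0], [1, 1]], tf.int64),
           tf.constant([[0, 0], [1, 0]], tf.int64)]
values = [tf.constant(["a", "b", "c"]), tf.constant(["d", "e"])]
shapes = [tf.constant([2, 2], tf.int64), tf.constant([2, 1], tf.int64)]
dense = [tf.constant([["f"], ["g"]])]

out_indices, out_values, out_shape = sparse_feature_cross(
    indices, values, shapes, dense, hashed_output=False, num_buckets=0,
    out_type=tf.string, internal_type=tf.string)
with tf.Session() as sess:
    print(sess.run(out_values))
    # e.g. [b'a_X_d_X_f' b'b_X_e_X_g' b'c_X_e_X_g'], matching the docstring example.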
def generate_vocab_remapping(new_vocab_file,
                             old_vocab_file,
                             new_vocab_offset,
                             num_new_vocab,
                             old_vocab_size=-1,
                             name=None):
    r"""Given a path to new and old vocabulary files, returns a remapping Tensor of

  length `num_new_vocab`, where `remapping[i]` contains the row number in the old

  vocabulary that corresponds to row `i` in the new vocabulary (starting at line

  `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`

  in the new vocabulary is not in the old vocabulary.  The old vocabulary is

  constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the

  default value of -1.

  

  `num_vocab_offset` enables

  use in the partitioned variable case, and should generally be set through

  examining partitioning info.  The format of the files should be a text file,

  with each line containing a single entity within the vocabulary.

  

  For example, with `new_vocab_file` a text file containing each of the following

  elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],

  `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be

  `[0, -1, 2]`.

  

  The op also returns a count of how many entries in the new vocabulary

  were present in the old vocabulary, which is used to calculate the number of

  values to initialize in a weight matrix remapping

  

  This functionality can be used to remap both row vocabularies (typically,

  features) and column vocabularies (typically, classes) from TensorFlow

  checkpoints.  Note that the partitioning logic relies on contiguous vocabularies

  corresponding to div-partitioned variables.  Moreover, the underlying remapping

  uses an IndexTable (as opposed to an inexact CuckooTable), so client code should

  use the corresponding index_table_from_file() as the FeatureColumn framework

  does (as opposed to tf.feature_to_id(), which uses a CuckooTable).

  Args:
    new_vocab_file: A `Tensor` of type `string`. Path to the new vocab file.
    old_vocab_file: A `Tensor` of type `string`. Path to the old vocab file.
    new_vocab_offset: An `int` that is `>= 0`.
      How many entries into the new vocab file to start reading.
    num_new_vocab: An `int` that is `>= 0`.
      Number of entries in the new vocab file to remap.
    old_vocab_size: An optional `int` that is `>= -1`. Defaults to `-1`.
      Number of entries in the old vocab file to consider.  If -1,
      use the entire old vocabulary.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (remapping, num_present).

    remapping: A `Tensor` of type `int64`.
    num_present: A `Tensor` of type `int32`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        new_vocab_offset = _execute.make_int(new_vocab_offset,
                                             "new_vocab_offset")
        num_new_vocab = _execute.make_int(num_new_vocab, "num_new_vocab")
        if old_vocab_size is None:
            old_vocab_size = -1
        old_vocab_size = _execute.make_int(old_vocab_size, "old_vocab_size")
        _, _, _op = _op_def_lib._apply_op_helper(
            "GenerateVocabRemapping",
            new_vocab_file=new_vocab_file,
            old_vocab_file=old_vocab_file,
            new_vocab_offset=new_vocab_offset,
            num_new_vocab=num_new_vocab,
            old_vocab_size=old_vocab_size,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("new_vocab_offset", _op.get_attr("new_vocab_offset"),
                  "num_new_vocab", _op.get_attr("num_new_vocab"),
                  "old_vocab_size", _op.get_attr("old_vocab_size"))
        _execute.record_gradient("GenerateVocabRemapping", _inputs_flat,
                                 _attrs, _result, name)
        _result = _GenerateVocabRemappingOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "GenerateVocabRemapping", name, _ctx._post_execution_callbacks,
                new_vocab_file, old_vocab_file, "new_vocab_offset",
                new_vocab_offset, "num_new_vocab", num_new_vocab,
                "old_vocab_size", old_vocab_size)
            _result = _GenerateVocabRemappingOutput._make(_result)
            return _result
        except _core._FallbackException:
            return generate_vocab_remapping_eager_fallback(
                new_vocab_file,
                old_vocab_file,
                new_vocab_offset=new_vocab_offset,
                num_new_vocab=num_new_vocab,
                old_vocab_size=old_vocab_size,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
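
A minimal sketch of the docstring's worked example, with the vocab files written to illustrative temporary paths.

import tensorflow as tf

with open("/tmp/new_vocab.txt", "w") as f:
    f.write("f0\nf1\nf2\nf3\n")
with open("/tmp/old_vocab.txt", "w") as f:
    f.write("f1\nf0\nf3\n")

remapping, num_present = generate_vocab_remapping(
    new_vocab_file="/tmp/new_vocab.txt", old_vocab_file="/tmp/old_vocab.txt",
    new_vocab_offset=1, num_new_vocab=3)
with tf.Session() as sess:
    print(sess.run([remapping, num_present]))  # expected: [array([0, -1, 2]), 2]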
Example #16
def _compute_accidental_hits(true_classes,
                             sampled_candidates,
                             num_true,
                             seed=0,
                             seed2=0,
                             name=None):
    r"""Computes the ids of the positions in sampled_candidates that match true_labels.

  When doing log-odds NCE, the result of this op should be passed through a

  SparseToDense op, then added to the logits of the sampled candidates. This has

  the effect of 'removing' the sampled labels that match the true labels by

  making the classifier sure that they are sampled labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      The true_classes output of UnpackSparseLabels.
    sampled_candidates: A `Tensor` of type `int64`.
      The sampled_candidates output of CandidateSampler.
    num_true: An `int`. Number of true labels per context.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number

      generator is seeded by the given seed.  Otherwise, it is seeded by a

      random seed.
    seed2: An optional `int`. Defaults to `0`.
      An second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (indices, ids, weights).

    indices: A `Tensor` of type `int32`. A vector of indices corresponding to rows of true_candidates.
    ids: A `Tensor` of type `int64`. A vector of IDs of positions in sampled_candidates that match a true_label
      for the row with the corresponding index in indices.
    weights: A `Tensor` of type `float32`. A vector of the same length as indices and ids, in which each element
      is -FLOAT_MAX.
  """
    num_true = _execute.make_int(num_true, "num_true")
    if seed is None:
        seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
        seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper(
            "ComputeAccidentalHits",
            true_classes=true_classes,
            sampled_candidates=sampled_candidates,
            num_true=num_true,
            seed=seed,
            seed2=seed2,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("num_true", _op.get_attr("num_true"), "seed",
                  _op.get_attr("seed"), "seed2", _op.get_attr("seed2"))
    else:
        true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
        sampled_candidates = _ops.convert_to_tensor(sampled_candidates,
                                                    _dtypes.int64)
        _inputs_flat = [true_classes, sampled_candidates]
        _attrs = ("num_true", num_true, "seed", seed, "seed2", seed2)
        _result = _execute.execute(b"ComputeAccidentalHits",
                                   3,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("ComputeAccidentalHits", _inputs_flat, _attrs,
                             _result, name)
    _result = _ComputeAccidentalHitsOutput._make(_result)
    return _result
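
A hedged sketch of the pattern the docstring describes: scattering the returned -FLOAT_MAX weights into the sampled logits so accidental hits are effectively removed. The batch size, candidates, and logits below are illustrative placeholders.

import tensorflow as tf

batch_size, num_sampled = 2, 4
true_classes = tf.constant([[3], [1]], tf.int64)
sampled_candidates = tf.constant([7, 1, 3, 5], tf.int64)
sampled_logits = tf.zeros([batch_size, num_sampled])

acc_indices, acc_ids, acc_weights = _compute_accidental_hits(
    true_classes, sampled_candidates, num_true=1)
# Pair each row index with the matching sampled position, then scatter the
# -FLOAT_MAX weights into the logits.
sparse_indices = tf.stack([acc_indices, tf.cast(acc_ids, tf.int32)], axis=1)
sampled_logits += tf.sparse_to_dense(
    sparse_indices, [batch_size, num_sampled], acc_weights,
    default_value=0.0, validate_indices=False)
with tf.Session() as sess:
    print(sess.run(sampled_logits))  # accidental hits now hold -FLOAT_MAX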
def random_uniform_int(shape, minval, maxval, seed=0, seed2=0, name=None):
  r"""Outputs random integers from a uniform distribution.

  The generated values are uniform integers in the range `[minval, maxval)`.
  The lower bound `minval` is included in the range, while the upper bound
  `maxval` is excluded.

  The random integers are slightly biased unless `maxval - minval` is an exact
  power of two.  The bias is small for values of `maxval - minval` significantly
  smaller than the range of the output (either `2^32` or `2^64`).

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The shape of the output tensor.
    minval: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D.  Inclusive lower bound on the generated integers.
    maxval: A `Tensor`. Must have the same type as `minval`.
      0-D.  Exclusive upper bound on the generated integers.
    seed: An optional `int`. Defaults to `0`.
      If either `seed` or `seed2` are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `minval`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RandomUniformInt", shape=shape, minval=minval, maxval=maxval,
        seed=seed, seed2=seed2, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("seed", _op.get_attr("seed"), "seed2", _op.get_attr("seed2"),
              "Tout", _op.get_attr("Tout"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "RandomUniformInt", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "RandomUniformInt", name,
        _ctx._post_execution_callbacks, shape, minval, maxval, "seed", seed,
        "seed2", seed2)
      return _result
    except _core._FallbackException:
      return random_uniform_int_eager_fallback(
          shape, minval, maxval, seed=seed, seed2=seed2, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
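
A minimal usage sketch of the wrapper above; the bounds are illustrative and follow the documented half-open range [minval, maxval).

import tensorflow as tf

samples = random_uniform_int(shape=tf.constant([5]),
                             minval=tf.constant(0),
                             maxval=tf.constant(10), seed=42)
with tf.Session() as sess:
    print(sess.run(samples))  # five integers with 0 <= value < 10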
def enter(data, frame_name, is_constant=False, parallel_iterations=10, name=None):
  r"""Creates or finds a child frame, and makes `data` available to the child frame.

  This op is used together with `Exit` to create loops in the graph.
  The unique `frame_name` is used by the `Executor` to identify frames. If
  `is_constant` is true, `output` is a constant in the child frame; otherwise
  it may be changed in the child frame. At most `parallel_iterations` iterations
  are run in parallel in the child frame.

  Args:
    data: A `Tensor`. The tensor to be made available to the child frame.
    frame_name: A `string`. The name of the child frame.
    is_constant: An optional `bool`. Defaults to `False`.
      If true, the output is constant within the child frame.
    parallel_iterations: An optional `int`. Defaults to `10`.
      The number of iterations allowed to run in parallel.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Enter",
        name, _ctx._post_execution_callbacks, data, "frame_name", frame_name,
        "is_constant", is_constant, "parallel_iterations",
        parallel_iterations)
      return _result
    except _core._FallbackException:
      try:
        return enter_eager_fallback(
            data, frame_name=frame_name, is_constant=is_constant,
            parallel_iterations=parallel_iterations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  frame_name = _execute.make_str(frame_name, "frame_name")
  if is_constant is None:
    is_constant = False
  is_constant = _execute.make_bool(is_constant, "is_constant")
  if parallel_iterations is None:
    parallel_iterations = 10
  parallel_iterations = _execute.make_int(parallel_iterations, "parallel_iterations")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Enter", data=data, frame_name=frame_name, is_constant=is_constant,
                 parallel_iterations=parallel_iterations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "frame_name", _op.get_attr("frame_name"),
            "is_constant", _op.get_attr("is_constant"), "parallel_iterations",
            _op.get_attr("parallel_iterations"))
  _execute.record_gradient(
      "Enter", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def random_poisson_v2(shape, rate, seed=0, seed2=0, dtype=_dtypes.int64, name=None):
  r"""Outputs random values from the Poisson distribution(s) described by rate.

  This op uses two algorithms, depending on rate. If rate >= 10, then
  the algorithm by Hormann is used to acquire samples via
  transformation-rejection.
  See http://www.sciencedirect.com/science/article/pii/0167668793909974.

  Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
  random variables.
  See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
  Programming, Volume 2. Addison Wesley.

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D integer tensor. Shape of independent samples to draw from each
      distribution described by the shape parameters given in rate.
    rate: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`.
      A tensor in which each scalar is a "rate" parameter describing the
      associated poisson distribution.
    seed: An optional `int`. Defaults to `0`.
      If either `seed` or `seed2` are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    dtype: An optional `tf.DType` from: `tf.half, tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    if dtype is None:
      dtype = _dtypes.int64
    dtype = _execute.make_type(dtype, "dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RandomPoissonV2", shape=shape, rate=rate, seed=seed, seed2=seed2,
        dtype=dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("seed", _op.get_attr("seed"), "seed2", _op.get_attr("seed2"),
              "S", _op.get_attr("S"), "R", _op.get_attr("R"), "dtype",
              _op.get_attr("dtype"))
    _execute.record_gradient(
      "RandomPoissonV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "RandomPoissonV2", name,
        _ctx._post_execution_callbacks, shape, rate, "seed", seed, "seed2",
        seed2, "dtype", dtype)
      return _result
    except _core._FallbackException:
      return random_poisson_v2_eager_fallback(
          shape, rate, seed=seed, seed2=seed2, dtype=dtype, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
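
A minimal sketch drawing two independent samples per rate with the wrapper above; the two rates are chosen so that both documented sampling paths (Knuth's algorithm for small rates, Hormann's transformed rejection for rate >= 10) are exercised.

import tensorflow as tf

samples = random_poisson_v2(shape=tf.constant([2]),
                            rate=tf.constant([1.0, 50.0]), seed=7)
with tf.Session() as sess:
    print(sess.run(samples))  # shape (2, 2); the last axis follows rates 1.0 and 50.0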
def multinomial(logits, num_samples, seed=0, seed2=0, output_dtype=_dtypes.int64, name=None):
  r"""Draws samples from a multinomial distribution.

  Args:
    logits: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
      represents the unnormalized log probabilities for all classes.
    num_samples: A `Tensor` of type `int32`.
      0-D.  Number of independent samples to draw for each row slice.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 is set to be non-zero, the internal random number
      generator is seeded by the given seed.  Otherwise, a random seed is used.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_dtype`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    if output_dtype is None:
      output_dtype = _dtypes.int64
    output_dtype = _execute.make_type(output_dtype, "output_dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Multinomial", logits=logits, num_samples=num_samples, seed=seed,
        seed2=seed2, output_dtype=output_dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("seed", _op.get_attr("seed"), "seed2", _op.get_attr("seed2"),
              "T", _op.get_attr("T"), "output_dtype",
              _op.get_attr("output_dtype"))
    _execute.record_gradient(
      "Multinomial", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "Multinomial", name,
        _ctx._post_execution_callbacks, logits, num_samples, "seed", seed,
        "seed2", seed2, "output_dtype", output_dtype)
      return _result
    except _core._FallbackException:
      return multinomial_eager_fallback(
          logits, num_samples, seed=seed, seed2=seed2,
          output_dtype=output_dtype, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
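
A minimal sketch of drawing class ids from one row of unnormalized log-probabilities with the wrapper above; the probabilities are illustrative.

import tensorflow as tf

logits = tf.log(tf.constant([[0.1, 0.6, 0.3]]))  # 1 row, 3 classes
ids = multinomial(logits, num_samples=tf.constant(5), seed=3)
with tf.Session() as sess:
    print(sess.run(ids))  # shape (1, 5), values drawn from {0, 1, 2}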
Example #21
def bipartite_match(distance_mat, num_valid_rows, top_k=-1, name=None):
  r"""Find bipartite matching based on a given distance matrix.

  A greedy bi-partite matching algorithm is used to obtain the matching with the
  (greedy) minimum distance.

  Args:
    distance_mat: A `Tensor` of type `float32`.
      A 2-D float tensor of shape `[num_rows, num_columns]`. It is a
      pair-wise distance matrix between the entities represented by each row and
      each column. It is an asymmetric matrix. The smaller the distance is, the more
      similar the pairs are. The bipartite matching is to minimize the distances.
    num_valid_rows: A `Tensor` of type `float32`.
      A scalar or a 1-D tensor with one element describing the
      number of valid rows of distance_mat to consider for the bipartite matching.
      If set to be negative, then all rows from `distance_mat` are used.
    top_k: An optional `int`. Defaults to `-1`.
      A scalar that specifies the number of top-k matches to retrieve.
      If set to be negative, then it is set according to the maximum number of
      matches from `distance_mat`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (row_to_col_match_indices, col_to_row_match_indices).

    row_to_col_match_indices: A `Tensor` of type `int32`. A vector of length num_rows, which is the number of
      rows of the input `distance_matrix`.
      If `row_to_col_match_indices[i]` is not -1, row i is matched to column
      `row_to_col_match_indices[i]`.
    col_to_row_match_indices: A `Tensor` of type `int32`. A vector of length num_columns, which is the number
      of columns of the input distance matrix.
      If `col_to_row_match_indices[j]` is not -1, column j is matched to row
      `col_to_row_match_indices[j]`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if top_k is None:
      top_k = -1
    top_k = _execute.make_int(top_k, "top_k")
    _, _, _op = _op_def_lib._apply_op_helper(
        "BipartiteMatch", distance_mat=distance_mat,
        num_valid_rows=num_valid_rows, top_k=top_k, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("top_k", _op.get_attr("top_k"))
    _execute.record_gradient(
      "BipartiteMatch", _inputs_flat, _attrs, _result, name)
    _result = _BipartiteMatchOutput._make(_result)
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BipartiteMatch", name, _ctx._post_execution_callbacks, distance_mat,
        num_valid_rows, "top_k", top_k)
      _result = _BipartiteMatchOutput._make(_result)
      return _result
    except _core._FallbackException:
      return bipartite_match_eager_fallback(
          distance_mat, num_valid_rows, top_k=top_k, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
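
# --- Usage sketch (illustrative, not part of the generated file). Assumes a
# TensorFlow build matching this wrapper; `bipartite_match` is the function
# defined above, and smaller distances are matched first by the greedy algorithm.
import tensorflow as tf

distance_mat = tf.constant([[0.1, 0.9],
                            [0.8, 0.2]], dtype=tf.float32)
num_valid_rows = tf.constant(2.0)  # consider both rows
row_to_col, col_to_row = bipartite_match(distance_mat, num_valid_rows)
# Greedy result: row 0 -> column 0 and row 1 -> column 1, i.e.
# row_to_col == [0, 1] and col_to_row == [0, 1].
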
Exemple #22
0
def sdca_optimizer(sparse_example_indices,
                   sparse_feature_indices,
                   sparse_feature_values,
                   dense_features,
                   example_weights,
                   example_labels,
                   sparse_indices,
                   sparse_weights,
                   dense_weights,
                   example_state_data,
                   loss_type,
                   l1,
                   l2,
                   num_loss_partitions,
                   num_inner_iterations,
                   adaptative=True,
                   name=None):
    r"""Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for

  linear models with L1 + L2 regularization. As the global optimization objective is
  strongly convex, the optimizer optimizes the dual objective at each step. The
  optimizer applies each update one example at a time. Examples are sampled
  uniformly, and the optimizer is learning-rate free and enjoys a linear convergence
  rate.

  [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
  Shai Shalev-Shwartz, Tong Zhang. 2012

  $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$

  [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
  Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
  Peter Richtarik, Martin Takac. 2015

  [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
  Dominik Csiba, Zheng Qu, Peter Richtarik. 2015

  Args:
    sparse_example_indices: A list of `Tensor` objects with type `int64`.
      a list of vectors which contain example indices.
    sparse_feature_indices: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `int64`.
      a list of vectors which contain feature indices.
    sparse_feature_values: A list of `Tensor` objects with type `float32`.
      a list of vectors which contain the feature values
      associated with each feature group.
    dense_features: A list of `Tensor` objects with type `float32`.
      a list of matrices which contains the dense feature values.
    example_weights: A `Tensor` of type `float32`.
      a vector which contains the weight associated with each
      example.
    example_labels: A `Tensor` of type `float32`.
      a vector which contains the label/target associated with each
      example.
    sparse_indices: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `int64`.
      a list of vectors where each value is the indices which has
      corresponding weights in sparse_weights. This field may be omitted for the
      dense approach.
    sparse_weights: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `float32`.
      a list of vectors where each value is the weight associated with
      a sparse feature group.
    dense_weights: A list with the same length as `dense_features` of `Tensor` objects with type `float32`.
      a list of vectors where the values are the weights associated
      with a dense feature group.
    example_state_data: A `Tensor` of type `float32`.
      a list of vectors containing the example state data.
    loss_type: A `string` from: `"logistic_loss", "squared_loss", "hinge_loss", "smooth_hinge_loss", "poisson_loss"`.
      Type of the primal loss. Currently SdcaSolver supports logistic,
      squared and hinge losses.
    l1: A `float`. Symmetric l1 regularization strength.
    l2: A `float`. Symmetric l2 regularization strength.
    num_loss_partitions: An `int` that is `>= 1`.
      Number of partitions of the global loss function.
    num_inner_iterations: An `int` that is `>= 1`.
      Number of iterations per mini-batch.
    adaptative: An optional `bool`. Defaults to `True`.
      Whether to use Adaptive SDCA for the inner loop.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights).

    out_example_state_data: A `Tensor` of type `float32`.
    out_delta_sparse_weights: A list with the same length as `sparse_example_indices` of `Tensor` objects with type `float32`.
    out_delta_dense_weights: A list with the same length as `dense_features` of `Tensor` objects with type `float32`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if not isinstance(sparse_example_indices, (list, tuple)):
            raise TypeError(
                "Expected list for 'sparse_example_indices' argument to "
                "'sdca_optimizer' Op, not %r." % sparse_example_indices)
        _attr_num_sparse_features = len(sparse_example_indices)
        if not isinstance(sparse_feature_indices, (list, tuple)):
            raise TypeError(
                "Expected list for 'sparse_feature_indices' argument to "
                "'sdca_optimizer' Op, not %r." % sparse_feature_indices)
        if len(sparse_feature_indices) != _attr_num_sparse_features:
            raise ValueError(
                "List argument 'sparse_feature_indices' to 'sdca_optimizer' Op with length %d "
                "must match length %d of argument 'sparse_example_indices'." %
                (len(sparse_feature_indices), _attr_num_sparse_features))
        if not isinstance(sparse_indices, (list, tuple)):
            raise TypeError("Expected list for 'sparse_indices' argument to "
                            "'sdca_optimizer' Op, not %r." % sparse_indices)
        if len(sparse_indices) != _attr_num_sparse_features:
            raise ValueError(
                "List argument 'sparse_indices' to 'sdca_optimizer' Op with length %d "
                "must match length %d of argument 'sparse_example_indices'." %
                (len(sparse_indices), _attr_num_sparse_features))
        if not isinstance(sparse_weights, (list, tuple)):
            raise TypeError("Expected list for 'sparse_weights' argument to "
                            "'sdca_optimizer' Op, not %r." % sparse_weights)
        if len(sparse_weights) != _attr_num_sparse_features:
            raise ValueError(
                "List argument 'sparse_weights' to 'sdca_optimizer' Op with length %d "
                "must match length %d of argument 'sparse_example_indices'." %
                (len(sparse_weights), _attr_num_sparse_features))
        if not isinstance(sparse_feature_values, (list, tuple)):
            raise TypeError(
                "Expected list for 'sparse_feature_values' argument to "
                "'sdca_optimizer' Op, not %r." % sparse_feature_values)
        _attr_num_sparse_features_with_values = len(sparse_feature_values)
        if not isinstance(dense_features, (list, tuple)):
            raise TypeError("Expected list for 'dense_features' argument to "
                            "'sdca_optimizer' Op, not %r." % dense_features)
        _attr_num_dense_features = len(dense_features)
        if not isinstance(dense_weights, (list, tuple)):
            raise TypeError("Expected list for 'dense_weights' argument to "
                            "'sdca_optimizer' Op, not %r." % dense_weights)
        if len(dense_weights) != _attr_num_dense_features:
            raise ValueError(
                "List argument 'dense_weights' to 'sdca_optimizer' Op with length %d "
                "must match length %d of argument 'dense_features'." %
                (len(dense_weights), _attr_num_dense_features))
        loss_type = _execute.make_str(loss_type, "loss_type")
        l1 = _execute.make_float(l1, "l1")
        l2 = _execute.make_float(l2, "l2")
        num_loss_partitions = _execute.make_int(num_loss_partitions,
                                                "num_loss_partitions")
        num_inner_iterations = _execute.make_int(num_inner_iterations,
                                                 "num_inner_iterations")
        if adaptative is None:
            adaptative = True
        adaptative = _execute.make_bool(adaptative, "adaptative")
        _, _, _op = _op_def_lib._apply_op_helper(
            "SdcaOptimizer",
            sparse_example_indices=sparse_example_indices,
            sparse_feature_indices=sparse_feature_indices,
            sparse_feature_values=sparse_feature_values,
            dense_features=dense_features,
            example_weights=example_weights,
            example_labels=example_labels,
            sparse_indices=sparse_indices,
            sparse_weights=sparse_weights,
            dense_weights=dense_weights,
            example_state_data=example_state_data,
            loss_type=loss_type,
            l1=l1,
            l2=l2,
            num_loss_partitions=num_loss_partitions,
            num_inner_iterations=num_inner_iterations,
            adaptative=adaptative,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("loss_type", _op.get_attr("loss_type"), "adaptative",
                  _op.get_attr("adaptative"), "num_sparse_features",
                  _op.get_attr("num_sparse_features"),
                  "num_sparse_features_with_values",
                  _op.get_attr("num_sparse_features_with_values"),
                  "num_dense_features", _op.get_attr("num_dense_features"),
                  "l1", _op.get_attr("l1"), "l2",
                  _op.get_attr("l2"), "num_loss_partitions",
                  _op.get_attr("num_loss_partitions"), "num_inner_iterations",
                  _op.get_attr("num_inner_iterations"))
        _execute.record_gradient("SdcaOptimizer", _inputs_flat, _attrs,
                                 _result, name)
        _result = _result[:1] + [_result[1:1 + _attr_num_sparse_features]
                                 ] + _result[1 + _attr_num_sparse_features:]
        _result = _result[:2] + [_result[2:]]
        _result = _SdcaOptimizerOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "SdcaOptimizer", name, _ctx._post_execution_callbacks,
                sparse_example_indices, sparse_feature_indices,
                sparse_feature_values, dense_features, example_weights,
                example_labels, sparse_indices, sparse_weights, dense_weights,
                example_state_data, "loss_type", loss_type, "adaptative",
                adaptative, "l1", l1, "l2", l2, "num_loss_partitions",
                num_loss_partitions, "num_inner_iterations",
                num_inner_iterations)
            _result = _SdcaOptimizerOutput._make(_result)
            return _result
        except _core._FallbackException:
            return sdca_optimizer_eager_fallback(
                sparse_example_indices,
                sparse_feature_indices,
                sparse_feature_values,
                dense_features,
                example_weights,
                example_labels,
                sparse_indices,
                sparse_weights,
                dense_weights,
                example_state_data,
                loss_type=loss_type,
                adaptative=adaptative,
                l1=l1,
                l2=l2,
                num_loss_partitions=num_loss_partitions,
                num_inner_iterations=num_inner_iterations,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
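
# --- Usage sketch (illustrative and hedged, not part of the generated file): one
# SDCA step over a single dense feature group and no sparse feature groups,
# calling the wrapper above. The [num_examples, 4] shape used for
# example_state_data and the acceptance of empty sparse-feature lists are
# assumptions, not guarantees from this file.
import tensorflow as tf

dense_x = tf.constant([[1.0], [2.0], [3.0]])  # 3 examples, 1 dense feature
labels = tf.constant([0.0, 0.0, 1.0])
weights = tf.constant([1.0, 1.0, 1.0])
state = tf.zeros([3, 4])                      # assumed per-example dual state
new_state, delta_sparse, delta_dense = sdca_optimizer(
    sparse_example_indices=[], sparse_feature_indices=[],
    sparse_feature_values=[], dense_features=[dense_x],
    example_weights=weights, example_labels=labels,
    sparse_indices=[], sparse_weights=[], dense_weights=[tf.zeros([1])],
    example_state_data=state, loss_type="logistic_loss",
    l1=0.0, l2=1.0, num_loss_partitions=1, num_inner_iterations=1)
# delta_dense[0] holds the update for the dense weight vector.
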
Exemple #23
0
def _fixed_unigram_candidate_sampler(true_classes,
                                     num_true,
                                     num_sampled,
                                     unique,
                                     range_max,
                                     vocab_file="",
                                     distortion=1,
                                     num_reserved_ids=0,
                                     num_shards=1,
                                     shard=0,
                                     unigrams=[],
                                     seed=0,
                                     seed2=0,
                                     name=None):
    r"""Generates labels for candidate sampling with a learned unigram distribution.

  A unigram sampler could use a fixed unigram distribution read from a
  file or passed in as an in-memory array instead of building up the distribution
  from data on the fly. There is also an option to skew the distribution by
  applying a distortion power to the weights.

  The vocabulary file should be in CSV-like format, with the last field
  being the weight associated with the word.

  For each batch, this op picks a single set of sampled candidate labels.

  The advantages of sampling candidates per-batch are simplicity and the
  possibility of efficient dense matrix multiplication. The disadvantage is that
  the sampled candidates must be chosen independently of the context and of the
  true labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      A batch_size * num_true matrix, in which each row contains the
      IDs of the num_true target_classes in the corresponding original label.
    num_true: An `int` that is `>= 1`. Number of true labels per context.
    num_sampled: An `int` that is `>= 1`.
      Number of candidates to randomly sample.
    unique: A `bool`.
      If unique is true, we sample with rejection, so that all sampled
      candidates in a batch are unique. This requires some approximation to
      estimate the post-rejection sampling probabilities.
    range_max: An `int` that is `>= 1`.
      The sampler will sample integers from the interval [0, range_max).
    vocab_file: An optional `string`. Defaults to `""`.
      Each valid line in this file (which should have a CSV-like format)
      corresponds to a valid word ID. IDs are in sequential order, starting from
      num_reserved_ids. The last entry in each line is expected to be a value
      corresponding to the count or relative probability. Exactly one of vocab_file
      and unigrams needs to be passed to this op.
    distortion: An optional `float`. Defaults to `1`.
      The distortion is used to skew the unigram probability distribution.
      Each weight is first raised to the distortion's power before adding to the
      internal unigram distribution. As a result, distortion = 1.0 gives regular
      unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
      a uniform distribution.
    num_reserved_ids: An optional `int`. Defaults to `0`.
      Optionally some reserved IDs can be added in the range [0,
      ..., num_reserved_ids) by the users. One use case is that a special unknown
      word token is used as ID 0. These IDs will have a sampling probability of 0.
    num_shards: An optional `int` that is `>= 1`. Defaults to `1`.
      A sampler can be used to sample from a subset of the original range
      in order to speed up the whole computation through parallelism. This parameter
      (together with 'shard') indicates the number of partitions that are being
      used in the overall computation.
    shard: An optional `int` that is `>= 0`. Defaults to `0`.
      A sampler can be used to sample from a subset of the original range
      in order to speed up the whole computation through parallelism. This parameter
      (together with 'num_shards') indicates the particular partition number of a
      sampler op, when partitioning is being used.
    unigrams: An optional list of `floats`. Defaults to `[]`.
      A list of unigram counts or probabilities, one per ID in sequential
      order. Exactly one of vocab_file and unigrams should be passed to this op.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sampled_candidates, true_expected_count, sampled_expected_count).

    sampled_candidates: A `Tensor` of type `int64`. A vector of length num_sampled, in which each element is
      the ID of a sampled candidate.
    true_expected_count: A `Tensor` of type `float32`. A batch_size * num_true matrix, representing
      the number of times each candidate is expected to occur in a batch
      of sampled candidates. If unique=true, then this is a probability.
    sampled_expected_count: A `Tensor` of type `float32`. A vector of length num_sampled, for each sampled
      candidate representing the number of times the candidate is expected
      to occur in a batch of sampled candidates.  If unique=true, then this is a
      probability.
  """
    num_true = _execute.make_int(num_true, "num_true")
    num_sampled = _execute.make_int(num_sampled, "num_sampled")
    unique = _execute.make_bool(unique, "unique")
    range_max = _execute.make_int(range_max, "range_max")
    if vocab_file is None:
        vocab_file = ""
    vocab_file = _execute.make_str(vocab_file, "vocab_file")
    if distortion is None:
        distortion = 1
    distortion = _execute.make_float(distortion, "distortion")
    if num_reserved_ids is None:
        num_reserved_ids = 0
    num_reserved_ids = _execute.make_int(num_reserved_ids, "num_reserved_ids")
    if num_shards is None:
        num_shards = 1
    num_shards = _execute.make_int(num_shards, "num_shards")
    if shard is None:
        shard = 0
    shard = _execute.make_int(shard, "shard")
    if unigrams is None:
        unigrams = []
    if not isinstance(unigrams, (list, tuple)):
        raise TypeError("Expected list for 'unigrams' argument to "
                        "'fixed_unigram_candidate_sampler' Op, not %r." %
                        unigrams)
    unigrams = [_execute.make_float(_f, "unigrams") for _f in unigrams]
    if seed is None:
        seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
        seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper(
            "FixedUnigramCandidateSampler",
            true_classes=true_classes,
            num_true=num_true,
            num_sampled=num_sampled,
            unique=unique,
            range_max=range_max,
            vocab_file=vocab_file,
            distortion=distortion,
            num_reserved_ids=num_reserved_ids,
            num_shards=num_shards,
            shard=shard,
            unigrams=unigrams,
            seed=seed,
            seed2=seed2,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("num_true", _op.get_attr("num_true"), "num_sampled",
                  _op.get_attr("num_sampled"),
                  "unique", _op.get_attr("unique"), "range_max",
                  _op.get_attr("range_max"), "vocab_file",
                  _op.get_attr("vocab_file"), "distortion",
                  _op.get_attr("distortion"), "num_reserved_ids",
                  _op.get_attr("num_reserved_ids"), "num_shards",
                  _op.get_attr("num_shards"), "shard", _op.get_attr("shard"),
                  "unigrams", _op.get_attr("unigrams"), "seed",
                  _op.get_attr("seed"), "seed2", _op.get_attr("seed2"))
    else:
        true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
        _inputs_flat = [true_classes]
        _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
                  unique, "range_max", range_max, "vocab_file", vocab_file,
                  "distortion", distortion, "num_reserved_ids",
                  num_reserved_ids, "num_shards", num_shards, "shard", shard,
                  "unigrams", unigrams, "seed", seed, "seed2", seed2)
        _result = _execute.execute(b"FixedUnigramCandidateSampler",
                                   3,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("FixedUnigramCandidateSampler", _inputs_flat,
                             _attrs, _result, name)
    _result = _FixedUnigramCandidateSamplerOutput._make(_result)
    return _result
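
# --- Usage sketch (illustrative, not part of the generated file): sampling 4
# candidate IDs from an in-memory unigram distribution via the wrapper above.
# `unigrams` supplies one count per ID in [0, range_max), so `vocab_file` stays
# empty; assumes a TensorFlow build matching this generated code.
import tensorflow as tf

true_classes = tf.constant([[0], [3]], dtype=tf.int64)  # one true label per example
sampled, true_expected, sampled_expected = _fixed_unigram_candidate_sampler(
    true_classes, num_true=1, num_sampled=4, unique=True, range_max=5,
    unigrams=[10.0, 5.0, 2.0, 2.0, 1.0], seed=7)
# `sampled` holds 4 distinct IDs in [0, 5); the expected-count outputs are
# probabilities because unique=True.
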
def single_image_random_dot_stereograms(depth_values, hidden_surface_removal=True, convergence_dots_size=8, dots_per_inch=72, eye_separation=2.5, mu=0.3333, normalize=True, normalize_max=-100, normalize_min=100, border_level=0, number_colors=256, output_image_shape=[1024, 768, 1], output_data_window=[1022, 757], name=None):
  r"""Outputs a single image random dot stereogram for export via encode_PNG/JPG OP.

  Given the 2-D tensor 'depth_values' with encoded Z values, this operation will
  encode 3-D data into a 2-D image.  The output of this Op is suitable for the
  encode_PNG/JPG ops.  Be careful with image compression as this may corrupt the
  encoded 3-D data within the image.

  This Op is based upon:
  'http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper'

  Example use which outputs a SIRDS image as picture_out.png:
  ```python
  img=[[1,2,3,3,2,1],
       [1,2,3,4,5,2],
       [1,2,3,4,5,3],
       [1,2,3,4,5,4],
       [6,5,4,4,5,5]]

  session = tf.InteractiveSession()

  sirds = single_image_random_dot_stereograms(img,convergence_dots_size=8,number_colors=256,normalize=True)

  out = sirds.eval()

  png = tf.image.encode_png(out).eval()

  with open('picture_out.png', 'wb') as f:
      f.write(png)
  ```

  Args:
    depth_values: A `Tensor`. Must be one of the following types: `float64`, `float32`, `int64`, `int32`.
      Z values of data to encode into 'output_data_window' window,
      lower values are further away {0.0 floor(far), 1.0 ceiling(near) after normalization}, must be 2-D tensor
    hidden_surface_removal: An optional `bool`. Defaults to `True`.
      Activate hidden surface removal
    convergence_dots_size: An optional `int`. Defaults to `8`.
      Black dot size in pixels to help the viewer converge the image; drawn at the bottom of the image
    dots_per_inch: An optional `int`. Defaults to `72`.
      Output device in dots/inch
    eye_separation: An optional `float`. Defaults to `2.5`.
      Separation between eyes in inches
    mu: An optional `float`. Defaults to `0.3333`.
      Depth of field, Fraction of viewing distance (eg. 1/3 = .3333)
    normalize: An optional `bool`. Defaults to `True`.
      Normalize input data to [0.0, 1.0]
    normalize_max: An optional `float`. Defaults to `-100`.
      Fix MAX value for Normalization - if < MIN, autoscale
    normalize_min: An optional `float`. Defaults to `100`.
      Fix MIN value for Normalization - if > MAX, autoscale
    border_level: An optional `float`. Defaults to `0`.
      Value of border depth 0.0 {far} to 1.0 {near}
    number_colors: An optional `int`. Defaults to `256`.
      2 (Black & White), 256 (grayscale), and numbers > 256 (Full Color) are all that are currently supported
    output_image_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1024, 768, 1]`.
      Output size of returned image in X,Y, Channels 1-grayscale, 3 color (1024, 768, 1),
      channels will be updated to 3 if 'number_colors' > 256
    output_data_window: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1022, 757]`.
      Size of "DATA" window, must be equal to or smaller than 'output_image_shape', will be centered
      and use 'convergence_dots_size' for best fit to avoid overlap if possible
    name: A name for the operation (optional).

  Returns:
    A tensor of size 'output_image_shape' with the encoded 'depth_values'
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    if hidden_surface_removal is None:
      hidden_surface_removal = True
    hidden_surface_removal = _execute.make_bool(hidden_surface_removal, "hidden_surface_removal")
    if convergence_dots_size is None:
      convergence_dots_size = 8
    convergence_dots_size = _execute.make_int(convergence_dots_size, "convergence_dots_size")
    if dots_per_inch is None:
      dots_per_inch = 72
    dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
    if eye_separation is None:
      eye_separation = 2.5
    eye_separation = _execute.make_float(eye_separation, "eye_separation")
    if mu is None:
      mu = 0.3333
    mu = _execute.make_float(mu, "mu")
    if normalize is None:
      normalize = True
    normalize = _execute.make_bool(normalize, "normalize")
    if normalize_max is None:
      normalize_max = -100
    normalize_max = _execute.make_float(normalize_max, "normalize_max")
    if normalize_min is None:
      normalize_min = 100
    normalize_min = _execute.make_float(normalize_min, "normalize_min")
    if border_level is None:
      border_level = 0
    border_level = _execute.make_float(border_level, "border_level")
    if number_colors is None:
      number_colors = 256
    number_colors = _execute.make_int(number_colors, "number_colors")
    if output_image_shape is None:
      output_image_shape = [1024, 768, 1]
    output_image_shape = _execute.make_shape(output_image_shape, "output_image_shape")
    if output_data_window is None:
      output_data_window = [1022, 757]
    output_data_window = _execute.make_shape(output_data_window, "output_data_window")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SingleImageRandomDotStereograms", depth_values=depth_values,
        hidden_surface_removal=hidden_surface_removal,
        convergence_dots_size=convergence_dots_size,
        dots_per_inch=dots_per_inch, eye_separation=eye_separation, mu=mu,
        normalize=normalize, normalize_max=normalize_max,
        normalize_min=normalize_min, border_level=border_level,
        number_colors=number_colors, output_image_shape=output_image_shape,
        output_data_window=output_data_window, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "hidden_surface_removal",
              _op.get_attr("hidden_surface_removal"), "convergence_dots_size",
              _op.get_attr("convergence_dots_size"), "dots_per_inch",
              _op.get_attr("dots_per_inch"), "eye_separation",
              _op.get_attr("eye_separation"), "mu", _op.get_attr("mu"),
              "normalize", _op.get_attr("normalize"), "normalize_max",
              _op.get_attr("normalize_max"), "normalize_min",
              _op.get_attr("normalize_min"), "border_level",
              _op.get_attr("border_level"), "number_colors",
              _op.get_attr("number_colors"), "output_image_shape",
              _op.get_attr("output_image_shape"), "output_data_window",
              _op.get_attr("output_data_window"))
    _execute.record_gradient(
      "SingleImageRandomDotStereograms", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SingleImageRandomDotStereograms", name,
        _ctx._post_execution_callbacks, depth_values,
        "hidden_surface_removal", hidden_surface_removal,
        "convergence_dots_size", convergence_dots_size, "dots_per_inch",
        dots_per_inch, "eye_separation", eye_separation, "mu", mu,
        "normalize", normalize, "normalize_max", normalize_max,
        "normalize_min", normalize_min, "border_level", border_level,
        "number_colors", number_colors, "output_image_shape",
        output_image_shape, "output_data_window", output_data_window)
      return _result
    except _core._FallbackException:
      return single_image_random_dot_stereograms_eager_fallback(
          depth_values, hidden_surface_removal=hidden_surface_removal,
          convergence_dots_size=convergence_dots_size,
          dots_per_inch=dots_per_inch, eye_separation=eye_separation, mu=mu,
          normalize=normalize, normalize_max=normalize_max,
          normalize_min=normalize_min, border_level=border_level,
          number_colors=number_colors, output_image_shape=output_image_shape,
          output_data_window=output_data_window, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
Exemple #25
0
def _uniform_candidate_sampler(true_classes,
                               num_true,
                               num_sampled,
                               unique,
                               range_max,
                               seed=0,
                               seed2=0,
                               name=None):
    r"""Generates labels for candidate sampling with a uniform distribution.

  See explanations of candidate sampling and the data formats at
  go/candidate-sampling.

  For each batch, this op picks a single set of sampled candidate labels.

  The advantages of sampling candidates per-batch are simplicity and the
  possibility of efficient dense matrix multiplication. The disadvantage is that
  the sampled candidates must be chosen independently of the context and of the
  true labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      A batch_size * num_true matrix, in which each row contains the
      IDs of the num_true target_classes in the corresponding original label.
    num_true: An `int` that is `>= 1`. Number of true labels per context.
    num_sampled: An `int` that is `>= 1`.
      Number of candidates to randomly sample.
    unique: A `bool`.
      If unique is true, we sample with rejection, so that all sampled
      candidates in a batch are unique. This requires some approximation to
      estimate the post-rejection sampling probabilities.
    range_max: An `int` that is `>= 1`.
      The sampler will sample integers from the interval [0, range_max).
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sampled_candidates, true_expected_count, sampled_expected_count).

    sampled_candidates: A `Tensor` of type `int64`. A vector of length num_sampled, in which each element is
      the ID of a sampled candidate.
    true_expected_count: A `Tensor` of type `float32`. A batch_size * num_true matrix, representing
      the number of times each candidate is expected to occur in a batch
      of sampled candidates. If unique=true, then this is a probability.
    sampled_expected_count: A `Tensor` of type `float32`. A vector of length num_sampled, for each sampled
      candidate representing the number of times the candidate is expected
      to occur in a batch of sampled candidates.  If unique=true, then this is a
      probability.
  """
    num_true = _execute.make_int(num_true, "num_true")
    num_sampled = _execute.make_int(num_sampled, "num_sampled")
    unique = _execute.make_bool(unique, "unique")
    range_max = _execute.make_int(range_max, "range_max")
    if seed is None:
        seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
        seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("UniformCandidateSampler",
                                                 true_classes=true_classes,
                                                 num_true=num_true,
                                                 num_sampled=num_sampled,
                                                 unique=unique,
                                                 range_max=range_max,
                                                 seed=seed,
                                                 seed2=seed2,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("num_true", _op.get_attr("num_true"), "num_sampled",
                  _op.get_attr("num_sampled"),
                  "unique", _op.get_attr("unique"), "range_max",
                  _op.get_attr("range_max"), "seed", _op.get_attr("seed"),
                  "seed2", _op.get_attr("seed2"))
    else:
        true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
        _inputs_flat = [true_classes]
        _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
                  unique, "range_max", range_max, "seed", seed, "seed2", seed2)
        _result = _execute.execute(b"UniformCandidateSampler",
                                   3,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("UniformCandidateSampler", _inputs_flat, _attrs,
                             _result, name)
    _result = _UniformCandidateSamplerOutput._make(_result)
    return _result
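
# --- Usage sketch (illustrative, not part of the generated file): uniformly
# sampling 3 candidate IDs from [0, 10) with the wrapper above. Assumes a
# TensorFlow build matching this generated code.
import tensorflow as tf

true_classes = tf.constant([[2], [7]], dtype=tf.int64)
sampled, true_expected, sampled_expected = _uniform_candidate_sampler(
    true_classes, num_true=1, num_sampled=3, unique=True, range_max=10, seed=1)
# Each ID in `sampled` is drawn uniformly from [0, 10), without replacement
# because unique=True.
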
def load_and_remap_matrix(ckpt_path,
                          old_tensor_name,
                          row_remapping,
                          col_remapping,
                          initializing_values,
                          num_rows,
                          num_cols,
                          max_rows_in_memory=-1,
                          name=None):
    r"""Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint

  at `ckpt_path` and potentially reorders its rows and columns using the
  specified remappings.

  Most users should use one of the wrapper initializers (such as
  `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
  function directly.

  The remappings are 1-D tensors with the following properties:

  * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
    matrix will be initialized from the row corresponding to index
    `row_remapping[i]` in the old `Tensor` from the checkpoint.
  * `col_remapping` must have either 0 entries (indicating that no column
    reordering is needed) or `num_cols` entries. If specified, column `j` of the
    output matrix will be initialized from the column corresponding to index
    `col_remapping[j]` in the old `Tensor` from the checkpoint.
  * A value of -1 in either of the remappings signifies a "missing" entry. In that
    case, values from the `initializing_values` tensor will be used to fill that
    missing row or column. If `row_remapping` has `r` missing entries and
    `col_remapping` has `c` missing entries, then the following condition must be
    true:

  `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`

  The remapping tensors can be generated using the GenerateVocabRemapping op.

  As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
  initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
  the value from row i, column j of the old tensor in the checkpoint, the output
  matrix will look like the following:

  [[w(1, 0),  w(1, 2),  0.5],
   [w(0, 0),  w(0, 2), -0.5],
   [0.25,    -0.25,      42]]

  Args:
    ckpt_path: A `Tensor` of type `string`.
      Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
      which the old matrix `Tensor` will be loaded.
    old_tensor_name: A `Tensor` of type `string`.
      Name of the 2-D `Tensor` to load from checkpoint.
    row_remapping: A `Tensor` of type `int64`.
      An int `Tensor` of row remappings (generally created by
      `generate_vocab_remapping`).  Even if no row remapping is needed, this must
      still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
      index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
    col_remapping: A `Tensor` of type `int64`.
      An int `Tensor` of column remappings (generally created by
      `generate_vocab_remapping`).  May be a size-0 `Tensor` if only row remapping
      is to be done (e.g. column ordering is the same).
    initializing_values: A `Tensor` of type `float32`.
      A float `Tensor` containing values to fill in for cells
      in the output matrix that are not loaded from the checkpoint. Length must be
      exactly the same as the number of missing / new cells.
    num_rows: An `int` that is `>= 0`.
      Number of rows (length of the 1st dimension) in the output matrix.
    num_cols: An `int` that is `>= 1`.
      Number of columns (length of the 2nd dimension) in the output matrix.
    max_rows_in_memory: An optional `int`. Defaults to `-1`.
      The maximum number of rows to load from the checkpoint at
      once. If less than or equal to 0, the entire matrix will be loaded into
      memory. Setting this arg trades increased disk reads for lower memory usage.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        num_rows = _execute.make_int(num_rows, "num_rows")
        num_cols = _execute.make_int(num_cols, "num_cols")
        if max_rows_in_memory is None:
            max_rows_in_memory = -1
        max_rows_in_memory = _execute.make_int(max_rows_in_memory,
                                               "max_rows_in_memory")
        _, _, _op = _op_def_lib._apply_op_helper(
            "LoadAndRemapMatrix",
            ckpt_path=ckpt_path,
            old_tensor_name=old_tensor_name,
            row_remapping=row_remapping,
            col_remapping=col_remapping,
            initializing_values=initializing_values,
            num_rows=num_rows,
            num_cols=num_cols,
            max_rows_in_memory=max_rows_in_memory,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("num_rows", _op.get_attr("num_rows"), "num_cols",
                  _op.get_attr("num_cols"), "max_rows_in_memory",
                  _op.get_attr("max_rows_in_memory"))
        _execute.record_gradient("LoadAndRemapMatrix", _inputs_flat, _attrs,
                                 _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "LoadAndRemapMatrix", name, _ctx._post_execution_callbacks,
                ckpt_path, old_tensor_name, row_remapping, col_remapping,
                initializing_values, "num_rows", num_rows, "num_cols",
                num_cols, "max_rows_in_memory", max_rows_in_memory)
            return _result
        except _core._FallbackException:
            return load_and_remap_matrix_eager_fallback(
                ckpt_path,
                old_tensor_name,
                row_remapping,
                col_remapping,
                initializing_values,
                num_rows=num_rows,
                num_cols=num_cols,
                max_rows_in_memory=max_rows_in_memory,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
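
# --- Usage sketch (illustrative, not part of the generated file): reordering the
# rows of a checkpointed 3x3 matrix with the wrapper above. The checkpoint path
# and tensor name below are placeholders, not real files.
remapped = load_and_remap_matrix(
    ckpt_path="/tmp/model.ckpt",           # hypothetical checkpoint prefix
    old_tensor_name="embeddings",          # hypothetical 2-D variable name
    row_remapping=[1, 0, -1],              # swap rows 0 and 1; last row is "missing"
    col_remapping=[],                      # keep the original column order
    initializing_values=[0.0, 0.0, 0.0],   # fills the one missing row (3 columns)
    num_rows=3, num_cols=3)
# r = 1 missing row, c = 0 missing columns: (1*3) + (0*3) - (1*0) == 3 values.
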
Exemple #27
0
def write_image_summary(writer,
                        step,
                        tag,
                        tensor,
                        bad_color,
                        max_images=3,
                        name=None):
    r"""TODO: add doc.

  Args:
    writer: A `Tensor` of type `resource`.
    step: A `Tensor` of type `int64`.
    tag: A `Tensor` of type `string`.
    tensor: A `Tensor`. Must be one of the following types: `uint8`, `float32`, `half`.
    bad_color: A `Tensor` of type `uint8`.
    max_images: An optional `int` that is `>= 1`. Defaults to `3`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "WriteImageSummary", name, _ctx._post_execution_callbacks,
                writer, step, tag, tensor, bad_color, "max_images", max_images)
            return _result
        except _core._FallbackException:
            try:
                return write_image_summary_eager_fallback(
                    writer,
                    step,
                    tag,
                    tensor,
                    bad_color,
                    max_images=max_images,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if max_images is None:
        max_images = 3
    max_images = _execute.make_int(max_images, "max_images")
    _, _, _op = _op_def_lib._apply_op_helper("WriteImageSummary",
                                             writer=writer,
                                             step=step,
                                             tag=tag,
                                             tensor=tensor,
                                             bad_color=bad_color,
                                             max_images=max_images,
                                             name=name)
    return _op
    _result = None
    return _result
def decode_wav(contents, desired_channels=-1, desired_samples=-1, name=None):
    r"""Decode a 16-bit PCM WAV file to a float tensor.

  The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.

  When desired_channels is set, if the input contains fewer channels than this
  then the last channel will be duplicated to give the requested number, else if
  the input has more channels than requested then the additional channels will be
  ignored.

  If desired_samples is set, then the audio will be cropped or padded with zeroes
  to the requested length.

  The first output contains a Tensor with the content of the audio samples. The
  lowest dimension will be the number of channels, and the second will be the
  number of samples. For example, a ten-sample-long stereo WAV file should give an
  output shape of [10, 2].

  Args:
    contents: A `Tensor` of type `string`.
      The WAV-encoded audio, usually from a file.
    desired_channels: An optional `int`. Defaults to `-1`.
      Number of sample channels wanted.
    desired_samples: An optional `int`. Defaults to `-1`.
      Length of audio requested.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (audio, sample_rate).

    audio: A `Tensor` of type `float32`.
    sample_rate: A `Tensor` of type `int32`.
  """
    _ctx = _context._context or _context.context()
    tld = _ctx._thread_local_data
    if tld.is_eager:
        try:
            _result = pywrap_tfe.TFE_Py_FastPathExecute(
                _ctx._context_handle, tld.device_name, "DecodeWav", name,
                tld.op_callbacks, contents, "desired_channels",
                desired_channels, "desired_samples", desired_samples)
            _result = _DecodeWavOutput._make(_result)
            return _result
        except _core._NotOkStatusException as e:
            _ops.raise_from_not_ok_status(e, name)
        except _core._FallbackException:
            pass
        try:
            return decode_wav_eager_fallback(contents,
                                             desired_channels=desired_channels,
                                             desired_samples=desired_samples,
                                             name=name,
                                             ctx=_ctx)
        except _core._SymbolicException:
            pass  # Add nodes to the TensorFlow graph.
        except (TypeError, ValueError):
            result = _dispatch.dispatch(
                decode_wav, (),
                dict(contents=contents,
                     desired_channels=desired_channels,
                     desired_samples=desired_samples,
                     name=name))
            if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
                return result
            raise
    # Add nodes to the TensorFlow graph.
    if desired_channels is None:
        desired_channels = -1
    desired_channels = _execute.make_int(desired_channels, "desired_channels")
    if desired_samples is None:
        desired_samples = -1
    desired_samples = _execute.make_int(desired_samples, "desired_samples")
    try:
        _, _, _op, _outputs = _op_def_library._apply_op_helper(
            "DecodeWav",
            contents=contents,
            desired_channels=desired_channels,
            desired_samples=desired_samples,
            name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(
            decode_wav, (),
            dict(contents=contents,
                 desired_channels=desired_channels,
                 desired_samples=desired_samples,
                 name=name))
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    _result = _outputs[:]
    if _execute.must_record_gradient():
        _attrs = ("desired_channels", _op._get_attr_int("desired_channels"),
                  "desired_samples", _op._get_attr_int("desired_samples"))
        _inputs_flat = _op.inputs
        _execute.record_gradient("DecodeWav", _inputs_flat, _attrs, _result)
    _result = _DecodeWavOutput._make(_result)
    return _result
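
# --- Usage sketch (illustrative, not part of the generated file): decoding a WAV
# file's bytes with the wrapper above. The file name is a placeholder.
import tensorflow as tf

wav_bytes = tf.io.read_file("speech.wav")  # hypothetical 16-bit PCM WAV file
audio, sample_rate = decode_wav(wav_bytes, desired_channels=1)
# `audio` is float32 in [-1.0, 1.0] with shape [samples, 1]; `sample_rate` is int32.
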
Exemple #29
0
def try_rpc(address,
            method,
            request,
            protocol="",
            fail_fast=True,
            timeout_in_ms=0,
            name=None):
    r"""TODO: add doc.

  Args:
    address: A `Tensor` of type `string`.
    method: A `Tensor` of type `string`.
    request: A `Tensor` of type `string`.
    protocol: An optional `string`. Defaults to `""`.
    fail_fast: An optional `bool`. Defaults to `True`.
    timeout_in_ms: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (response, status_code, status_message).

    response: A `Tensor` of type `string`.
    status_code: A `Tensor` of type `int32`.
    status_message: A `Tensor` of type `string`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if protocol is None:
            protocol = ""
        protocol = _execute.make_str(protocol, "protocol")
        if fail_fast is None:
            fail_fast = True
        fail_fast = _execute.make_bool(fail_fast, "fail_fast")
        if timeout_in_ms is None:
            timeout_in_ms = 0
        timeout_in_ms = _execute.make_int(timeout_in_ms, "timeout_in_ms")
        _, _, _op = _op_def_lib._apply_op_helper("TryRpc",
                                                 address=address,
                                                 method=method,
                                                 request=request,
                                                 protocol=protocol,
                                                 fail_fast=fail_fast,
                                                 timeout_in_ms=timeout_in_ms,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("protocol", _op.get_attr("protocol"), "fail_fast",
                  _op.get_attr("fail_fast"), "timeout_in_ms",
                  _op.get_attr("timeout_in_ms"))
        _execute.record_gradient("TryRpc", _inputs_flat, _attrs, _result, name)
        _result = _TryRpcOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "TryRpc", name, _ctx._post_execution_callbacks, address,
                method, request, "protocol", protocol, "fail_fast", fail_fast,
                "timeout_in_ms", timeout_in_ms)
            _result = _TryRpcOutput._make(_result)
            return _result
        except _core._FallbackException:
            return try_rpc_eager_fallback(address,
                                          method,
                                          request,
                                          protocol=protocol,
                                          fail_fast=fail_fast,
                                          timeout_in_ms=timeout_in_ms,
                                          name=name,
                                          ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
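
# --- Usage sketch (illustrative and hedged, not part of the generated file): a
# best-effort RPC through the wrapper above. The address, method string, and
# empty request payload are placeholders; failures are reported through
# status_code/status_message rather than raised as exceptions.
response, status_code, status_message = try_rpc(
    address="localhost:8500",                 # hypothetical gRPC endpoint
    method="/my.package.MyService/MyMethod",  # hypothetical fully-qualified method
    request=b"",                              # serialized request proto bytes
    protocol="grpc", fail_fast=True, timeout_in_ms=1000)
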
def process_input_v4(tree_handle, stats_handle, input_data, sparse_input_indices, sparse_input_values, sparse_input_shape, input_labels, input_weights, leaf_ids, random_seed, input_spec, params, name=None):
  r"""Add labels to stats after traversing the tree for each example.

  Outputs node ids that are finished.

  Args:
    tree_handle: A `Tensor` of type `resource`. The handle to the tree.
    stats_handle: A `Tensor` of type `resource`. The handle to the stats.
    input_data: A `Tensor` of type `float32`.
      The training batch's features as a 2-d tensor; `input_data[i][j]`
      gives the j-th feature of the i-th input.
    sparse_input_indices: A `Tensor` of type `int64`.
      The indices tensor from the SparseTensor input.
    sparse_input_values: A `Tensor` of type `float32`.
      The values tensor from the SparseTensor input.
    sparse_input_shape: A `Tensor` of type `int64`.
      The shape tensor from the SparseTensor input.
    input_labels: A `Tensor` of type `float32`.
      The training batch's labels as a 1 or 2-d tensor.
      'input_labels[i][j]' gives the j-th label/target for the i-th input.
    input_weights: A `Tensor` of type `float32`.
      The training batch's example weights as a 1-d tensor.
      'input_weights[i]' gives the weight for the i-th input.
    leaf_ids: A `Tensor` of type `int32`.
      `leaf_ids[i]` is the leaf id for input i.
    random_seed: An `int`.
    input_spec: A `string`.
    params: A `string`. A serialized TensorForestParams proto.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
    A 1-d tensor of node ids that have finished and are ready to
    grow.
  """
  random_seed = _execute.make_int(random_seed, "random_seed")
  input_spec = _execute.make_str(input_spec, "input_spec")
  params = _execute.make_str(params, "params")
  _ctx = _context.context()
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "ProcessInputV4", tree_handle=tree_handle, stats_handle=stats_handle,
        input_data=input_data, sparse_input_indices=sparse_input_indices,
        sparse_input_values=sparse_input_values,
        sparse_input_shape=sparse_input_shape, input_labels=input_labels,
        input_weights=input_weights, leaf_ids=leaf_ids,
        random_seed=random_seed, input_spec=input_spec, params=params,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("random_seed", _op.get_attr("random_seed"), "input_spec",
              _op.get_attr("input_spec"), "params", _op.get_attr("params"))
  else:
    tree_handle = _ops.convert_to_tensor(tree_handle, _dtypes.resource)
    stats_handle = _ops.convert_to_tensor(stats_handle, _dtypes.resource)
    input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)
    sparse_input_indices = _ops.convert_to_tensor(sparse_input_indices, _dtypes.int64)
    sparse_input_values = _ops.convert_to_tensor(sparse_input_values, _dtypes.float32)
    sparse_input_shape = _ops.convert_to_tensor(sparse_input_shape, _dtypes.int64)
    input_labels = _ops.convert_to_tensor(input_labels, _dtypes.float32)
    input_weights = _ops.convert_to_tensor(input_weights, _dtypes.float32)
    leaf_ids = _ops.convert_to_tensor(leaf_ids, _dtypes.int32)
    _inputs_flat = [tree_handle, stats_handle, input_data, sparse_input_indices, sparse_input_values, sparse_input_shape, input_labels, input_weights, leaf_ids]
    _attrs = ("random_seed", random_seed, "input_spec", input_spec, "params",
              params)
    _result = _execute.execute(b"ProcessInputV4", 1, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ProcessInputV4", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Exemple #31
0
def nccl_all_reduce(input, reduction, num_devices, shared_name, name=None):
    r"""Outputs a tensor containing the reduction across all input tensors.

  Outputs a tensor containing the reduction across all input tensors passed to ops
  within the same `shared_name`.

  The graph should be constructed so that if one op runs with shared_name value `c`,
  then `num_devices` ops will run with shared_name value `c`.  Failure to do so
  will cause the graph execution to fail to complete.

  input: the input to the reduction
  data: the value of the reduction across all `num_devices` devices.
  reduction: the reduction operation to perform.
  num_devices: The number of devices participating in this reduction.
  shared_name: Identifier that is shared between ops of the same reduction.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`.
    reduction: A `string` from: `"min", "max", "prod", "sum"`.
    num_devices: An `int`.
    shared_name: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "NcclAllReduce", name, _ctx.post_execution_callbacks, input,
                "reduction", reduction, "num_devices", num_devices,
                "shared_name", shared_name)
            return _result
        except _core._FallbackException:
            try:
                return nccl_all_reduce_eager_fallback(input,
                                                      reduction=reduction,
                                                      num_devices=num_devices,
                                                      shared_name=shared_name,
                                                      name=name,
                                                      ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    reduction = _execute.make_str(reduction, "reduction")
    num_devices = _execute.make_int(num_devices, "num_devices")
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper("NcclAllReduce",
                                             input=input,
                                             reduction=reduction,
                                             num_devices=num_devices,
                                             shared_name=shared_name,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("reduction", _op.get_attr("reduction"), "T",
              _op._get_attr_type("T"), "num_devices",
              _op.get_attr("num_devices"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient("NcclAllReduce", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
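A minimal graph-mode usage sketch for the wrapper above, assuming two visible GPUs; the tensors and the `shared_name` value are illustrative. Each participating device contributes one `NcclAllReduce` op with the same `shared_name`, and all of them are run together in one step.

```python
import tensorflow as tf

num_devices = 2
reduced = []
for i in range(num_devices):
    with tf.device("/gpu:%d" % i):
        local = tf.constant([1.0, 2.0, 3.0]) * (i + 1)
        # All ops share the same shared_name, so the runtime groups them
        # into a single all-reduce across the two devices.
        reduced.append(nccl_all_reduce(local, reduction="sum",
                                       num_devices=num_devices,
                                       shared_name="allreduce_demo"))

with tf.Session() as sess:
    # Both outputs hold the element-wise sum: [3., 6., 9.]
    print(sess.run(reduced))
```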
Example #32
def audio_spectrogram(input,
                      window_size,
                      stride,
                      magnitude_squared=False,
                      name=None):
    r"""Produces a visualization of audio data over time.

  Spectrograms are a standard way of representing audio information as a series of
  slices of frequency information, one slice for each window of time. By joining
  these together into a sequence, they form a distinctive fingerprint of the sound
  over time.

  This op expects to receive audio data as an input, stored as floats in the range
  -1 to 1, together with a window width in samples, and a stride specifying how
  far to move the window between slices. From this it generates a three
  dimensional output. The lowest dimension has an amplitude value for each
  frequency during that time slice. The next dimension is time, with successive
  frequency slices. The final dimension is for the channels in the input, so a
  stereo audio input would have two here for example.

  This means the layout when converted and saved as an image is rotated 90 degrees
  clockwise from a typical spectrogram. Time is descending down the Y axis, and
  the frequency decreases from left to right.

  Each value in the result represents the square root of the sum of the squares of
  the real and imaginary parts of an FFT on the current window of samples. In this
  way, the lowest dimension represents the power of each frequency in the current
  window, and adjacent windows are concatenated in the next dimension.

  To get a more intuitive and visual look at what this operation does, you can run
  tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
  resulting spectrogram as a PNG image.

  Args:
    input: A `Tensor` of type `float32`. Float representation of audio data.
    window_size: An `int`.
      How wide the input window is in samples. For the highest efficiency
      this should be a power of two, but other values are accepted.
    stride: An `int`.
      How widely apart the center of adjacent sample windows should be.
    magnitude_squared: An optional `bool`. Defaults to `False`.
      Whether to return the squared magnitude or just the
      magnitude. Using squared magnitude can avoid extra calculations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
    _ctx = _context.context()
    if not _ctx.executing_eagerly():
        window_size = _execute.make_int(window_size, "window_size")
        stride = _execute.make_int(stride, "stride")
        if magnitude_squared is None:
            magnitude_squared = False
        magnitude_squared = _execute.make_bool(magnitude_squared,
                                               "magnitude_squared")
        _, _, _op = _op_def_lib._apply_op_helper(
            "AudioSpectrogram",
            input=input,
            window_size=window_size,
            stride=stride,
            magnitude_squared=magnitude_squared,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("window_size", _op.get_attr("window_size"), "stride",
                  _op.get_attr("stride"), "magnitude_squared",
                  _op.get_attr("magnitude_squared"))
        _execute.record_gradient("AudioSpectrogram", _inputs_flat, _attrs,
                                 _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name, "AudioSpectrogram", name,
                _ctx._post_execution_callbacks, input, "window_size",
                window_size, "stride", stride, "magnitude_squared",
                magnitude_squared)
            return _result
        except _core._FallbackException:
            return audio_spectrogram_eager_fallback(
                input,
                window_size=window_size,
                stride=stride,
                magnitude_squared=magnitude_squared,
                name=name)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
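A small sketch of calling the wrapper above on a synthetic waveform, assuming TF1 graph mode; the sine-wave input and the window/stride values are illustrative.

```python
import numpy as np
import tensorflow as tf

# One second of a 440 Hz sine at 16 kHz, shaped [samples, channels] in [-1, 1].
sample_rate = 16000
t = np.linspace(0.0, 1.0, sample_rate, dtype=np.float32)
waveform = np.sin(2.0 * np.pi * 440.0 * t).reshape(-1, 1)

spectrogram = audio_spectrogram(waveform, window_size=512, stride=256,
                                magnitude_squared=True)

with tf.Session() as sess:
    spec = sess.run(spectrogram)
    # Shape is [channels, time_slices, frequency_bins], roughly (1, 61, 257) here.
    print(spec.shape)
```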
Example #33
def range_encode(data, cdf, precision, name=None):
  r"""Using the provided cumulative distribution functions (CDF) inside `cdf`, returns

  a range-code of `data`.

  The shape of `cdf` should have one more axis than the shape of `data`, and the
  prefix `cdf.shape[:-1]` should be broadcastable to `data.shape`. That is, for
  every `i = 0,...,rank(data) - 1`, the op requires that either
  `cdf.shape[i] == 1` or `cdf.shape[i] == data.shape[i]`. Note that this
  broadcasting is limited in the sense that the number of axes must match, and
  broadcasts only `cdf` but not `data`.

  `data` should have an upper bound `m > 0` such that each element is an integer
  in range `[0, m)`. Then the last dimension size of `cdf` must be `m + 1`. For
  each element of `data`, the innermost strip of `cdf` is a vector representing a
  CDF. For each k = 0,...,m, `cdf[..., k] / 2^precision` is the probability that
  an outcome is less than `k` (not less than or equal to).

  ```
     cdf[..., 0] / 2^precision = Pr(data[...] < 0)
     cdf[..., 1] / 2^precision = Pr(data[...] < 1) = Pr(data[...] <= 0)
     cdf[..., 2] / 2^precision = Pr(data[...] < 2) = Pr(data[...] <= 1)
     ...
     cdf[..., m] / 2^precision = Pr(data[...] < m) = 1
  ```

  Therefore each element of `cdf` must be in `[0, 2^precision]`.

  Ideally `cdf[..., m]` should equal `2^precision` but this is not a hard
  requirement as long as `cdf[..., m] <= 2^precision`.

  The encoded string neither contains the shape information of the encoded data
  nor a termination symbol. Therefore the shape of the encoded data must be
  explicitly provided to the decoder.

  Implementation notes:

  - Because of potential performance issues, the op does not check whether
  elements of `data` are in the correct range `[0, m)`, or if `cdf` satisfies
  the monotonic increase property.

  - For the range coder to decode the encoded string correctly, the decoder should
  be able to reproduce the internal states of the encoder precisely. Otherwise,
  decoding fails and, once an error occurs, all subsequent decoded values are
  incorrect. For this reason, the range coder uses integer arithmetic and avoids
  any floating point operations internally, and `cdf` should contain integers
  representing quantized probability mass rather than floating point values.

  Args:
    data: A `Tensor` of type `int16`. An int16 tensor.
    cdf: A `Tensor` of type `int32`.
      An int32 tensor representing the CDF's of `data`. Each integer is divided
      by `2^precision` to represent a fraction.
    precision: An `int` that is `>= 1`.
      The number of bits for probability quantization. Must be <= 16.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`. A range-coded scalar string.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    precision = _execute.make_int(precision, "precision")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RangeEncode", data=data, cdf=cdf, precision=precision, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("precision", _op.get_attr("precision"))
    _execute.record_gradient(
      "RangeEncode", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "RangeEncode",
        name, _ctx._post_execution_callbacks, data, cdf, "precision",
        precision)
      return _result
    except _core._FallbackException:
      return range_encode_eager_fallback(
          data, cdf, precision=precision, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
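A round-trip sketch combining `range_encode` with the `range_decode` wrapper shown earlier, assuming TF1 graph mode; the uniform four-symbol CDF and the data values are illustrative.

```python
import numpy as np
import tensorflow as tf

precision = 10                 # probabilities quantized in units of 1 / 2**10
# One shared CDF row for every element of `data`: values lie in [0, 4), so the
# last axis has length m + 1 = 5 and ends at 2**precision = 1024.
cdf = np.array([[0, 256, 512, 768, 1024]], dtype=np.int32)
data = np.array([0, 3, 1, 2, 2, 0], dtype=np.int16)

code = range_encode(data, cdf, precision=precision)
decoded = range_decode(code, shape=[data.size], cdf=cdf, precision=precision)

with tf.Session() as sess:
    encoded, restored = sess.run([code, decoded])
    # `encoded` is a short byte string; `restored` equals the original data.
    print(len(encoded), restored)
```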
Example #34
def collective_bcast_send(input,
                          group_size,
                          group_key,
                          instance_key,
                          shape,
                          name=None):
    r"""Broadcasts a tensor value to one or more other devices.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `half`, `float64`, `int32`, `int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
    _ctx = _context._context
    if _ctx is not None and _ctx._eager_context.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "CollectiveBcastSend", name, _ctx._post_execution_callbacks,
                input, "group_size", group_size, "group_key", group_key,
                "instance_key", instance_key, "shape", shape)
            return _result
        except _core._FallbackException:
            try:
                return collective_bcast_send_eager_fallback(
                    input,
                    group_size=group_size,
                    group_key=group_key,
                    instance_key=instance_key,
                    shape=shape,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    group_size = _execute.make_int(group_size, "group_size")
    group_key = _execute.make_int(group_key, "group_key")
    instance_key = _execute.make_int(instance_key, "instance_key")
    shape = _execute.make_shape(shape, "shape")
    _, _, _op = _op_def_lib._apply_op_helper("CollectiveBcastSend",
                                             input=input,
                                             group_size=group_size,
                                             group_key=group_key,
                                             instance_key=instance_key,
                                             shape=shape,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "group_size", _op.get_attr("group_size"),
              "group_key", _op.get_attr("group_key"), "instance_key",
              _op.get_attr("instance_key"), "shape", _op.get_attr("shape"))
    _execute.record_gradient("CollectiveBcastSend", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
def fused_conv2d_bias_activation_eager_fallback(conv_input,
                                                filter,
                                                bias,
                                                side_input,
                                                conv_input_scale,
                                                side_input_scale,
                                                strides,
                                                padding,
                                                data_format="NHWC",
                                                filter_format="HWIO",
                                                activation_mode="Relu",
                                                dilations=[1, 1, 1, 1],
                                                name=None,
                                                ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function fused_conv2d_bias_activation
  """
    _ctx = ctx if ctx else _context.context()
    if not isinstance(strides, (list, tuple)):
        raise TypeError("Expected list for 'strides' argument to "
                        "'fused_conv2d_bias_activation' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
        data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if filter_format is None:
        filter_format = "HWIO"
    filter_format = _execute.make_str(filter_format, "filter_format")
    if activation_mode is None:
        activation_mode = "Relu"
    activation_mode = _execute.make_str(activation_mode, "activation_mode")
    if dilations is None:
        dilations = [1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
        raise TypeError("Expected list for 'dilations' argument to "
                        "'fused_conv2d_bias_activation' Op, not %r." %
                        dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _attr_T, _inputs_T = _execute.args_to_matching_eager(
        [conv_input, filter, side_input], _ctx)
    (conv_input, filter, side_input) = _inputs_T
    _attr_Tbias, (bias, ) = _execute.args_to_matching_eager([bias], _ctx)
    conv_input_scale = _ops.convert_to_tensor(conv_input_scale,
                                              _dtypes.float32)
    side_input_scale = _ops.convert_to_tensor(side_input_scale,
                                              _dtypes.float32)
    _inputs_flat = [
        conv_input, filter, bias, side_input, conv_input_scale,
        side_input_scale
    ]
    _attrs = ("T", _attr_T, "Tbias", _attr_Tbias, "strides", strides,
              "padding", padding, "data_format", data_format, "filter_format",
              filter_format, "activation_mode", activation_mode, "dilations",
              dilations)
    _result = _execute.execute(b"FusedConv2DBiasActivation",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("FusedConv2DBiasActivation", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
Example #36
def collective_bcast_recv(T,
                          group_size,
                          group_key,
                          instance_key,
                          shape,
                          name=None):
    r"""Receives a tensor value broadcast from another device.

  Args:
    T: A `tf.DType` from: `tf.float32, tf.half, tf.float64, tf.int32, tf.int64`.
    group_size: An `int`.
    group_key: An `int`.
    instance_key: An `int`.
    shape: A `tf.TensorShape` or list of `ints`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `T`.
  """
    _ctx = _context._context
    if _ctx is not None and _ctx._eager_context.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "CollectiveBcastRecv", name, _ctx._post_execution_callbacks,
                "T", T, "group_size", group_size, "group_key", group_key,
                "instance_key", instance_key, "shape", shape)
            return _result
        except _core._FallbackException:
            try:
                return collective_bcast_recv_eager_fallback(
                    T=T,
                    group_size=group_size,
                    group_key=group_key,
                    instance_key=instance_key,
                    shape=shape,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    T = _execute.make_type(T, "T")
    group_size = _execute.make_int(group_size, "group_size")
    group_key = _execute.make_int(group_key, "group_key")
    instance_key = _execute.make_int(instance_key, "instance_key")
    shape = _execute.make_shape(shape, "shape")
    _, _, _op = _op_def_lib._apply_op_helper("CollectiveBcastRecv",
                                             T=T,
                                             group_size=group_size,
                                             group_key=group_key,
                                             instance_key=instance_key,
                                             shape=shape,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "group_size", _op.get_attr("group_size"),
              "group_key", _op.get_attr("group_key"), "instance_key",
              _op.get_attr("instance_key"), "shape", _op.get_attr("shape"))
    _execute.record_gradient("CollectiveBcastRecv", _inputs_flat, _attrs,
                             _result, name)
    _result, = _result
    return _result
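A structural sketch of how the broadcast send/recv pair above lines up, assuming a single process exposing two CPU devices; the group and instance keys are arbitrary but must match across the pair, and whether this runs as-is depends on the TensorFlow build.

```python
import tensorflow as tf

group_size, group_key, instance_key = 2, 0, 42
shape = [4]

# Expose two CPU devices in one process so each collective op has its own device.
config = tf.ConfigProto(device_count={"CPU": 2})

with tf.device("/device:CPU:0"):
    value = tf.fill(shape, 3.0)
    sent = collective_bcast_send(value, group_size=group_size,
                                 group_key=group_key,
                                 instance_key=instance_key, shape=shape)
with tf.device("/device:CPU:1"):
    received = collective_bcast_recv(tf.float32, group_size=group_size,
                                     group_key=group_key,
                                     instance_key=instance_key, shape=shape)

with tf.Session(config=config) as sess:
    # Both tensors hold the broadcast value [3., 3., 3., 3.].
    print(sess.run([sent, received]))
```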
def fused_conv2d_bias_activation(conv_input,
                                 filter,
                                 bias,
                                 side_input,
                                 conv_input_scale,
                                 side_input_scale,
                                 strides,
                                 padding,
                                 data_format="NHWC",
                                 filter_format="HWIO",
                                 activation_mode="Relu",
                                 dilations=[1, 1, 1, 1],
                                 name=None):
    r"""    Computes a fused kernel which implements: 2-D convolution, adds side input,

      with separate scaling on convolution and side inputs, then adds bias and
      applies the RELU activation function to the result. Supports both float and
      qint8 data formats. In the case of qint8, the output is clipped to [0..127].

      conv_input: A tensor with format as specified by `data_format` (see below).
      filter: A tensor with format depending on `data_format` as follows:
          "NHWC", "NCHW":
               `float [ filter_height, filter_width, in_channels, out_channels ]`
          "NCHW_VECT_C":
               `qint8 [ out_channels, in_channels, filter_height, filter_width ]`
      bias: 1-D float tensor with size matching the `out_channels` dimension of
          `filter`.
          Note: this tensor is still float, even if other inputs are qint8.
      side_input: A tensor with format as specified by `data_format` (see below).
          This tensor will be ignored and can be [] if side_input_scale == 0.
          Otherwise, the size of each dimension must match the `output` tensor.
      conv_input_scale: scalar float value to be multiplied by `conv_input`.
          (conceptually.. in reality it is applied after convolution).
      side_input_scale: scalar float value to be multiplied by `side_input`.
      output: A tensor with format as specified by `data_format` (see below).
          The dimension sizes are determined automatically based on other inputs
          and attributes.
      T: The element data type of `conv_input`, `side_input` and `output` tensors.
          Note: must match with the `data_format`.
      Tbias: The element data type of `bias`.
      strides: 1-D tensor of length 4.  The stride of the sliding window for each
          dimension of `input`. The dimension order is determined by the value of
          `data_format`, see below for details.
          Note: the stride for batch and channel dimensions must be 1.
      padding: The type of padding algorithm to use.
      data_format: A string specifying the data format of `conv_input`,
          `side_input` and `output` tensors with the following options:
          "NHWC": `float [ batch, height, width, channels ]`
          "NCHW": `float [ batch, channels, height, width ]`
          "NCHW_VECT_C":
              `qint8 [ batch, channels / 4, height, width, channels % 4 ]`
          Note: for "NCHW_VECT_C", `channels` must be a multiple of 4.
      filter_format: A string specifying the data format of `filter`,
          "HWIO": `float [ kernel_height, kernel_width, input_channels,
                           output_channels ]`
          "OIHW_VECT_I":
              `qint8 [ output_channels, input_channels / 4,
                       kernel_height, kernel_width, input_channels % 4 ]`
      activation_mode: The activation applied to the output.
          Must be "Relu" or "None".
      dilations: 1-D tensor of length 4.  The dilation factor for each dimension
          of `input`. If set to k > 1, there will be k-1 skipped cells between
          each filter element on that dimension. The dimension order is determined
          by the value of `data_format`, see above for details. Dilations in the
          batch and depth dimensions must be 1.

  Args:
    conv_input: A `Tensor`. Must be one of the following types: `float32`, `half`, `qint8`.
    filter: A `Tensor`. Must have the same type as `conv_input`.
    bias: A `Tensor`. Must be one of the following types: `float32`, `half`.
    side_input: A `Tensor`. Must have the same type as `conv_input`.
    conv_input_scale: A `Tensor` of type `float32`.
    side_input_scale: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
    filter_format: An optional `string` from: `"HWIO", "OIHW", "OIHW_VECT_I"`. Defaults to `"HWIO"`.
    activation_mode: An optional `string` from: `"Relu", "None"`. Defaults to `"Relu"`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `conv_input`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if not isinstance(strides, (list, tuple)):
            raise TypeError("Expected list for 'strides' argument to "
                            "'fused_conv2d_bias_activation' Op, not %r." %
                            strides)
        strides = [_execute.make_int(_i, "strides") for _i in strides]
        padding = _execute.make_str(padding, "padding")
        if data_format is None:
            data_format = "NHWC"
        data_format = _execute.make_str(data_format, "data_format")
        if filter_format is None:
            filter_format = "HWIO"
        filter_format = _execute.make_str(filter_format, "filter_format")
        if activation_mode is None:
            activation_mode = "Relu"
        activation_mode = _execute.make_str(activation_mode, "activation_mode")
        if dilations is None:
            dilations = [1, 1, 1, 1]
        if not isinstance(dilations, (list, tuple)):
            raise TypeError("Expected list for 'dilations' argument to "
                            "'fused_conv2d_bias_activation' Op, not %r." %
                            dilations)
        dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
        _, _, _op = _op_def_lib._apply_op_helper(
            "FusedConv2DBiasActivation",
            conv_input=conv_input,
            filter=filter,
            bias=bias,
            side_input=side_input,
            conv_input_scale=conv_input_scale,
            side_input_scale=side_input_scale,
            strides=strides,
            padding=padding,
            data_format=data_format,
            filter_format=filter_format,
            activation_mode=activation_mode,
            dilations=dilations,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("T", _op.get_attr("T"), "Tbias", _op.get_attr("Tbias"),
                  "strides", _op.get_attr("strides"), "padding",
                  _op.get_attr("padding"), "data_format",
                  _op.get_attr("data_format"), "filter_format",
                  _op.get_attr("filter_format"), "activation_mode",
                  _op.get_attr("activation_mode"), "dilations",
                  _op.get_attr("dilations"))
        _execute.record_gradient("FusedConv2DBiasActivation", _inputs_flat,
                                 _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "FusedConv2DBiasActivation", name,
                _ctx._post_execution_callbacks, conv_input, filter, bias,
                side_input, conv_input_scale, side_input_scale, "strides",
                strides, "padding", padding, "data_format", data_format,
                "filter_format", filter_format, "activation_mode",
                activation_mode, "dilations", dilations)
            return _result
        except _core._FallbackException:
            return fused_conv2d_bias_activation_eager_fallback(
                conv_input,
                filter,
                bias,
                side_input,
                conv_input_scale,
                side_input_scale,
                strides=strides,
                padding=padding,
                data_format=data_format,
                filter_format=filter_format,
                activation_mode=activation_mode,
                dilations=dilations,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
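An illustrative NHWC float call of the wrapper above; the shapes are made up, the empty `side_input` relies on `side_input_scale == 0` as described in the docstring, and in most builds this fused op only has a GPU kernel.

```python
import tensorflow as tf

conv_input = tf.random_normal([1, 5, 5, 3])        # NHWC input
filt = tf.random_normal([3, 3, 3, 8])              # HWIO filter
bias = tf.zeros([8])
side_input = tf.constant([], dtype=tf.float32)     # ignored when scale is 0

out = fused_conv2d_bias_activation(
    conv_input, filt, bias, side_input,
    conv_input_scale=1.0, side_input_scale=0.0,
    strides=[1, 1, 1, 1], padding="SAME",
    data_format="NHWC", filter_format="HWIO", activation_mode="Relu")

with tf.Session() as sess:
    # Expect shape (1, 5, 5, 8) after the fused conv + bias + ReLU.
    print(sess.run(out).shape)
```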
Example #38
def sdca_optimizer_eager_fallback(sparse_example_indices,
                                  sparse_feature_indices,
                                  sparse_feature_values,
                                  dense_features,
                                  example_weights,
                                  example_labels,
                                  sparse_indices,
                                  sparse_weights,
                                  dense_weights,
                                  example_state_data,
                                  loss_type,
                                  l1,
                                  l2,
                                  num_loss_partitions,
                                  num_inner_iterations,
                                  adaptative=True,
                                  name=None,
                                  ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function sdca_optimizer
  """
    _ctx = ctx if ctx else _context.context()
    if not isinstance(sparse_example_indices, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_example_indices' argument to "
            "'sdca_optimizer' Op, not %r." % sparse_example_indices)
    _attr_num_sparse_features = len(sparse_example_indices)
    if not isinstance(sparse_feature_indices, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_feature_indices' argument to "
            "'sdca_optimizer' Op, not %r." % sparse_feature_indices)
    if len(sparse_feature_indices) != _attr_num_sparse_features:
        raise ValueError(
            "List argument 'sparse_feature_indices' to 'sdca_optimizer' Op with length %d "
            "must match length %d of argument 'sparse_example_indices'." %
            (len(sparse_feature_indices), _attr_num_sparse_features))
    if not isinstance(sparse_indices, (list, tuple)):
        raise TypeError("Expected list for 'sparse_indices' argument to "
                        "'sdca_optimizer' Op, not %r." % sparse_indices)
    if len(sparse_indices) != _attr_num_sparse_features:
        raise ValueError(
            "List argument 'sparse_indices' to 'sdca_optimizer' Op with length %d "
            "must match length %d of argument 'sparse_example_indices'." %
            (len(sparse_indices), _attr_num_sparse_features))
    if not isinstance(sparse_weights, (list, tuple)):
        raise TypeError("Expected list for 'sparse_weights' argument to "
                        "'sdca_optimizer' Op, not %r." % sparse_weights)
    if len(sparse_weights) != _attr_num_sparse_features:
        raise ValueError(
            "List argument 'sparse_weights' to 'sdca_optimizer' Op with length %d "
            "must match length %d of argument 'sparse_example_indices'." %
            (len(sparse_weights), _attr_num_sparse_features))
    if not isinstance(sparse_feature_values, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_feature_values' argument to "
            "'sdca_optimizer' Op, not %r." % sparse_feature_values)
    _attr_num_sparse_features_with_values = len(sparse_feature_values)
    if not isinstance(dense_features, (list, tuple)):
        raise TypeError("Expected list for 'dense_features' argument to "
                        "'sdca_optimizer' Op, not %r." % dense_features)
    _attr_num_dense_features = len(dense_features)
    if not isinstance(dense_weights, (list, tuple)):
        raise TypeError("Expected list for 'dense_weights' argument to "
                        "'sdca_optimizer' Op, not %r." % dense_weights)
    if len(dense_weights) != _attr_num_dense_features:
        raise ValueError(
            "List argument 'dense_weights' to 'sdca_optimizer' Op with length %d "
            "must match length %d of argument 'dense_features'." %
            (len(dense_weights), _attr_num_dense_features))
    loss_type = _execute.make_str(loss_type, "loss_type")
    l1 = _execute.make_float(l1, "l1")
    l2 = _execute.make_float(l2, "l2")
    num_loss_partitions = _execute.make_int(num_loss_partitions,
                                            "num_loss_partitions")
    num_inner_iterations = _execute.make_int(num_inner_iterations,
                                             "num_inner_iterations")
    if adaptative is None:
        adaptative = True
    adaptative = _execute.make_bool(adaptative, "adaptative")
    sparse_example_indices = _ops.convert_n_to_tensor(sparse_example_indices,
                                                      _dtypes.int64)
    sparse_feature_indices = _ops.convert_n_to_tensor(sparse_feature_indices,
                                                      _dtypes.int64)
    sparse_feature_values = _ops.convert_n_to_tensor(sparse_feature_values,
                                                     _dtypes.float32)
    dense_features = _ops.convert_n_to_tensor(dense_features, _dtypes.float32)
    example_weights = _ops.convert_to_tensor(example_weights, _dtypes.float32)
    example_labels = _ops.convert_to_tensor(example_labels, _dtypes.float32)
    sparse_indices = _ops.convert_n_to_tensor(sparse_indices, _dtypes.int64)
    sparse_weights = _ops.convert_n_to_tensor(sparse_weights, _dtypes.float32)
    dense_weights = _ops.convert_n_to_tensor(dense_weights, _dtypes.float32)
    example_state_data = _ops.convert_to_tensor(example_state_data,
                                                _dtypes.float32)
    _inputs_flat = list(sparse_example_indices) + list(
        sparse_feature_indices
    ) + list(sparse_feature_values) + list(dense_features) + [
        example_weights, example_labels
    ] + list(sparse_indices) + list(sparse_weights) + list(dense_weights) + [
        example_state_data
    ]
    _attrs = ("loss_type", loss_type, "adaptative", adaptative,
              "num_sparse_features", _attr_num_sparse_features,
              "num_sparse_features_with_values",
              _attr_num_sparse_features_with_values, "num_dense_features",
              _attr_num_dense_features, "l1", l1, "l2", l2,
              "num_loss_partitions", num_loss_partitions,
              "num_inner_iterations", num_inner_iterations)
    _result = _execute.execute(b"SdcaOptimizer",
                               _attr_num_sparse_features +
                               _attr_num_dense_features + 1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("SdcaOptimizer", _inputs_flat, _attrs, _result,
                             name)
    _result = _result[:1] + [_result[1:1 + _attr_num_sparse_features]
                             ] + _result[1 + _attr_num_sparse_features:]
    _result = _result[:2] + [_result[2:]]
    _result = _SdcaOptimizerOutput._make(_result)
    return _result
Example #39
def single_image_random_dot_stereograms_eager_fallback(
        depth_values,
        hidden_surface_removal=True,
        convergence_dots_size=8,
        dots_per_inch=72,
        eye_separation=2.5,
        mu=0.3333,
        normalize=True,
        normalize_max=-100,
        normalize_min=100,
        border_level=0,
        number_colors=256,
        output_image_shape=[1024, 768, 1],
        output_data_window=[1022, 757],
        name=None,
        ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function single_image_random_dot_stereograms
  """
    _ctx = ctx if ctx else _context.context()
    if hidden_surface_removal is None:
        hidden_surface_removal = True
    hidden_surface_removal = _execute.make_bool(hidden_surface_removal,
                                                "hidden_surface_removal")
    if convergence_dots_size is None:
        convergence_dots_size = 8
    convergence_dots_size = _execute.make_int(convergence_dots_size,
                                              "convergence_dots_size")
    if dots_per_inch is None:
        dots_per_inch = 72
    dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
    if eye_separation is None:
        eye_separation = 2.5
    eye_separation = _execute.make_float(eye_separation, "eye_separation")
    if mu is None:
        mu = 0.3333
    mu = _execute.make_float(mu, "mu")
    if normalize is None:
        normalize = True
    normalize = _execute.make_bool(normalize, "normalize")
    if normalize_max is None:
        normalize_max = -100
    normalize_max = _execute.make_float(normalize_max, "normalize_max")
    if normalize_min is None:
        normalize_min = 100
    normalize_min = _execute.make_float(normalize_min, "normalize_min")
    if border_level is None:
        border_level = 0
    border_level = _execute.make_float(border_level, "border_level")
    if number_colors is None:
        number_colors = 256
    number_colors = _execute.make_int(number_colors, "number_colors")
    if output_image_shape is None:
        output_image_shape = [1024, 768, 1]
    output_image_shape = _execute.make_shape(output_image_shape,
                                             "output_image_shape")
    if output_data_window is None:
        output_data_window = [1022, 757]
    output_data_window = _execute.make_shape(output_data_window,
                                             "output_data_window")
    _attr_T, (depth_values, ) = _execute.args_to_matching_eager([depth_values],
                                                                _ctx)
    _inputs_flat = [depth_values]
    _attrs = ("T", _attr_T, "hidden_surface_removal", hidden_surface_removal,
              "convergence_dots_size", convergence_dots_size, "dots_per_inch",
              dots_per_inch, "eye_separation", eye_separation, "mu", mu,
              "normalize", normalize, "normalize_max", normalize_max,
              "normalize_min", normalize_min, "border_level", border_level,
              "number_colors", number_colors, "output_image_shape",
              output_image_shape, "output_data_window", output_data_window)
    _result = _execute.execute(b"SingleImageRandomDotStereograms",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("SingleImageRandomDotStereograms", _inputs_flat,
                             _attrs, _result, name)
    _result, = _result
    return _result
Example #40
def tree_ensemble_used_handlers(tree_ensemble_handle, stamp_token, num_all_handlers, name=None):
  r"""Returns the mask of used handlers along with the number of non-zero elements in

  this mask. Used in feature selection.

  Args:
    tree_ensemble_handle: A `Tensor` of type `resource`.
      Handle to the tree ensemble.
    stamp_token: A `Tensor` of type `int64`.
      Token to use as the new value of the resource stamp.
    num_all_handlers: An `int` that is `>= 0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (num_used_handlers, used_handlers_mask).

    num_used_handlers: A `Tensor` of type `int64`. number of feature column handlers used in the model.
    used_handlers_mask: A `Tensor` of type `bool`. A boolean vector showing which handlers are used in the
      model.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "TreeEnsembleUsedHandlers", name, _ctx.post_execution_callbacks,
        tree_ensemble_handle, stamp_token, "num_all_handlers",
        num_all_handlers)
      _result = _TreeEnsembleUsedHandlersOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return tree_ensemble_used_handlers_eager_fallback(
            tree_ensemble_handle, stamp_token,
            num_all_handlers=num_all_handlers, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              tree_ensemble_used_handlers, tree_ensemble_handle=tree_ensemble_handle,
                                           stamp_token=stamp_token,
                                           num_all_handlers=num_all_handlers,
                                           name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  num_all_handlers = _execute.make_int(num_all_handlers, "num_all_handlers")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "TreeEnsembleUsedHandlers", tree_ensemble_handle=tree_ensemble_handle,
                                    stamp_token=stamp_token,
                                    num_all_handlers=num_all_handlers,
                                    name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          tree_ensemble_used_handlers, tree_ensemble_handle=tree_ensemble_handle,
                                       stamp_token=stamp_token,
                                       num_all_handlers=num_all_handlers,
                                       name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("num_all_handlers", _op.get_attr("num_all_handlers"))
  _execute.record_gradient(
      "TreeEnsembleUsedHandlers", _inputs_flat, _attrs, _result, name)
  _result = _TreeEnsembleUsedHandlersOutput._make(_result)
  return _result
Example #41
def single_image_random_dot_stereograms(depth_values,
                                        hidden_surface_removal=True,
                                        convergence_dots_size=8,
                                        dots_per_inch=72,
                                        eye_separation=2.5,
                                        mu=0.3333,
                                        normalize=True,
                                        normalize_max=-100,
                                        normalize_min=100,
                                        border_level=0,
                                        number_colors=256,
                                        output_image_shape=[1024, 768, 1],
                                        output_data_window=[1022, 757],
                                        name=None):
    r"""Outputs a single image random dot stereogram for export via encode_PNG/JPG OP.

  Given the 2-D tensor 'depth_values' with encoded Z values, this operation will
  encode 3-D data into a 2-D image.  The output of this Op is suitable for the
  encode_PNG/JPG ops.  Be careful with image compression as this may corrupt the
  encoded 3-D data within the image.

  This Op is based upon:
  'http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper'

  Example use which outputs a SIRDS image as picture_out.png:
  ```python
  img=[[1,2,3,3,2,1],
       [1,2,3,4,5,2],
       [1,2,3,4,5,3],
       [1,2,3,4,5,4],
       [6,5,4,4,5,5]]

  session = tf.InteractiveSession()

  sirds = single_image_random_dot_stereograms(img,convergence_dots_size=8,number_colors=256,normalize=True)

  out = sirds.eval()

  png = tf.image.encode_png(out).eval()

  with open('picture_out.png', 'wb') as f:
      f.write(png)
  ```

  Args:
    depth_values: A `Tensor`. Must be one of the following types: `float64`, `float32`, `int64`, `int32`.
      Z values of data to encode into 'output_data_window' window,
      lower values are further away {0.0 floor(far), 1.0 ceiling(near) after normalization}, must be 2-D tensor
    hidden_surface_removal: An optional `bool`. Defaults to `True`.
      Activate hidden surface removal
    convergence_dots_size: An optional `int`. Defaults to `8`.
      Black dot size in pixels to help view converge image, drawn on bottom of image
    dots_per_inch: An optional `int`. Defaults to `72`.
      Output device in dots/inch
    eye_separation: An optional `float`. Defaults to `2.5`.
      Separation between eyes in inches
    mu: An optional `float`. Defaults to `0.3333`.
      Depth of field, Fraction of viewing distance (eg. 1/3 = .3333)
    normalize: An optional `bool`. Defaults to `True`.
      Normalize input data to [0.0, 1.0]
    normalize_max: An optional `float`. Defaults to `-100`.
      Fix MAX value for Normalization - if < MIN, autoscale
    normalize_min: An optional `float`. Defaults to `100`.
      Fix MIN value for Normalization - if > MAX, autoscale
    border_level: An optional `float`. Defaults to `0`.
      Value of border depth 0.0 {far} to 1.0 {near}
    number_colors: An optional `int`. Defaults to `256`.
      2 (Black & White),256 (grayscale), and Numbers > 256 (Full Color) are all that are supported currently
    output_image_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1024, 768, 1]`.
      Output size of returned image in X,Y, Channels 1-grayscale, 3 color (1024, 768, 1),
      channels will be updated to 3 if 'number_colors' > 256
    output_data_window: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1022, 757]`.
      Size of "DATA" window, must be equal to or smaller than 'output_image_shape', will be centered
      and use 'convergence_dots_size' for best fit to avoid overlap if possible
    name: A name for the operation (optional).

  Returns:
    A tensor of size 'output_image_shape' with the encoded 'depth_values'
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "SingleImageRandomDotStereograms", name,
                _ctx._post_execution_callbacks, depth_values,
                "hidden_surface_removal", hidden_surface_removal,
                "convergence_dots_size", convergence_dots_size,
                "dots_per_inch", dots_per_inch, "eye_separation",
                eye_separation, "mu", mu, "normalize", normalize,
                "normalize_max", normalize_max, "normalize_min", normalize_min,
                "border_level", border_level, "number_colors", number_colors,
                "output_image_shape", output_image_shape, "output_data_window",
                output_data_window)
            return _result
        except _core._FallbackException:
            try:
                return single_image_random_dot_stereograms_eager_fallback(
                    depth_values,
                    hidden_surface_removal=hidden_surface_removal,
                    convergence_dots_size=convergence_dots_size,
                    dots_per_inch=dots_per_inch,
                    eye_separation=eye_separation,
                    mu=mu,
                    normalize=normalize,
                    normalize_max=normalize_max,
                    normalize_min=normalize_min,
                    border_level=border_level,
                    number_colors=number_colors,
                    output_image_shape=output_image_shape,
                    output_data_window=output_data_window,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
            except (TypeError, ValueError):
                result = _dispatch.dispatch(
                    single_image_random_dot_stereograms,
                    depth_values=depth_values,
                    hidden_surface_removal=hidden_surface_removal,
                    convergence_dots_size=convergence_dots_size,
                    dots_per_inch=dots_per_inch,
                    eye_separation=eye_separation,
                    mu=mu,
                    normalize=normalize,
                    normalize_max=normalize_max,
                    normalize_min=normalize_min,
                    border_level=border_level,
                    number_colors=number_colors,
                    output_image_shape=output_image_shape,
                    output_data_window=output_data_window,
                    name=name)
                if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
                    return result
                raise
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if hidden_surface_removal is None:
        hidden_surface_removal = True
    hidden_surface_removal = _execute.make_bool(hidden_surface_removal,
                                                "hidden_surface_removal")
    if convergence_dots_size is None:
        convergence_dots_size = 8
    convergence_dots_size = _execute.make_int(convergence_dots_size,
                                              "convergence_dots_size")
    if dots_per_inch is None:
        dots_per_inch = 72
    dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
    if eye_separation is None:
        eye_separation = 2.5
    eye_separation = _execute.make_float(eye_separation, "eye_separation")
    if mu is None:
        mu = 0.3333
    mu = _execute.make_float(mu, "mu")
    if normalize is None:
        normalize = True
    normalize = _execute.make_bool(normalize, "normalize")
    if normalize_max is None:
        normalize_max = -100
    normalize_max = _execute.make_float(normalize_max, "normalize_max")
    if normalize_min is None:
        normalize_min = 100
    normalize_min = _execute.make_float(normalize_min, "normalize_min")
    if border_level is None:
        border_level = 0
    border_level = _execute.make_float(border_level, "border_level")
    if number_colors is None:
        number_colors = 256
    number_colors = _execute.make_int(number_colors, "number_colors")
    if output_image_shape is None:
        output_image_shape = [1024, 768, 1]
    output_image_shape = _execute.make_shape(output_image_shape,
                                             "output_image_shape")
    if output_data_window is None:
        output_data_window = [1022, 757]
    output_data_window = _execute.make_shape(output_data_window,
                                             "output_data_window")
    try:
        _, _, _op = _op_def_lib._apply_op_helper(
            "SingleImageRandomDotStereograms",
            depth_values=depth_values,
            hidden_surface_removal=hidden_surface_removal,
            convergence_dots_size=convergence_dots_size,
            dots_per_inch=dots_per_inch,
            eye_separation=eye_separation,
            mu=mu,
            normalize=normalize,
            normalize_max=normalize_max,
            normalize_min=normalize_min,
            border_level=border_level,
            number_colors=number_colors,
            output_image_shape=output_image_shape,
            output_data_window=output_data_window,
            name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(
            single_image_random_dot_stereograms,
            depth_values=depth_values,
            hidden_surface_removal=hidden_surface_removal,
            convergence_dots_size=convergence_dots_size,
            dots_per_inch=dots_per_inch,
            eye_separation=eye_separation,
            mu=mu,
            normalize=normalize,
            normalize_max=normalize_max,
            normalize_min=normalize_min,
            border_level=border_level,
            number_colors=number_colors,
            output_image_shape=output_image_shape,
            output_data_window=output_data_window,
            name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "hidden_surface_removal",
              _op.get_attr("hidden_surface_removal"), "convergence_dots_size",
              _op.get_attr("convergence_dots_size"), "dots_per_inch",
              _op.get_attr("dots_per_inch"), "eye_separation",
              _op.get_attr("eye_separation"), "mu", _op.get_attr("mu"),
              "normalize", _op.get_attr("normalize"), "normalize_max",
              _op.get_attr("normalize_max"), "normalize_min",
              _op.get_attr("normalize_min"), "border_level",
              _op.get_attr("border_level"), "number_colors",
              _op.get_attr("number_colors"), "output_image_shape",
              _op.get_attr("output_image_shape"), "output_data_window",
              _op.get_attr("output_data_window"))
    _execute.record_gradient("SingleImageRandomDotStereograms", _inputs_flat,
                             _attrs, _result, name)
    _result, = _result
    return _result
Example #42
def range_encode(data, cdf, precision, name=None):
  r"""Using the provided cumulative distribution functions (CDF) inside `cdf`, returns

  a range-code of `data`.

  The shape of `cdf` should have one more axis than the shape of `data`, and the
  prefix `cdf.shape[:-1]` should be broadcastable to `data.shape`. That is, for
  every `i = 0,...,rank(data) - 1`, the op requires that either
  `cdf.shape[i] == 1` or `cdf.shape[i] == data.shape[i]`. Note that this
  broadcasting is limited in the sense that the number of axes must match, and
  broadcasts only `cdf` but not `data`.

  `data` should have an upper bound `m > 0` such that each element is an integer
  in range `[0, m)`. Then the last dimension size of `cdf` must be `m + 1`. For
  each element of `data`, the innermost strip of `cdf` is a vector representing a
  CDF. For each k = 0,...,m, `cdf[..., k] / 2^precision` is the probability that
  an outcome is less than `k` (not less than or equal to).

  ```
     cdf[..., 0] / 2^precision = Pr(data[...] < 0)
     cdf[..., 1] / 2^precision = Pr(data[...] < 1) = Pr(data[...] <= 0)
     cdf[..., 2] / 2^precision = Pr(data[...] < 2) = Pr(data[...] <= 1)
     ...
     cdf[..., m] / 2^precision = Pr(data[...] < m) = 1
  ```

  Therefore each element of `cdf` must be in `[0, 2^precision]`.

  Ideally `cdf[..., m]` should equal `2^precision` but this is not a hard
  requirement as long as `cdf[..., m] <= 2^precision`.

  The encoded string contains neither the shape information of the encoded data
  nor a termination symbol. Therefore the shape of the encoded data must be
  explicitly provided to the decoder.

  Implementation notes:

  - Because of potential performance issues, the op does not check whether
  elements of `data` are in the correct range `[0, m)`, or whether `cdf`
  satisfies the monotonic increase property.

  - For the range coder to decode the encoded string correctly, the decoder must
  reproduce the internal state of the encoder exactly. Otherwise decoding fails,
  and once an error occurs, all subsequent decoded values are incorrect. For this
  reason, the range coder uses integer arithmetic and avoids any floating-point
  operations internally, and `cdf` should contain integers representing quantized
  probability mass rather than floating-point values.

  Args:
    data: A `Tensor` of type `int16`. An int16 tensor of the data to be encoded.
    cdf: A `Tensor` of type `int32`.
      An int32 tensor representing the CDFs of `data`. Each integer is divided
      by `2^precision` to represent a fraction.
    precision: An `int` that is `>= 1`.
      The number of bits for probability quantization. Must be <= 16.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`. A range-coded scalar string.
  """
  precision = _execute.make_int(precision, "precision")
  _ctx = _context.context()
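  # In graph mode, add a RangeEncode node to the graph; in eager mode, convert
  # the inputs to tensors and execute the kernel immediately.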
  if _ctx.in_graph_mode():
    _, _, _op = _op_def_lib._apply_op_helper(
        "RangeEncode", data=data, cdf=cdf, precision=precision, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("precision", _op.get_attr("precision"))
  else:
    data = _ops.convert_to_tensor(data, _dtypes.int16)
    cdf = _ops.convert_to_tensor(cdf, _dtypes.int32)
    _inputs_flat = [data, cdf]
    _attrs = ("precision", precision)
    _result = _execute.execute(b"RangeEncode", 1, inputs=_inputs_flat,
                               attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "RangeEncode", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
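
# Hedged usage sketch (illustrative only, not part of the generated wrapper):
# shows how `range_encode` pairs with the `range_decode` op documented earlier,
# assuming both wrappers are in scope and a TF 1.x graph-mode session is used.
# The helper name `_range_coding_example` and the concrete numbers are
# assumptions made for illustration.
def _range_coding_example():
  import numpy as np
  import tensorflow as tf

  precision = 10                    # probabilities quantized to 1 / 2**10
  m = 4                             # every element of `data` lies in [0, m)
  data = tf.constant([[0, 1, 3, 2]], dtype=tf.int16)

  # One shared CDF broadcast over `data`: the prefix shape (1, 1) broadcasts to
  # data.shape, and the last dimension has size m + 1 with
  # cdf[..., m] == 2**precision.
  pmf = np.array([0.4, 0.3, 0.2, 0.1])
  quantized = np.round(pmf * 2 ** precision).astype(np.int32)
  cdf = np.concatenate([[0], np.cumsum(quantized)]).reshape(1, 1, -1)
  cdf = tf.constant(cdf, dtype=tf.int32)

  encoded = range_encode(data, cdf, precision)                  # scalar string
  decoded = range_decode(encoded, tf.shape(data), cdf, precision)
  with tf.Session() as sess:
    print(sess.run([encoded, decoded]))                         # decoded == data
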
def sparse_feature_cross(indices, values, shapes, dense, hashed_output, num_buckets, out_type, internal_type, name=None):
  r"""Generates sparse cross form a list of sparse tensors.

  The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
  representing features of one feature column. It outputs a 2D `SparseTensor` with
  the batchwise crosses of these features.

  For example, if the inputs are

      inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

      inputs[2]: Tensor [["f"], ["g"]]

  then the output will be

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  If `hashed_output=true`, then the output will be

      shape = [2, 2]
      [0, 0]: HashCombine(
                  Fingerprint64("f"), HashCombine(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: HashCombine(
                  Fingerprint64("g"), HashCombine(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    indices: A list of `Tensor` objects with type `int64`.
      2-D.  Indices of each input `SparseTensor`.
    values: A list of `Tensor` objects with types from: `int64`, `string`.
      1-D.  Values of each `SparseTensor`.
    shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.
      1-D.  Shapes of each `SparseTensor`.
    dense: A list of `Tensor` objects with types from: `int64`, `string`.
      2-D.  Columns represented by dense `Tensor`.
    hashed_output: A `bool`.
    num_buckets: An `int` that is `>= 0`.
    out_type: A `tf.DType` from: `tf.int64, tf.string`.
    internal_type: A `tf.DType` from: `tf.int64, tf.string`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`. 2-D.  Indices of the concatenated `SparseTensor`.
    output_values: A `Tensor` of type `out_type`. 1-D.  Non-empty values of the concatenated or hashed
      `SparseTensor`.
    output_shape: A `Tensor` of type `int64`. 1-D.  Shape of the concatenated `SparseTensor`.
  """
  _ctx = _context._context
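  # Graph mode (or no eager context yet): validate the list arguments, then
  # build a SparseFeatureCross node in the graph.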
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(indices, (list, tuple)):
      raise TypeError(
          "Expected list for 'indices' argument to "
          "'sparse_feature_cross' Op, not %r." % indices)
    _attr_N = len(indices)
    if not isinstance(shapes, (list, tuple)):
      raise TypeError(
          "Expected list for 'shapes' argument to "
          "'sparse_feature_cross' Op, not %r." % shapes)
    if len(shapes) != _attr_N:
      raise ValueError(
          "List argument 'shapes' to 'sparse_feature_cross' Op with length %d "
          "must match length %d of argument 'indices'." %
          (len(shapes), _attr_N))
    hashed_output = _execute.make_bool(hashed_output, "hashed_output")
    num_buckets = _execute.make_int(num_buckets, "num_buckets")
    out_type = _execute.make_type(out_type, "out_type")
    internal_type = _execute.make_type(internal_type, "internal_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseFeatureCross", indices=indices, values=values, shapes=shapes,
        dense=dense, hashed_output=hashed_output, num_buckets=num_buckets,
        out_type=out_type, internal_type=internal_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "hashed_output",
              _op.get_attr("hashed_output"), "num_buckets",
              _op.get_attr("num_buckets"), "sparse_types",
              _op.get_attr("sparse_types"), "dense_types",
              _op.get_attr("dense_types"), "out_type",
              _op.get_attr("out_type"), "internal_type",
              _op.get_attr("internal_type"))
    _execute.record_gradient(
      "SparseFeatureCross", _inputs_flat, _attrs, _result, name)
    _result = _SparseFeatureCrossOutput._make(_result)
    return _result

  else:
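    # Eager mode: try the C fast path first, then fall back to the Python
    # eager path if the fast path cannot handle these inputs.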
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseFeatureCross", name, _ctx._post_execution_callbacks, indices,
        values, shapes, dense, "hashed_output", hashed_output, "num_buckets",
        num_buckets, "out_type", out_type, "internal_type", internal_type)
      _result = _SparseFeatureCrossOutput._make(_result)
      return _result
    except _core._FallbackException:
      return sparse_feature_cross_eager_fallback(
          indices, values, shapes, dense, hashed_output=hashed_output,
          num_buckets=num_buckets, out_type=out_type,
          internal_type=internal_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
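
# Hedged usage sketch (illustrative only): reproduces the docstring example
# above with the un-hashed string cross. The helper name
# `_sparse_feature_cross_example` is hypothetical, and the choice of
# `internal_type=tf.string` for the un-hashed case follows the docstring above
# rather than a verified kernel constraint.
def _sparse_feature_cross_example():
  import tensorflow as tf

  # inputs[0]: SparseTensor of shape [2, 2] with values "a", "b", "c";
  # inputs[1]: SparseTensor of shape [2, 1] with values "d", "e";
  # passed to the op as parallel indices / values / shapes lists.
  indices = [tf.constant([[0, 0], [1, 0], [1, 1]], dtype=tf.int64),
             tf.constant([[0, 0], [1, 0]], dtype=tf.int64)]
  values = [tf.constant(["a", "b", "c"]),
            tf.constant(["d", "e"])]
  shapes = [tf.constant([2, 2], dtype=tf.int64),
            tf.constant([2, 1], dtype=tf.int64)]
  dense = [tf.constant([["f"], ["g"]])]   # inputs[2]: a dense 2-D column

  out = sparse_feature_cross(
      indices, values, shapes, dense,
      hashed_output=False, num_buckets=0,
      out_type=tf.string, internal_type=tf.string)
  # out.output_values should contain "a_X_d_X_f", "b_X_e_X_g", "c_X_e_X_g".
  return out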