Example #1
def batch(in_tensors,
          num_batch_threads,
          max_batch_size,
          batch_timeout_micros,
          grad_timeout_micros,
          max_enqueued_batches=10,
          allowed_batch_sizes=[],
          container="",
          shared_name="",
          batching_queue="",
          name=None):
    r"""Batches all input tensors nondeterministically.

  When many instances of this Op are being run concurrently with the same
  container/shared_name in the same device, some will output zero-shaped Tensors
  and others will output Tensors of size up to max_batch_size.

  All Tensors in in_tensors are batched together (so, for example, labels and
  features should be batched with a single instance of this operation).

  Each invocation of batch emits an `id` scalar which will be used to identify
  this particular invocation when doing unbatch or its gradient.

  Each op which emits a non-empty batch will also emit a non-empty batch_index
  Tensor, which is a [K, 3] matrix where each row contains the invocation's id
  and the start and length of elements of each set of Tensors present in
  batched_tensors.

  Batched tensors are concatenated along the first dimension, and all tensors in
  in_tensors must have the same size in their first dimension.

  in_tensors: The tensors to be batched.
  num_batch_threads: Number of scheduling threads for processing batches of work.
   Determines the number of batches processed in parallel.
  max_batch_size: Batch sizes will never be bigger than this.
  batch_timeout_micros: Maximum number of microseconds to wait before outputting
   an incomplete batch.
  allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does
   nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
   batches up to one of those sizes. The entries must increase monotonically, and
   the final entry must equal max_batch_size.
  grad_timeout_micros: The timeout to use for the gradient. See Unbatch.
  batched_tensors: Either empty tensors or a batch of concatenated Tensors.
  batch_index: If out_tensors is non-empty, has information to invert it.
  container: Controls the scope of sharing of this batch.
  id: always contains a scalar with a unique ID for this invocation of Batch.
  shared_name: Concurrently running instances of batch in the same device with the
   same container and shared_name will batch their elements together. If left
   empty, the op name will be used as the shared name.
  T: the types of tensors to be batched.

  Args:
    in_tensors: A list of `Tensor` objects.
    num_batch_threads: An `int`.
    max_batch_size: An `int`.
    batch_timeout_micros: An `int`.
    grad_timeout_micros: An `int`.
    max_enqueued_batches: An optional `int`. Defaults to `10`.
    allowed_batch_sizes: An optional list of `ints`. Defaults to `[]`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    batching_queue: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (batched_tensors, batch_index, id).

    batched_tensors: A list of `Tensor` objects. Has the same type as `in_tensors`.
    batch_index: A `Tensor` of type `int64`.
    id: A `Tensor` of type `int64`.
  """
    _ctx = _context.context()
    if not _ctx.executing_eagerly():
        num_batch_threads = _execute.make_int(num_batch_threads,
                                              "num_batch_threads")
        max_batch_size = _execute.make_int(max_batch_size, "max_batch_size")
        batch_timeout_micros = _execute.make_int(batch_timeout_micros,
                                                 "batch_timeout_micros")
        grad_timeout_micros = _execute.make_int(grad_timeout_micros,
                                                "grad_timeout_micros")
        if max_enqueued_batches is None:
            max_enqueued_batches = 10
        max_enqueued_batches = _execute.make_int(max_enqueued_batches,
                                                 "max_enqueued_batches")
        if allowed_batch_sizes is None:
            allowed_batch_sizes = []
        if not isinstance(allowed_batch_sizes, (list, tuple)):
            raise TypeError(
                "Expected list for 'allowed_batch_sizes' argument to "
                "'batch' Op, not %r." % allowed_batch_sizes)
        allowed_batch_sizes = [
            _execute.make_int(_i, "allowed_batch_sizes")
            for _i in allowed_batch_sizes
        ]
        if container is None:
            container = ""
        container = _execute.make_str(container, "container")
        if shared_name is None:
            shared_name = ""
        shared_name = _execute.make_str(shared_name, "shared_name")
        if batching_queue is None:
            batching_queue = ""
        batching_queue = _execute.make_str(batching_queue, "batching_queue")
        _, _, _op = _op_def_lib._apply_op_helper(
            "Batch",
            in_tensors=in_tensors,
            num_batch_threads=num_batch_threads,
            max_batch_size=max_batch_size,
            batch_timeout_micros=batch_timeout_micros,
            grad_timeout_micros=grad_timeout_micros,
            max_enqueued_batches=max_enqueued_batches,
            allowed_batch_sizes=allowed_batch_sizes,
            container=container,
            shared_name=shared_name,
            batching_queue=batching_queue,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("num_batch_threads",
                  _op.get_attr("num_batch_threads"), "max_batch_size",
                  _op.get_attr("max_batch_size"), "max_enqueued_batches",
                  _op.get_attr("max_enqueued_batches"), "batch_timeout_micros",
                  _op.get_attr("batch_timeout_micros"), "allowed_batch_sizes",
                  _op.get_attr("allowed_batch_sizes"), "grad_timeout_micros",
                  _op.get_attr("grad_timeout_micros"), "container",
                  _op.get_attr("container"), "shared_name",
                  _op.get_attr("shared_name"), "batching_queue",
                  _op.get_attr("batching_queue"), "T", _op.get_attr("T"))
        _execute.record_gradient("Batch", _inputs_flat, _attrs, _result, name)
        _result = [_result[:len(in_tensors)]] + _result[len(in_tensors):]
        _result = _BatchOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name, "Batch", name,
                _ctx._post_execution_callbacks, in_tensors,
                "num_batch_threads", num_batch_threads, "max_batch_size",
                max_batch_size, "max_enqueued_batches", max_enqueued_batches,
                "batch_timeout_micros", batch_timeout_micros,
                "allowed_batch_sizes", allowed_batch_sizes,
                "grad_timeout_micros", grad_timeout_micros, "container",
                container, "shared_name", shared_name, "batching_queue",
                batching_queue)
            _result = _BatchOutput._make(_result)
            return _result
        except _core._FallbackException:
            return batch_eager_fallback(
                in_tensors,
                num_batch_threads=num_batch_threads,
                max_batch_size=max_batch_size,
                max_enqueued_batches=max_enqueued_batches,
                batch_timeout_micros=batch_timeout_micros,
                allowed_batch_sizes=allowed_batch_sizes,
                grad_timeout_micros=grad_timeout_micros,
                container=container,
                shared_name=shared_name,
                batching_queue=batching_queue,
                name=name)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
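
A minimal graph-mode sketch of calling the wrapper above (TF 1.x assumed; the
shapes and timeouts are illustrative, not recommendations):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4])
batched, batch_index, op_id = batch(
    [x],                            # all tensors to batch together
    num_batch_threads=1,
    max_batch_size=32,
    batch_timeout_micros=100000,    # emit an incomplete batch after 0.1 s
    grad_timeout_micros=1000000,
    shared_name="my_batcher")       # ops sharing this name batch together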
Example #2
def kmeans_plus_plus_initialization(points,
                                    num_to_sample,
                                    seed,
                                    num_retries_per_sample,
                                    name=None):
    r"""Selects num_to_sample rows of input using the KMeans++ criterion.

  Rows of points are assumed to be input points. One row is selected at random.
  Subsequent rows are sampled with probability proportional to the squared L2
  distance from the nearest row selected so far, until num_to_sample rows have
  been sampled.

  Args:
    points: A `Tensor` of type `float32`.
      Matrix of shape (n, d). Rows are assumed to be input points.
    num_to_sample: A `Tensor` of type `int64`.
      Scalar. The number of rows to sample. This value must not be
      larger than n.
    seed: A `Tensor` of type `int64`.
      Scalar. Seed for initializing the random number generator.
    num_retries_per_sample: A `Tensor` of type `int64`.
      Scalar. For each row that is sampled, this parameter
      specifies the number of additional points to draw from the current
      distribution before selecting the best. If a negative value is specified, a
      heuristic is used to sample O(log(num_to_sample)) additional points.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
    Matrix of shape (num_to_sample, d). The sampled rows.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        _, _, _op = _op_def_lib._apply_op_helper(
            "KmeansPlusPlusInitialization",
            points=points,
            num_to_sample=num_to_sample,
            seed=seed,
            num_retries_per_sample=num_retries_per_sample,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = None
        _execute.record_gradient("KmeansPlusPlusInitialization", _inputs_flat,
                                 _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "KmeansPlusPlusInitialization", name,
                _ctx._post_execution_callbacks, points, num_to_sample, seed,
                num_retries_per_sample)
            return _result
        except _core._FallbackException:
            return kmeans_plus_plus_initialization_eager_fallback(
                points,
                num_to_sample,
                seed,
                num_retries_per_sample,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
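
A small usage sketch, mirroring the documented shapes (TF 1.x graph mode
assumed; the values are illustrative):

import tensorflow as tf

points = tf.constant([[0., 0.], [1., 1.], [4., 4.], [9., 9.]], tf.float32)
samples = kmeans_plus_plus_initialization(
    points,
    num_to_sample=tf.constant(2, tf.int64),
    seed=tf.constant(42, tf.int64),
    num_retries_per_sample=tf.constant(-1, tf.int64))  # -1: heuristic retries
# samples is a (2, 2) matrix holding two sampled rows of `points`.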
Example #3
def roll(input, shift, axis, name=None):
    r"""Rolls the elements of a tensor along an axis.

  The elements are shifted positively (towards larger indices) by the offset of
  `shift` along the dimension of `axis`. Negative `shift` values will shift
  elements in the opposite direction. Elements that roll past the last position
  will wrap around to the first and vice versa. Multiple shifts along multiple
  axes may be specified.

  For example:

  ```
  # 't' is [0, 1, 2, 3, 4]
  roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]

  # shifting along multiple dimensions
  # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
  roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]

  # shifting along the same axis multiple times
  # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
  roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
  ```

  Args:
    input: A `Tensor`.
    shift: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
      elements are shifted positively (towards larger indices) along the dimension
      specified by `axis[i]`. Negative shifts will roll the elements in the opposite
      direction.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which the
      shift `shift[i]` should occur. If the same axis is referenced more than once, the
      total shift for that axis will be the sum of all the shifts that belong to that
      axis.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "Roll", name, _ctx._post_execution_callbacks, input, shift,
                axis)
            return _result
        except _core._FallbackException:
            try:
                return roll_eager_fallback(input,
                                           shift,
                                           axis,
                                           name=name,
                                           ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    _, _, _op = _op_def_lib._apply_op_helper("Roll",
                                             input=input,
                                             shift=shift,
                                             axis=axis,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op._get_attr_type("T"), "Tshift",
              _op._get_attr_type("Tshift"), "Taxis",
              _op._get_attr_type("Taxis"))
    _execute.record_gradient("Roll", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
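
The pseudocode in the docstring can be exercised directly through the wrapper;
a minimal sketch (TF 1.x assumed):

import tensorflow as tf

t = tf.constant([0, 1, 2, 3, 4])
rolled = roll(t, shift=2, axis=0)  # documented result: [3, 4, 0, 1, 2]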
Example #4
def fused_conv2d_bias_activation(conv_input,
                                 filter,
                                 bias,
                                 side_input,
                                 conv_input_scale,
                                 side_input_scale,
                                 strides,
                                 padding,
                                 data_format="NHWC",
                                 filter_format="HWIO",
                                 activation_mode="Relu",
                                 dilations=[1, 1, 1, 1],
                                 name=None):
    r"""    Computes a fused kernel which implements: 2-D convolution, adds side input,

      with separate scaling on convolution and side inputs, then adds bias and
      applies the RELU activation function to the result. Supports both float and
      qint8 data formats. In the case of qint8, the output is clipped to [0..127].

      conv_input: A tensor with format as specified by `data_format` (see below).
      filter: A tensor with format depending on `data_format` as follows:
          "NHWC", "NCHW":
               `float [ filter_height, filter_width, in_channels, out_channels ]`
          "NCHW_VECT_C":
               `qint8 [ out_channels, in_channels, filter_height, filter_width ]`
      bias: 1-D float tensor with size matching the `out_channels` dimension of
          `filter`.
          Note: this tensor is still float, even if other inputs are qint8.
      side_input: A tensor with format as specified by `data_format` (see below).
          This tensor will be ignored and can be [] if side_input_scale == 0.
          Otherwise, the size of each dimension must match the `output` tensor.
      conv_input_scale: scalar float value to be multiplied by `conv_input`.
          (conceptually; in reality it is applied after the convolution).
      side_input_scale: scalar float value to be multiplied by `side_input`.
      output: A tensor with format as specified by `data_format` (see below).
          The dimension sizes are determined automatically based on other inputs
          and attributes.
      T: The element data type of `conv_input`, `side_input` and `output` tensors.
          Note: must match with the `data_format`.
      Tbias: The element data type of `bias`.
      strides: 1-D tensor of length 4.  The stride of the sliding window for each
          dimension of `input`. The dimension order is determined by the value of
          `data_format`, see below for details.
          Note: the stride for batch and channel dimensions must be 1.
      padding: The type of padding algorithm to use.
      data_format: A string specifying the data format of `conv_input`,
          `side_input` and `output` tensors with the following options:
          "NHWC": `float [ batch, height, width, channels ]`
          "NCHW": `float [ batch, channels, height, width ]`
          "NCHW_VECT_C":
              `qint8 [ batch, channels / 4, height, width, channels % 4 ]`
          Note: for "NCHW_VECT_C", `channels` must be a multiple of 4.
      filter_format: A string specifying the data format of `filter`,
          "HWIO": `float [ kernel_height, kernel_width, input_channels,
                           output_channels ]`
          "OIHW_VECT_I":
              `qint8 [ output_channels, input_channels / 4,
                       kernel_height, kernel_width, input_channels % 4 ]`
      activation_mode: The activation applied to the output.
          Must be "Relu" or "None".
      dilations: 1-D tensor of length 4.  The dilation factor for each dimension
          of `input`. If set to k > 1, there will be k-1 skipped cells between
          each filter element on that dimension. The dimension order is determined
          by the value of `data_format`, see above for details. Dilations in the
          batch and depth dimensions must be 1.

  Args:
    conv_input: A `Tensor`. Must be one of the following types: `float32`, `half`, `qint8`.
    filter: A `Tensor`. Must have the same type as `conv_input`.
    bias: A `Tensor`. Must be one of the following types: `float32`, `half`.
    side_input: A `Tensor`. Must have the same type as `conv_input`.
    conv_input_scale: A `Tensor` of type `float32`.
    side_input_scale: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
    filter_format: An optional `string` from: `"HWIO", "OIHW", "OIHW_VECT_I"`. Defaults to `"HWIO"`.
    activation_mode: An optional `string` from: `"Relu", "None"`. Defaults to `"Relu"`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `conv_input`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if not isinstance(strides, (list, tuple)):
            raise TypeError("Expected list for 'strides' argument to "
                            "'fused_conv2d_bias_activation' Op, not %r." %
                            strides)
        strides = [_execute.make_int(_i, "strides") for _i in strides]
        padding = _execute.make_str(padding, "padding")
        if data_format is None:
            data_format = "NHWC"
        data_format = _execute.make_str(data_format, "data_format")
        if filter_format is None:
            filter_format = "HWIO"
        filter_format = _execute.make_str(filter_format, "filter_format")
        if activation_mode is None:
            activation_mode = "Relu"
        activation_mode = _execute.make_str(activation_mode, "activation_mode")
        if dilations is None:
            dilations = [1, 1, 1, 1]
        if not isinstance(dilations, (list, tuple)):
            raise TypeError("Expected list for 'dilations' argument to "
                            "'fused_conv2d_bias_activation' Op, not %r." %
                            dilations)
        dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
        _, _, _op = _op_def_lib._apply_op_helper(
            "FusedConv2DBiasActivation",
            conv_input=conv_input,
            filter=filter,
            bias=bias,
            side_input=side_input,
            conv_input_scale=conv_input_scale,
            side_input_scale=side_input_scale,
            strides=strides,
            padding=padding,
            data_format=data_format,
            filter_format=filter_format,
            activation_mode=activation_mode,
            dilations=dilations,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("T", _op.get_attr("T"), "Tbias", _op.get_attr("Tbias"),
                  "strides", _op.get_attr("strides"), "padding",
                  _op.get_attr("padding"), "data_format",
                  _op.get_attr("data_format"), "filter_format",
                  _op.get_attr("filter_format"), "activation_mode",
                  _op.get_attr("activation_mode"), "dilations",
                  _op.get_attr("dilations"))
        _execute.record_gradient("FusedConv2DBiasActivation", _inputs_flat,
                                 _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "FusedConv2DBiasActivation", name,
                _ctx._post_execution_callbacks, conv_input, filter, bias,
                side_input, conv_input_scale, side_input_scale, "strides",
                strides, "padding", padding, "data_format", data_format,
                "filter_format", filter_format, "activation_mode",
                activation_mode, "dilations", dilations)
            return _result
        except _core._FallbackException:
            return fused_conv2d_bias_activation_eager_fallback(
                conv_input,
                filter,
                bias,
                side_input,
                conv_input_scale,
                side_input_scale,
                strides=strides,
                padding=padding,
                data_format=data_format,
                filter_format=filter_format,
                activation_mode=activation_mode,
                dilations=dilations,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
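
A hedged construction sketch for the fused op above, using the float NHWC/HWIO
path; shapes are illustrative, and the empty side input follows the docstring's
"can be [] if side_input_scale == 0" note:

import tensorflow as tf

conv_input = tf.random_normal([1, 8, 8, 3])        # NHWC
filt = tf.random_normal([3, 3, 3, 16])             # HWIO
bias = tf.zeros([16])
out = fused_conv2d_bias_activation(
    conv_input, filt, bias,
    side_input=tf.constant([], tf.float32),        # ignored: scale is 0
    conv_input_scale=tf.constant(1.0),
    side_input_scale=tf.constant(0.0),
    strides=[1, 1, 1, 1],
    padding="SAME")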
Example #5
def load_and_remap_matrix(ckpt_path,
                          old_tensor_name,
                          row_remapping,
                          col_remapping,
                          initializing_values,
                          num_rows,
                          num_cols,
                          max_rows_in_memory=-1,
                          name=None):
    r"""Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint

  at `ckpt_path` and potentially reorders its rows and columns using the

  specified remappings.

  

  Most users should use one of the wrapper initializers (such as

  `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this

  function directly.

  

  The remappings are 1-D tensors with the following properties:

  

  * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output

    matrix will be initialized from the row corresponding to index

    `row_remapping[i]` in the old `Tensor` from the checkpoint.

  * `col_remapping` must have either 0 entries (indicating that no column

    reordering is needed) or `num_cols` entries. If specified, column `j` of the

    output matrix will be initialized from the column corresponding to index

    `col_remapping[j]` in the old `Tensor` from the checkpoint.

  * A value of -1 in either of the remappings signifies a "missing" entry. In that

    case, values from the `initializing_values` tensor will be used to fill that

    missing row or column. If `row_remapping` has `r` missing entries and

    `col_remapping` has `c` missing entries, then the following condition must be

    true:

  

  `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`

  

  The remapping tensors can be generated using the GenerateVocabRemapping op.

  

  As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],

  initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing

  the value from row i, column j of the old tensor in the checkpoint, the output

  matrix will look like the following:

  

  [[w(1, 0),  w(1, 2),  0.5],

   [w(0, 0),  w(0, 2), -0.5],

   [0.25,    -0.25,      42]]

  Args:
    ckpt_path: A `Tensor` of type `string`.
      Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
      which the old matrix `Tensor` will be loaded.
    old_tensor_name: A `Tensor` of type `string`.
      Name of the 2-D `Tensor` to load from checkpoint.
    row_remapping: A `Tensor` of type `int64`.
      An int `Tensor` of row remappings (generally created by
      `generate_vocab_remapping`).  Even if no row remapping is needed, this must
      still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
      index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
    col_remapping: A `Tensor` of type `int64`.
      An int `Tensor` of column remappings (generally created by
      `generate_vocab_remapping`).  May be a size-0 `Tensor` if only row remapping
      is to be done (e.g. column ordering is the same).
    initializing_values: A `Tensor` of type `float32`.
      A float `Tensor` containing values to fill in for cells
      in the output matrix that are not loaded from the checkpoint. Length must be
      exactly the same as the number of missing / new cells.
    num_rows: An `int` that is `>= 0`.
      Number of rows (length of the 1st dimension) in the output matrix.
    num_cols: An `int` that is `>= 1`.
      Number of columns (length of the 2nd dimension) in the output matrix.
    max_rows_in_memory: An optional `int`. Defaults to `-1`.
      The maximum number of rows to load from the checkpoint at
      once. If less than or equal to 0, the entire matrix will be loaded into
      memory. Setting this arg trades increased disk reads for lower memory usage.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        num_rows = _execute.make_int(num_rows, "num_rows")
        num_cols = _execute.make_int(num_cols, "num_cols")
        if max_rows_in_memory is None:
            max_rows_in_memory = -1
        max_rows_in_memory = _execute.make_int(max_rows_in_memory,
                                               "max_rows_in_memory")
        _, _, _op = _op_def_lib._apply_op_helper(
            "LoadAndRemapMatrix",
            ckpt_path=ckpt_path,
            old_tensor_name=old_tensor_name,
            row_remapping=row_remapping,
            col_remapping=col_remapping,
            initializing_values=initializing_values,
            num_rows=num_rows,
            num_cols=num_cols,
            max_rows_in_memory=max_rows_in_memory,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("num_rows", _op.get_attr("num_rows"), "num_cols",
                  _op.get_attr("num_cols"), "max_rows_in_memory",
                  _op.get_attr("max_rows_in_memory"))
        _execute.record_gradient("LoadAndRemapMatrix", _inputs_flat, _attrs,
                                 _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "LoadAndRemapMatrix", name, _ctx._post_execution_callbacks,
                ckpt_path, old_tensor_name, row_remapping, col_remapping,
                initializing_values, "num_rows", num_rows, "num_cols",
                num_cols, "max_rows_in_memory", max_rows_in_memory)
            return _result
        except _core._FallbackException:
            return load_and_remap_matrix_eager_fallback(
                ckpt_path,
                old_tensor_name,
                row_remapping,
                col_remapping,
                initializing_values,
                num_rows=num_rows,
                num_cols=num_cols,
                max_rows_in_memory=max_rows_in_memory,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
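
A construction-only sketch that reuses the docstring's own example remappings;
the checkpoint path and tensor name are placeholders, so actually running it
requires a real checkpoint:

import tensorflow as tf

remapped = load_and_remap_matrix(
    ckpt_path=tf.constant("/tmp/model.ckpt"),          # placeholder path
    old_tensor_name=tf.constant("embedding/weights"),  # placeholder name
    row_remapping=tf.constant([1, 0, -1], tf.int64),
    col_remapping=tf.constant([0, 2, -1], tf.int64),
    initializing_values=tf.constant([0.5, -0.5, 0.25, -0.25, 42.], tf.float32),
    num_rows=3,
    num_cols=3)  # 1 missing row, 1 missing col: 1*3 + 1*3 - 1*1 == 5 values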
Example #6
def nccl_all_reduce(input, reduction, num_devices, shared_name, name=None):
    r"""Outputs a tensor containing the reduction across all input tensors.

  Outputs a tensor containing the reduction across all input tensors passed to ops
  within the same `shared_name`.

  The graph should be constructed so if one op runs with shared_name value `c`,
  then `num_devices` ops will run with shared_name value `c`.  Failure to do so
  will cause the graph execution to fail to complete.

  input: the input to the reduction
  data: the value of the reduction across all `num_devices` devices.
  reduction: the reduction operation to perform.
  num_devices: The number of devices participating in this reduction.
  shared_name: Identifier that is shared between ops of the same reduction.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`.
    reduction: A `string` from: `"min", "max", "prod", "sum"`.
    num_devices: An `int`.
    shared_name: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "NcclAllReduce", name, _ctx.post_execution_callbacks, input,
                "reduction", reduction, "num_devices", num_devices,
                "shared_name", shared_name)
            return _result
        except _core._FallbackException:
            try:
                return nccl_all_reduce_eager_fallback(input,
                                                      reduction=reduction,
                                                      num_devices=num_devices,
                                                      shared_name=shared_name,
                                                      name=name,
                                                      ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    reduction = _execute.make_str(reduction, "reduction")
    num_devices = _execute.make_int(num_devices, "num_devices")
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper("NcclAllReduce",
                                             input=input,
                                             reduction=reduction,
                                             num_devices=num_devices,
                                             shared_name=shared_name,
                                             name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("reduction", _op.get_attr("reduction"), "T",
              _op._get_attr_type("T"), "num_devices",
              _op.get_attr("num_devices"), "shared_name",
              _op.get_attr("shared_name"))
    _execute.record_gradient("NcclAllReduce", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
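
A graph-construction sketch of the shared_name contract described above: exactly
`num_devices` ops must be built with the same shared_name (two GPUs assumed
here; running it needs real NCCL-capable devices):

import tensorflow as tf

reduced = []
for i in range(2):
    with tf.device("/gpu:%d" % i):
        t = tf.random_normal([4])
        reduced.append(nccl_all_reduce(t, reduction="sum",
                                       num_devices=2, shared_name="reduce_0"))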
Example #7
def masked_matmul(a, b, mask_indices, transpose_a, transpose_b, name=None):
  r"""Computes the product a * b, but only for indices (i, j) in mask_indices. The

  result is stored in prod_values, a rank 1 tensor, such that for all i,
  prod_values[i] = (a * b)[mask_indices[i, 0], mask_indices[i, 1]].
  Note that the shapes of the input matrices a, b should be compatible (after
  transposing as specified by the arguments transpose_a and transpose_b).

  Input arguments:

  Args:
    a: A `Tensor` of type `float32`. A rank 2 tensor of shape [m, n].
    b: A `Tensor` of type `float32`.
      A rank 2 tensor of shape [s, t]. The inner dimensions of a and b should match
      after transposition.
    mask_indices: A `Tensor` of type `int64`.
      A rank 2 tensor, of shape [nnz, 2] where nnz is the number of
      non-zero elements in the output. The indices are not assumed to be in
      lexicographic, or any particular order.
      For all i, mask_indices[i, :] should represent a valid index of the product
      matrix (a * b) (after transposition). That is:
      mask_indices[i, 0] should be in [0, m) if !transpose_a, and in [0, n)
        otherwise.
      mask_indices[i, 1] should be in [0, t) if !transpose_b, and in [0, s)
        otherwise.
    transpose_a: A `Tensor` of type `bool`.
      A boolean that specifies whether to transpose the matrix a.
    transpose_b: A `Tensor` of type `bool`.
      A boolean that specifies whether to transpose the matrix b.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
    A rank 1 tensor of shape [nnz], representing the values of the
    non-zero elements in the product, such that for all i,
    prod_values[i] = (a * b)[mask_indices[i, 0], mask_indices[i, 1]].
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MaskedMatmul",
        name, _ctx._post_execution_callbacks, a, b, mask_indices, transpose_a,
        transpose_b)
      return _result
    except _core._FallbackException:
      try:
        return masked_matmul_eager_fallback(
            a, b, mask_indices, transpose_a, transpose_b, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              masked_matmul, a=a, b=b, mask_indices=mask_indices,
                             transpose_a=transpose_a, transpose_b=transpose_b,
                             name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaskedMatmul", a=a, b=b, mask_indices=mask_indices,
                        transpose_a=transpose_a, transpose_b=transpose_b,
                        name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          masked_matmul, a=a, b=b, mask_indices=mask_indices,
                         transpose_a=transpose_a, transpose_b=transpose_b,
                         name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = None
  _execute.record_gradient(
      "MaskedMatmul", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
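
A minimal sketch of the masked product, computing just two entries of a @ b
(TF 1.x assumed):

import tensorflow as tf

a = tf.random_normal([3, 4])
b = tf.random_normal([4, 5])
mask = tf.constant([[0, 0], [2, 4]], tf.int64)  # entries (0, 0) and (2, 4)
vals = masked_matmul(a, b, mask,
                     transpose_a=tf.constant(False),
                     transpose_b=tf.constant(False))
# vals[i] == (a @ b)[mask[i, 0], mask[i, 1]], so vals has shape [2].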
Example #8
def set_size(set_indices,
             set_values,
             set_shape,
             validate_indices=True,
             name=None):
    r"""Number of unique elements along last dimension of input `set`.

  Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
  and `set_shape`. The last dimension contains values in a set; duplicates are
  allowed but ignored.

  If `validate_indices` is `True`, this op validates the order and range of `set`
  indices.

  Args:
    set_indices: A `Tensor` of type `int64`.
      2D `Tensor`, indices of a `SparseTensor`.
    set_values: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
      1D `Tensor`, values of a `SparseTensor`.
    set_shape: A `Tensor` of type `int64`.
      1D `Tensor`, shape of a `SparseTensor`.
    validate_indices: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if validate_indices is None:
            validate_indices = True
        validate_indices = _execute.make_bool(validate_indices,
                                              "validate_indices")
        _, _, _op = _op_def_lib._apply_op_helper(
            "SetSize",
            set_indices=set_indices,
            set_values=set_values,
            set_shape=set_shape,
            validate_indices=validate_indices,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("validate_indices", _op.get_attr("validate_indices"), "T",
                  _op.get_attr("T"))
        _execute.record_gradient("SetSize", _inputs_flat, _attrs, _result,
                                 name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "SetSize", name, _ctx._post_execution_callbacks, set_indices,
                set_values, set_shape, "validate_indices", validate_indices)
            return _result
        except _core._FallbackException:
            return set_size_eager_fallback(set_indices,
                                           set_values,
                                           set_shape,
                                           validate_indices=validate_indices,
                                           name=name,
                                           ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
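
A small sketch counting the sets {1, 2} and {3}, passed as the three components
of a SparseTensor (TF 1.x assumed):

import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                     values=tf.constant([1, 2, 3], tf.int32),
                     dense_shape=[2, 2])
sizes = set_size(sp.indices, sp.values, sp.dense_shape)  # expected: [2, 1]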
Example #9
def parameterized_truncated_normal(shape, means, stdevs, minvals, maxvals, seed=0, seed2=0, name=None):
  r"""Outputs random values from a normal distribution. The parameters may each be a

  scalar which applies to the entire output, or a vector of length shape[0] which
  stores the parameters for each batch.

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The shape of the output tensor. Batches are indexed by the 0th dimension.
    means: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      The mean parameter of each batch.
    stdevs: A `Tensor`. Must have the same type as `means`.
      The standard deviation parameter of each batch. Must be greater than 0.
    minvals: A `Tensor`. Must have the same type as `means`.
      The minimum cutoff. May be -infinity.
    maxvals: A `Tensor`. Must have the same type as `means`.
      The maximum cutoff. May be +infinity, and must be greater than the minval
      for each batch.
    seed: An optional `int`. Defaults to `0`.
      If either `seed` or `seed2` are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `means`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ParameterizedTruncatedNormal", shape=shape, means=means,
        stdevs=stdevs, minvals=minvals, maxvals=maxvals, seed=seed,
        seed2=seed2, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("seed", _op.get_attr("seed"), "seed2", _op.get_attr("seed2"),
              "dtype", _op.get_attr("dtype"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "ParameterizedTruncatedNormal", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "ParameterizedTruncatedNormal", name,
        _ctx._post_execution_callbacks, shape, means, stdevs, minvals,
        maxvals, "seed", seed, "seed2", seed2)
      return _result
    except _core._FallbackException:
      return parameterized_truncated_normal_eager_fallback(
          shape, means, stdevs, minvals, maxvals, seed=seed, seed2=seed2,
          name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
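
A sketch drawing two batches of five samples each, with per-batch parameters in
vectors of length shape[0] (values illustrative):

import tensorflow as tf

samples = parameterized_truncated_normal(
    shape=tf.constant([2, 5], tf.int32),
    means=tf.constant([0.0, 10.0]),
    stdevs=tf.constant([1.0, 2.0]),
    minvals=tf.constant([-2.0, 8.0]),
    maxvals=tf.constant([2.0, 12.0]),
    seed=7)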
Example #10
def bipartite_match(distance_mat, num_valid_rows, top_k=-1, name=None):
    r"""Find bipartite matching based on a given distance matrix.

  A greedy bi-partite matching algorithm is used to obtain the matching with the
  (greedy) minimum distance.

  Args:
    distance_mat: A `Tensor` of type `float32`.
      A 2-D float tensor of shape `[num_rows, num_columns]`. It is a
      pair-wise distance matrix between the entities represented by each row and
      each column. It is an asymmetric matrix. The smaller the distance is, the more
      similar the pairs are. The bipartite matching is to minimize the distances.
    num_valid_rows: A `Tensor` of type `float32`.
      A scalar or a 1-D tensor with one element describing the
      number of valid rows of distance_mat to consider for the bipartite matching.
      If set to be negative, then all rows from `distance_mat` are used.
    top_k: An optional `int`. Defaults to `-1`.
      A scalar that specifies the number of top-k matches to retrieve.
      If set to be negative, then it is set according to the maximum number of
      matches from `distance_mat`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (row_to_col_match_indices, col_to_row_match_indices).

    row_to_col_match_indices: A `Tensor` of type `int32`. A vector of length num_rows, which is the number of
      rows of the input `distance_mat`.
      If `row_to_col_match_indices[i]` is not -1, row i is matched to column
      `row_to_col_match_indices[i]`.
    col_to_row_match_indices: A `Tensor` of type `int32`. A vector of length num_columns, which is the number
      of columns of the input `distance_mat`.
      If `col_to_row_match_indices[j]` is not -1, column j is matched to row
      `col_to_row_match_indices[j]`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if top_k is None:
            top_k = -1
        top_k = _execute.make_int(top_k, "top_k")
        _, _, _op = _op_def_lib._apply_op_helper("BipartiteMatch",
                                                 distance_mat=distance_mat,
                                                 num_valid_rows=num_valid_rows,
                                                 top_k=top_k,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("top_k", _op.get_attr("top_k"))
        _execute.record_gradient("BipartiteMatch", _inputs_flat, _attrs,
                                 _result, name)
        _result = _BipartiteMatchOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "BipartiteMatch", name, _ctx._post_execution_callbacks,
                distance_mat, num_valid_rows, "top_k", top_k)
            _result = _BipartiteMatchOutput._make(_result)
            return _result
        except _core._FallbackException:
            return bipartite_match_eager_fallback(distance_mat,
                                                  num_valid_rows,
                                                  top_k=top_k,
                                                  name=name,
                                                  ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
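
A minimal sketch matching 2 rows to 3 columns by smallest distance (TF 1.x
assumed; note that num_valid_rows is a float tensor per the signature above):

import tensorflow as tf

dist = tf.constant([[0.1, 0.9, 0.5],
                    [0.8, 0.2, 0.6]], tf.float32)
row_to_col, col_to_row = bipartite_match(dist, num_valid_rows=tf.constant(2.0))
# Unmatched entries in either output are -1.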
Example #11
def kinesis_dataset(stream, shard, read_indefinitely, interval, name=None):
    r"""Creates a dataset that emits the messages of one or more Kinesis topics.

  Args:
    stream: A `Tensor` of type `string`.
      A `tf.string` tensor containing the name of the stream.
    shard: A `Tensor` of type `string`.
      A `tf.string` tensor containing the id of the shard.
    read_indefinitely: A `Tensor` of type `bool`.
      If `True`, the Kinesis dataset will keep retrying
      on `EOF` after the `interval` period. If `False`, then
      the dataset will stop on `EOF`. The default value is `True`.
    interval: A `Tensor` of type `int64`.
      The interval for the Kinesis Client to wait before
      it tries to get records again (in milliseconds).
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
    _ctx = _context._context
    if _ctx is not None and _ctx._eager_context.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "KinesisDataset", name, _ctx._post_execution_callbacks, stream,
                shard, read_indefinitely, interval)
            return _result
        except _core._FallbackException:
            try:
                return kinesis_dataset_eager_fallback(stream,
                                                      shard,
                                                      read_indefinitely,
                                                      interval,
                                                      name=name,
                                                      ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
            except (TypeError, ValueError):
                result = _dispatch.dispatch(
                    kinesis_dataset,
                    stream=stream,
                    shard=shard,
                    read_indefinitely=read_indefinitely,
                    interval=interval,
                    name=name)
                if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
                    return result
                raise
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    try:
        _, _, _op = _op_def_lib._apply_op_helper(
            "KinesisDataset",
            stream=stream,
            shard=shard,
            read_indefinitely=read_indefinitely,
            interval=interval,
            name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(kinesis_dataset,
                                    stream=stream,
                                    shard=shard,
                                    read_indefinitely=read_indefinitely,
                                    interval=interval,
                                    name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient("KinesisDataset", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
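
A construction sketch for the dataset variant; the stream and shard identifiers
are placeholders, and actually running it requires AWS Kinesis credentials:

import tensorflow as tf

variant = kinesis_dataset(
    stream=tf.constant("my-stream"),             # placeholder stream name
    shard=tf.constant("shardId-000000000000"),   # placeholder shard id
    read_indefinitely=tf.constant(False),
    interval=tf.constant(100, tf.int64))         # milliseconds, per the docstring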
Example #12
def image_projective_transform(images, transforms, interpolation, name=None):
    r"""Applies the given transform to each of the images.

  Input `image` is a `Tensor` in NHWC format (where the axes are image in batch,
  rows, columns, and channels). Input `transforms` is a num_images x 8 or 1 x 8
  matrix, where each row corresponds to a 3 x 3 projective transformation matrix,
  with the last entry assumed to be 1. If there is one row, the same
  transformation will be applied to all images.

  If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps
  the *output* point `(x, y)` to a transformed *input* point
  `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
  `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input
  image, the output pixel is set to 0. The output is the same size as the input.

  Args:
    images: A `Tensor`. Must be one of the following types: `uint8`, `int32`, `int64`, `float32`, `float64`.
      4D `Tensor`, input image(s) in NHWC format.
    transforms: A `Tensor` of type `float32`.
      2D `Tensor`, projective transform(s) to apply to the image(s).
    interpolation: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `images`.
    4D `Tensor`, image(s) in NHWC format, generated by applying
    the `transforms` to the `images`. Satisfies the description above.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        interpolation = _execute.make_str(interpolation, "interpolation")
        _, _, _op = _op_def_lib._apply_op_helper("ImageProjectiveTransform",
                                                 images=images,
                                                 transforms=transforms,
                                                 interpolation=interpolation,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("dtype", _op.get_attr("dtype"), "interpolation",
                  _op.get_attr("interpolation"))
        _execute.record_gradient("ImageProjectiveTransform", _inputs_flat,
                                 _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "ImageProjectiveTransform", name,
                _ctx._post_execution_callbacks, images, transforms,
                "interpolation", interpolation)
            return _result
        except _core._FallbackException:
            return image_projective_transform_eager_fallback(
                images,
                transforms,
                interpolation=interpolation,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
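
A sketch applying the identity transform [1, 0, 0, 0, 1, 0, 0, 0] (which maps
(x, y) to itself) to a batch of one image; "NEAREST" as an interpolation value
is an assumption here:

import tensorflow as tf

images = tf.random_normal([1, 4, 4, 1])
identity = tf.constant([[1., 0., 0., 0., 1., 0., 0., 0.]], tf.float32)
out = image_projective_transform(images, identity, interpolation="NEAREST")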
Example #13
def generate_big_query_reader_partitions(project_id,
                                         dataset_id,
                                         table_id,
                                         columns,
                                         timestamp_millis,
                                         num_partitions,
                                         test_end_point="",
                                         name=None):
    r"""Generates serialized partition messages suitable for batch reads.

  This op should not be used directly by clients. Instead, the
  bigquery_reader_ops.py file defines a clean interface to the reader.

  Args:
    project_id: A `string`. GCP project ID.
    dataset_id: A `string`. BigQuery Dataset ID.
    table_id: A `string`. Table to read.
    columns: A list of `strings`.
      List of columns to read. Leave empty to read all columns.
    timestamp_millis: An `int`.
      Table snapshot timestamp in millis since epoch. Relative
      (negative or zero) snapshot times are not allowed. For more details, see
      'Table Decorators' in BigQuery docs.
    num_partitions: An `int`. Number of partitions to split the table into.
    test_end_point: An optional `string`. Defaults to `""`.
      Do not use. For testing purposes only.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`. Serialized table partitions.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        project_id = _execute.make_str(project_id, "project_id")
        dataset_id = _execute.make_str(dataset_id, "dataset_id")
        table_id = _execute.make_str(table_id, "table_id")
        if not isinstance(columns, (list, tuple)):
            raise TypeError(
                "Expected list for 'columns' argument to "
                "'generate_big_query_reader_partitions' Op, not %r." % columns)
        columns = [_execute.make_str(_s, "columns") for _s in columns]
        timestamp_millis = _execute.make_int(timestamp_millis,
                                             "timestamp_millis")
        num_partitions = _execute.make_int(num_partitions, "num_partitions")
        if test_end_point is None:
            test_end_point = ""
        test_end_point = _execute.make_str(test_end_point, "test_end_point")
        _, _, _op = _op_def_lib._apply_op_helper(
            "GenerateBigQueryReaderPartitions",
            project_id=project_id,
            dataset_id=dataset_id,
            table_id=table_id,
            columns=columns,
            timestamp_millis=timestamp_millis,
            num_partitions=num_partitions,
            test_end_point=test_end_point,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("project_id", _op.get_attr("project_id"), "dataset_id",
                  _op.get_attr("dataset_id"), "table_id",
                  _op.get_attr("table_id"), "columns",
                  _op.get_attr("columns"), "timestamp_millis",
                  _op.get_attr("timestamp_millis"), "num_partitions",
                  _op.get_attr("num_partitions"), "test_end_point",
                  _op.get_attr("test_end_point"))
        _execute.record_gradient("GenerateBigQueryReaderPartitions",
                                 _inputs_flat, _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "GenerateBigQueryReaderPartitions", name,
                _ctx._post_execution_callbacks, "project_id", project_id,
                "dataset_id", dataset_id, "table_id", table_id, "columns",
                columns, "timestamp_millis", timestamp_millis,
                "num_partitions", num_partitions, "test_end_point",
                test_end_point)
            return _result
        except _core._FallbackException:
            return generate_big_query_reader_partitions_eager_fallback(
                project_id=project_id,
                dataset_id=dataset_id,
                table_id=table_id,
                columns=columns,
                timestamp_millis=timestamp_millis,
                num_partitions=num_partitions,
                test_end_point=test_end_point,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
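
# A minimal construction sketch, assuming TF 1.x graph mode; the project,
# dataset, and table names are hypothetical, and actually running the op
# requires BigQuery access.
partitions = generate_big_query_reader_partitions(
    project_id="my-gcp-project",
    dataset_id="my_dataset",
    table_id="my_table",
    columns=["name", "age"],
    timestamp_millis=1546300800000,  # absolute snapshot time, ms since epoch
    num_partitions=4)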
Example #14
def unbatch_grad(original_input,
                 batch_index,
                 grad,
                 id,
                 container="",
                 shared_name="",
                 name=None):
    r"""Gradient of Unbatch.

  Acts like Batch, but uses the given batch_index as the index for batching
  things as they become available. This ensures that the gradients are
  propagated back in the same session which did the forward pass.

  original_input: The input to the Unbatch operation this is the gradient of.
  batch_index: The batch_index given to the Unbatch operation this is the gradient
  of.
  grad: The downstream gradient.
  id: The id scalar emitted by Batch.
  batched_grad: The return value, either an empty tensor or the batched gradient.
  container: Container to control resource sharing.
  shared_name: Instances of UnbatchGrad with the same container and shared_name
   are assumed to possibly belong to the same batch. If left empty, the op name
   will be used as the shared name.

  Args:
    original_input: A `Tensor`.
    batch_index: A `Tensor` of type `int64`.
    grad: A `Tensor`. Must have the same type as `original_input`.
    id: A `Tensor` of type `int64`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `original_input`.
  """
    _ctx = _context.context()
    if not _ctx.executing_eagerly():
        if container is None:
            container = ""
        container = _execute.make_str(container, "container")
        if shared_name is None:
            shared_name = ""
        shared_name = _execute.make_str(shared_name, "shared_name")
        _, _, _op = _op_def_lib._apply_op_helper("UnbatchGrad",
                                                 original_input=original_input,
                                                 batch_index=batch_index,
                                                 grad=grad,
                                                 id=id,
                                                 container=container,
                                                 shared_name=shared_name,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("container", _op.get_attr("container"), "shared_name",
                  _op.get_attr("shared_name"), "T", _op.get_attr("T"))
        _execute.record_gradient("UnbatchGrad", _inputs_flat, _attrs, _result,
                                 name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name, "UnbatchGrad", name,
                _ctx._post_execution_callbacks, original_input, batch_index,
                grad, id, "container", container, "shared_name", shared_name)
            return _result
        except _core._FallbackException:
            return unbatch_grad_eager_fallback(original_input,
                                               batch_index,
                                               grad,
                                               id,
                                               container=container,
                                               shared_name=shared_name,
                                               name=name)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
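
# A minimal construction sketch, assuming TF 1.x graph mode. UnbatchGrad is
# normally created for you as the gradient of Unbatch; the tensors below stand
# in for the values an Unbatch op would have received.
import tensorflow as tf

original = tf.zeros([4, 2])                  # input the Unbatch op saw
down_grad = tf.ones([4, 2])                  # downstream gradient
index = tf.zeros([1, 3], dtype=tf.int64)     # rows of [id, start, length]
batch_id = tf.constant(0, dtype=tf.int64)    # id scalar emitted by Batch
batched_grad = unbatch_grad(original, index, down_grad, batch_id,
                            shared_name="shared_batch")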
Example #15
def rpc(address,
        method,
        request,
        protocol="",
        fail_fast=True,
        timeout_in_ms=0,
        name=None):
    r"""TODO: add doc.

  Args:
    address: A `Tensor` of type `string`.
    method: A `Tensor` of type `string`.
    request: A `Tensor` of type `string`.
    protocol: An optional `string`. Defaults to `""`.
    fail_fast: An optional `bool`. Defaults to `True`.
    timeout_in_ms: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        if protocol is None:
            protocol = ""
        protocol = _execute.make_str(protocol, "protocol")
        if fail_fast is None:
            fail_fast = True
        fail_fast = _execute.make_bool(fail_fast, "fail_fast")
        if timeout_in_ms is None:
            timeout_in_ms = 0
        timeout_in_ms = _execute.make_int(timeout_in_ms, "timeout_in_ms")
        _, _, _op = _op_def_lib._apply_op_helper("Rpc",
                                                 address=address,
                                                 method=method,
                                                 request=request,
                                                 protocol=protocol,
                                                 fail_fast=fail_fast,
                                                 timeout_in_ms=timeout_in_ms,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("protocol", _op.get_attr("protocol"), "fail_fast",
                  _op.get_attr("fail_fast"), "timeout_in_ms",
                  _op.get_attr("timeout_in_ms"))
        _execute.record_gradient("Rpc", _inputs_flat, _attrs, _result, name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name, "Rpc",
                name, _ctx._post_execution_callbacks, address, method, request,
                "protocol", protocol, "fail_fast", fail_fast, "timeout_in_ms",
                timeout_in_ms)
            return _result
        except _core._FallbackException:
            return rpc_eager_fallback(address,
                                      method,
                                      request,
                                      protocol=protocol,
                                      fail_fast=fail_fast,
                                      timeout_in_ms=timeout_in_ms,
                                      name=name,
                                      ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
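
# A minimal construction sketch, assuming TF 1.x graph mode; the address and
# method below are hypothetical, and running the op requires a live gRPC
# server at that address.
import tensorflow as tf

response = rpc(address="localhost:8500",
               method="/my.package.Service/MyMethod",
               request=tf.constant("serialized-request-proto"),
               timeout_in_ms=5000)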
Example #16
def random_gamma(shape, alpha, seed=0, seed2=0, name=None):
  r"""Outputs random values from the Gamma distribution(s) described by alpha.

  This op uses the algorithm by Marsaglia et al. to acquire samples via
  transformation-rejection from pairs of uniform and normal random variables.
  See http://dl.acm.org/citation.cfm?id=358414

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D integer tensor. Shape of independent samples to draw from each
      distribution described by the shape parameters given in alpha.
    alpha: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      A tensor in which each scalar is a "shape" parameter describing the
      associated gamma distribution.
    seed: An optional `int`. Defaults to `0`.
      If either `seed` or `seed2` are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `alpha`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RandomGamma", shape=shape, alpha=alpha, seed=seed, seed2=seed2,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("seed", _op.get_attr("seed"), "seed2", _op.get_attr("seed2"),
              "S", _op.get_attr("S"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "RandomGamma", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "RandomGamma", name,
        _ctx._post_execution_callbacks, shape, alpha, "seed", seed, "seed2",
        seed2)
      return _result
    except _core._FallbackException:
      return random_gamma_eager_fallback(
          shape, alpha, seed=seed, seed2=seed2, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
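
# A minimal usage sketch, assuming TF 1.x graph mode: each entry of alpha
# describes one Gamma distribution, and `shape` prepends sample dimensions.
import tensorflow as tf

alpha = tf.constant([0.5, 1.5, 3.0])
samples = random_gamma(shape=[10], alpha=alpha, seed=42)  # shape [10, 3]
with tf.Session() as sess:
    print(sess.run(samples))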
Example #17
def resampler_grad(data, warp, grad_output, name=None):
  r"""Resampler Grad op.

  Args:
    data: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    warp: A `Tensor`. Must have the same type as `data`.
    grad_output: A `Tensor`. Must have the same type as `data`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (grad_data, grad_warp).

    grad_data: A `Tensor`. Has the same type as `data`.
    grad_warp: A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "ResamplerGrad", name, _ctx._post_execution_callbacks, data, warp,
        grad_output)
      _result = _ResamplerGradOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return resampler_grad_eager_fallback(
            data, warp, grad_output, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              resampler_grad, data=data, warp=warp, grad_output=grad_output,
                              name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ResamplerGrad", data=data, warp=warp, grad_output=grad_output,
                         name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          resampler_grad, data=data, warp=warp, grad_output=grad_output,
                          name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"))
  _execute.record_gradient(
      "ResamplerGrad", _inputs_flat, _attrs, _result, name)
  _result = _ResamplerGradOutput._make(_result)
  return _result
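
# A minimal construction sketch, assuming TF 1.x graph mode. ResamplerGrad is
# normally reached through tf.gradients of the forward resampler op rather
# than called directly.
import tensorflow as tf

data = tf.random_normal([1, 4, 4, 1])         # NHWC feature map
warp = tf.random_uniform([1, 2, 2, 2]) * 3.0  # (x, y) sampling coordinates
grad_out = tf.ones([1, 2, 2, 1])              # gradient w.r.t. resampler output
grad_data, grad_warp = resampler_grad(data, warp, grad_out)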
Example #18
def multinomial(logits, num_samples, seed=0, seed2=0, output_dtype=_dtypes.int64, name=None):
  r"""Draws samples from a multinomial distribution.

  Args:
    logits: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
      represents the unnormalized log probabilities for all classes.
    num_samples: A `Tensor` of type `int32`.
      0-D.  Number of independent samples to draw for each row slice.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 is set to be non-zero, the internal random number
      generator is seeded by the given seed.  Otherwise, a random seed is used.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_dtype`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    if output_dtype is None:
      output_dtype = _dtypes.int64
    output_dtype = _execute.make_type(output_dtype, "output_dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Multinomial", logits=logits, num_samples=num_samples, seed=seed,
        seed2=seed2, output_dtype=output_dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("seed", _op.get_attr("seed"), "seed2", _op.get_attr("seed2"),
              "T", _op.get_attr("T"), "output_dtype",
              _op.get_attr("output_dtype"))
    _execute.record_gradient(
      "Multinomial", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "Multinomial", name,
        _ctx._post_execution_callbacks, logits, num_samples, "seed", seed,
        "seed2", seed2, "output_dtype", output_dtype)
      return _result
    except _core._FallbackException:
      return multinomial_eager_fallback(
          logits, num_samples, seed=seed, seed2=seed2,
          output_dtype=output_dtype, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
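
# A minimal usage sketch, assuming TF 1.x graph mode: draws 5 class indices
# per row from the unnormalized log-probabilities in `logits`.
import tensorflow as tf

logits = tf.log(tf.constant([[10., 10., 10.], [1., 1., 1000.]]))
samples = multinomial(logits, num_samples=5, seed=1)  # shape [2, 5], int64
with tf.Session() as sess:
    print(sess.run(samples))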
Example #19
def wals_compute_partial_lhs_and_rhs(factors, factor_weights,
                                     unobserved_weights, input_weights,
                                     input_indices, input_values,
                                     entry_weights, input_block_size,
                                     input_is_transpose, name=None):
  r"""Computes the partial left-hand side and right-hand side of WALS update. For

  observed entry input_indices[i]=[m, n] with value input_values[i]=v, the weight
  should be specified either through (1) entry_weights[i] or (2) through
  input_weights[m] * factor_weights[n] (if input_is_transpose is false) or
  input_weights[n] * factor_weights[m] (if input_is_transpose is true). Note it is
  not allowed to have both (1) and (2) specified at the same time: when one
  approach is used, the input tensors related to the other approach must be kept
  completely empty.

  Args:
    factors: A `Tensor` of type `float32`. Matrix of size m * k.
    factor_weights: A `Tensor` of type `float32`.
      Vector of size m. Corresponds to column weights. Should be empty
      if entry_weights is used.
    unobserved_weights: A `Tensor` of type `float32`.
      Scalar. Weight for unobserved input entries.
    input_weights: A `Tensor` of type `float32`.
      Vector of size n. Corresponds to row weights. Should be empty if
      entry_weights is used.
    input_indices: A `Tensor` of type `int64`.
      Indices for the input SparseTensor.
    input_values: A `Tensor` of type `float32`.
      Values for the input SparseTensor.
    entry_weights: A `Tensor` of type `float32`.
      If not empty, this must be the same length as input_values and is used
      as the per-entry non-zero weight. If this is used, input_weights and
      factor_weights must be empty.
    input_block_size: A `Tensor` of type `int64`.
      Scalar. Number of rows spanned by input.
    input_is_transpose: A `Tensor` of type `bool`.
      If true, logically transposes the input for processing.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (partial_lhs, partial_rhs).

    partial_lhs: A `Tensor` of type `float32`. 3-D tensor with size input_block_size x k x k.
    partial_rhs: A `Tensor` of type `float32`. Matrix with size input_block_size x k.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "WALSComputePartialLhsAndRhs", name, _ctx._post_execution_callbacks,
        factors, factor_weights, unobserved_weights, input_weights,
        input_indices, input_values, entry_weights, input_block_size,
        input_is_transpose)
      _result = _WALSComputePartialLhsAndRhsOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return wals_compute_partial_lhs_and_rhs_eager_fallback(
            factors, factor_weights, unobserved_weights, input_weights,
            input_indices, input_values, entry_weights, input_block_size,
            input_is_transpose, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              wals_compute_partial_lhs_and_rhs, factors=factors,
                                                factor_weights=factor_weights,
                                                unobserved_weights=unobserved_weights,
                                                input_weights=input_weights,
                                                input_indices=input_indices,
                                                input_values=input_values,
                                                entry_weights=entry_weights,
                                                input_block_size=input_block_size,
                                                input_is_transpose=input_is_transpose,
                                                name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "WALSComputePartialLhsAndRhs", factors=factors,
                                       factor_weights=factor_weights,
                                       unobserved_weights=unobserved_weights,
                                       input_weights=input_weights,
                                       input_indices=input_indices,
                                       input_values=input_values,
                                       entry_weights=entry_weights,
                                       input_block_size=input_block_size,
                                       input_is_transpose=input_is_transpose,
                                       name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          wals_compute_partial_lhs_and_rhs, factors=factors,
                                            factor_weights=factor_weights,
                                            unobserved_weights=unobserved_weights,
                                            input_weights=input_weights,
                                            input_indices=input_indices,
                                            input_values=input_values,
                                            entry_weights=entry_weights,
                                            input_block_size=input_block_size,
                                            input_is_transpose=input_is_transpose,
                                            name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = None
  _execute.record_gradient(
      "WALSComputePartialLhsAndRhs", _inputs_flat, _attrs, _result, name)
  _result = _WALSComputePartialLhsAndRhsOutput._make(_result)
  return _result
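
# A minimal construction sketch, assuming TF 1.x graph mode and the
# row/column-weight form, so entry_weights stays empty as the doc above
# requires.
import tensorflow as tf

lhs, rhs = wals_compute_partial_lhs_and_rhs(
    factors=tf.random_normal([3, 2]),          # m=3 factors of rank k=2
    factor_weights=tf.ones([3]),               # column weights
    unobserved_weights=tf.constant(0.1),
    input_weights=tf.ones([2]),                # row weights for this block
    input_indices=tf.constant([[0, 0], [1, 2]], dtype=tf.int64),
    input_values=tf.constant([1.0, 2.0]),
    entry_weights=tf.zeros([0]),               # unused in this form
    input_block_size=tf.constant(2, dtype=tf.int64),
    input_is_transpose=tf.constant(False))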
Example #20
def random_poisson_v2(shape, rate, seed=0, seed2=0, dtype=_dtypes.int64, name=None):
  r"""Outputs random values from the Poisson distribution(s) described by rate.

  This op uses two algorithms, depending on rate. If rate >= 10, then
  the algorithm by Hormann is used to acquire samples via
  transformation-rejection.
  See http://www.sciencedirect.com/science/article/pii/0167668793909974.

  Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
  random variables.
  See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
  Programming, Volume 2. Addison Wesley.

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D integer tensor. Shape of independent samples to draw from each
      distribution described by the shape parameters given in rate.
    rate: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`.
      A tensor in which each scalar is a "rate" parameter describing the
      associated poisson distribution.
    seed: An optional `int`. Defaults to `0`.
      If either `seed` or `seed2` are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    dtype: An optional `tf.DType` from: `tf.half, tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    if dtype is None:
      dtype = _dtypes.int64
    dtype = _execute.make_type(dtype, "dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RandomPoissonV2", shape=shape, rate=rate, seed=seed, seed2=seed2,
        dtype=dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("seed", _op.get_attr("seed"), "seed2", _op.get_attr("seed2"),
              "S", _op.get_attr("S"), "R", _op.get_attr("R"), "dtype",
              _op.get_attr("dtype"))
    _execute.record_gradient(
      "RandomPoissonV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "RandomPoissonV2", name,
        _ctx._post_execution_callbacks, shape, rate, "seed", seed, "seed2",
        seed2, "dtype", dtype)
      return _result
    except _core._FallbackException:
      return random_poisson_v2_eager_fallback(
          shape, rate, seed=seed, seed2=seed2, dtype=dtype, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
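
# A minimal usage sketch, assuming TF 1.x graph mode; note the output dtype
# defaults to int64 even though `rate` is floating point.
import tensorflow as tf

rate = tf.constant([1.0, 4.0, 10.0])
samples = random_poisson_v2(shape=[8], rate=rate, seed=7)  # shape [8, 3]
with tf.Session() as sess:
    print(sess.run(samples))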
Example #21
def py_func(input, token, Tout, name=None):
    r"""Invokes a python function to compute func(input)->output.

  This operation is considered stateful. For a stateless version, see
  PyFuncStateless.

  Args:
    input: A list of `Tensor` objects.
      List of Tensors that will provide input to the Op.
    token: A `string`.
      A token representing a registered python function in this address space.
    Tout: A list of `tf.DTypes`. Data types of the outputs from the op.
      The length of the list specifies the number of outputs.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "PyFunc", name, _ctx._post_execution_callbacks, input, "token",
                token, "Tout", Tout)
            return _result
        except _core._FallbackException:
            try:
                return py_func_eager_fallback(input,
                                              token=token,
                                              Tout=Tout,
                                              name=name,
                                              ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    token = _execute.make_str(token, "token")
    if not isinstance(Tout, (list, tuple)):
        raise TypeError("Expected list for 'Tout' argument to "
                        "'py_func' Op, not %r." % Tout)
    Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
    _, _, _op = _op_def_lib._apply_op_helper("PyFunc",
                                             input=input,
                                             token=token,
                                             Tout=Tout,
                                             name=name)
    _result = _op.outputs[:]
    if not _result:
        return _op
    _inputs_flat = _op.inputs
    _attrs = ("token", _op.get_attr("token"), "Tin", _op.get_attr("Tin"),
              "Tout", _op.get_attr("Tout"))
    _execute.record_gradient("PyFunc", _inputs_flat, _attrs, _result, name)
    return _result
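
# A minimal sketch, assuming TF 1.x: the public wrapper tf.py_func registers
# the Python callable and supplies the `token` attribute of this op for you.
import numpy as np
import tensorflow as tf

def _square(x):
    return (x * x).astype(np.float32)

y = tf.py_func(_square, [tf.constant([1., 2., 3.])], tf.float32)
with tf.Session() as sess:
    print(sess.run(y))  # [1. 4. 9.]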
Example #22
def random_uniform_int(shape, minval, maxval, seed=0, seed2=0, name=None):
  r"""Outputs random integers from a uniform distribution.

  The generated values are uniform integers in the range `[minval, maxval)`.
  The lower bound `minval` is included in the range, while the upper bound
  `maxval` is excluded.

  The random integers are slightly biased unless `maxval - minval` is an exact
  power of two.  The bias is small for values of `maxval - minval` significantly
  smaller than the range of the output (either `2^32` or `2^64`).

  Args:
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The shape of the output tensor.
    minval: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D.  Inclusive lower bound on the generated integers.
    maxval: A `Tensor`. Must have the same type as `minval`.
      0-D.  Exclusive upper bound on the generated integers.
    seed: An optional `int`. Defaults to `0`.
      If either `seed` or `seed2` are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `minval`.
  """
  _ctx = _context.context()
  if not _ctx.executing_eagerly():
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _, _, _op = _op_def_lib._apply_op_helper(
        "RandomUniformInt", shape=shape, minval=minval, maxval=maxval,
        seed=seed, seed2=seed2, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("seed", _op.get_attr("seed"), "seed2", _op.get_attr("seed2"),
              "Tout", _op.get_attr("Tout"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "RandomUniformInt", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._handle, _ctx.device_name, "RandomUniformInt", name,
        _ctx._post_execution_callbacks, shape, minval, maxval, "seed", seed,
        "seed2", seed2)
      return _result
    except _core._FallbackException:
      return random_uniform_int_eager_fallback(
          shape, minval, maxval, seed=seed, seed2=seed2, name=name)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
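
# A minimal usage sketch, assuming TF 1.x graph mode: integers drawn uniformly
# from the half-open range [minval, maxval).
import tensorflow as tf

samples = random_uniform_int(shape=[6], minval=0, maxval=10, seed=3)
with tf.Session() as sess:
    print(sess.run(samples))  # six ints in [0, 10)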
Example #23
def single_image_random_dot_stereograms(depth_values,
                                        hidden_surface_removal=True,
                                        convergence_dots_size=8,
                                        dots_per_inch=72,
                                        eye_separation=2.5,
                                        mu=0.3333,
                                        normalize=True,
                                        normalize_max=-100,
                                        normalize_min=100,
                                        border_level=0,
                                        number_colors=256,
                                        output_image_shape=[1024, 768, 1],
                                        output_data_window=[1022, 757],
                                        name=None):
    r"""Outputs a single image random dot stereogram for export via encode_PNG/JPG OP.

  Given the 2-D tensor 'depth_values' with encoded Z values, this operation will
  encode 3-D data into a 2-D image.  The output of this Op is suitable for the
  encode_PNG/JPG ops.  Be careful with image compression as this may corrupt the
  encoded 3-D data within the image.

  This Op is based upon:
  'http://www.learningace.com/doc/4331582/b6ab058d1e206d68ab60e4e1ead2fe6e/sirds-paper'

  Example use which outputs a SIRDS image as picture_out.png:
  ```python
  img=[[1,2,3,3,2,1],
       [1,2,3,4,5,2],
       [1,2,3,4,5,3],
       [1,2,3,4,5,4],
       [6,5,4,4,5,5]]

  session = tf.InteractiveSession()

  sirds = single_image_random_dot_stereograms(img,convergence_dots_size=8,number_colors=256,normalize=True)

  out = sirds.eval()

  png = tf.image.encode_png(out).eval()

  with open('picture_out.png', 'wb') as f:
      f.write(png)
  ```

  Args:
    depth_values: A `Tensor`. Must be one of the following types: `float64`, `float32`, `int64`, `int32`.
      Z values of data to encode into the 'output_data_window' window; lower
      values are further away {0.0 floor(far), 1.0 ceiling(near) after
      normalization}. Must be a 2-D tensor.
    hidden_surface_removal: An optional `bool`. Defaults to `True`.
      Activate hidden surface removal
    convergence_dots_size: An optional `int`. Defaults to `8`.
      Black dot size in pixels to help the viewer converge the image, drawn on
      the bottom of the image.
    dots_per_inch: An optional `int`. Defaults to `72`.
      Output device in dots/inch
    eye_separation: An optional `float`. Defaults to `2.5`.
      Separation between eyes in inches
    mu: An optional `float`. Defaults to `0.3333`.
      Depth of field, Fraction of viewing distance (eg. 1/3 = .3333)
    normalize: An optional `bool`. Defaults to `True`.
      Normalize input data to [0.0, 1.0]
    normalize_max: An optional `float`. Defaults to `-100`.
      Fix MAX value for Normalization - if < MIN, autoscale
    normalize_min: An optional `float`. Defaults to `100`.
      Fix MIN value for Normalization - if > MAX, autoscale
    border_level: An optional `float`. Defaults to `0`.
      Value of border depth 0.0 {far} to 1.0 {near}
    number_colors: An optional `int`. Defaults to `256`.
      2 (Black & White),256 (grayscale), and Numbers > 256 (Full Color) are all that are supported currently
    output_image_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1024, 768, 1]`.
      Output size of returned image in X,Y, Channels 1-grayscale, 3 color (1024, 768, 1),
      channels will be updated to 3 if 'number_colors' > 256
    output_data_window: An optional `tf.TensorShape` or list of `ints`. Defaults to `[1022, 757]`.
      Size of "DATA" window, must be equal to or smaller than 'output_image_shape', will be centered
      and use 'convergence_dots_size' for best fit to avoid overlap if possible
    name: A name for the operation (optional).

  Returns:
    A tensor of size 'output_image_shape' with the encoded 'depth_values'
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "SingleImageRandomDotStereograms", name,
                _ctx._post_execution_callbacks, depth_values,
                "hidden_surface_removal", hidden_surface_removal,
                "convergence_dots_size", convergence_dots_size,
                "dots_per_inch", dots_per_inch, "eye_separation",
                eye_separation, "mu", mu, "normalize", normalize,
                "normalize_max", normalize_max, "normalize_min", normalize_min,
                "border_level", border_level, "number_colors", number_colors,
                "output_image_shape", output_image_shape, "output_data_window",
                output_data_window)
            return _result
        except _core._FallbackException:
            try:
                return single_image_random_dot_stereograms_eager_fallback(
                    depth_values,
                    hidden_surface_removal=hidden_surface_removal,
                    convergence_dots_size=convergence_dots_size,
                    dots_per_inch=dots_per_inch,
                    eye_separation=eye_separation,
                    mu=mu,
                    normalize=normalize,
                    normalize_max=normalize_max,
                    normalize_min=normalize_min,
                    border_level=border_level,
                    number_colors=number_colors,
                    output_image_shape=output_image_shape,
                    output_data_window=output_data_window,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
            except (TypeError, ValueError):
                result = _dispatch.dispatch(
                    single_image_random_dot_stereograms,
                    depth_values=depth_values,
                    hidden_surface_removal=hidden_surface_removal,
                    convergence_dots_size=convergence_dots_size,
                    dots_per_inch=dots_per_inch,
                    eye_separation=eye_separation,
                    mu=mu,
                    normalize=normalize,
                    normalize_max=normalize_max,
                    normalize_min=normalize_min,
                    border_level=border_level,
                    number_colors=number_colors,
                    output_image_shape=output_image_shape,
                    output_data_window=output_data_window,
                    name=name)
                if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
                    return result
                raise
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if hidden_surface_removal is None:
        hidden_surface_removal = True
    hidden_surface_removal = _execute.make_bool(hidden_surface_removal,
                                                "hidden_surface_removal")
    if convergence_dots_size is None:
        convergence_dots_size = 8
    convergence_dots_size = _execute.make_int(convergence_dots_size,
                                              "convergence_dots_size")
    if dots_per_inch is None:
        dots_per_inch = 72
    dots_per_inch = _execute.make_int(dots_per_inch, "dots_per_inch")
    if eye_separation is None:
        eye_separation = 2.5
    eye_separation = _execute.make_float(eye_separation, "eye_separation")
    if mu is None:
        mu = 0.3333
    mu = _execute.make_float(mu, "mu")
    if normalize is None:
        normalize = True
    normalize = _execute.make_bool(normalize, "normalize")
    if normalize_max is None:
        normalize_max = -100
    normalize_max = _execute.make_float(normalize_max, "normalize_max")
    if normalize_min is None:
        normalize_min = 100
    normalize_min = _execute.make_float(normalize_min, "normalize_min")
    if border_level is None:
        border_level = 0
    border_level = _execute.make_float(border_level, "border_level")
    if number_colors is None:
        number_colors = 256
    number_colors = _execute.make_int(number_colors, "number_colors")
    if output_image_shape is None:
        output_image_shape = [1024, 768, 1]
    output_image_shape = _execute.make_shape(output_image_shape,
                                             "output_image_shape")
    if output_data_window is None:
        output_data_window = [1022, 757]
    output_data_window = _execute.make_shape(output_data_window,
                                             "output_data_window")
    try:
        _, _, _op = _op_def_lib._apply_op_helper(
            "SingleImageRandomDotStereograms",
            depth_values=depth_values,
            hidden_surface_removal=hidden_surface_removal,
            convergence_dots_size=convergence_dots_size,
            dots_per_inch=dots_per_inch,
            eye_separation=eye_separation,
            mu=mu,
            normalize=normalize,
            normalize_max=normalize_max,
            normalize_min=normalize_min,
            border_level=border_level,
            number_colors=number_colors,
            output_image_shape=output_image_shape,
            output_data_window=output_data_window,
            name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(
            single_image_random_dot_stereograms,
            depth_values=depth_values,
            hidden_surface_removal=hidden_surface_removal,
            convergence_dots_size=convergence_dots_size,
            dots_per_inch=dots_per_inch,
            eye_separation=eye_separation,
            mu=mu,
            normalize=normalize,
            normalize_max=normalize_max,
            normalize_min=normalize_min,
            border_level=border_level,
            number_colors=number_colors,
            output_image_shape=output_image_shape,
            output_data_window=output_data_window,
            name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "hidden_surface_removal",
              _op.get_attr("hidden_surface_removal"), "convergence_dots_size",
              _op.get_attr("convergence_dots_size"), "dots_per_inch",
              _op.get_attr("dots_per_inch"), "eye_separation",
              _op.get_attr("eye_separation"), "mu", _op.get_attr("mu"),
              "normalize", _op.get_attr("normalize"), "normalize_max",
              _op.get_attr("normalize_max"), "normalize_min",
              _op.get_attr("normalize_min"), "border_level",
              _op.get_attr("border_level"), "number_colors",
              _op.get_attr("number_colors"), "output_image_shape",
              _op.get_attr("output_image_shape"), "output_data_window",
              _op.get_attr("output_data_window"))
    _execute.record_gradient("SingleImageRandomDotStereograms", _inputs_flat,
                             _attrs, _result, name)
    _result, = _result
    return _result
Example #24
def enter(data, frame_name, is_constant=False, parallel_iterations=10, name=None):
  r"""Creates or finds a child frame, and makes `data` available to the child frame.

  This op is used together with `Exit` to create loops in the graph.
  The unique `frame_name` is used by the `Executor` to identify frames. If
  `is_constant` is true, `output` is a constant in the child frame; otherwise
  it may be changed in the child frame. At most `parallel_iterations` iterations
  are run in parallel in the child frame.

  Args:
    data: A `Tensor`. The tensor to be made available to the child frame.
    frame_name: A `string`. The name of the child frame.
    is_constant: An optional `bool`. Defaults to `False`.
      If true, the output is constant within the child frame.
    parallel_iterations: An optional `int`. Defaults to `10`.
      The number of iterations allowed to run in parallel.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Enter",
        name, _ctx._post_execution_callbacks, data, "frame_name", frame_name,
        "is_constant", is_constant, "parallel_iterations",
        parallel_iterations)
      return _result
    except _core._FallbackException:
      try:
        return enter_eager_fallback(
            data, frame_name=frame_name, is_constant=is_constant,
            parallel_iterations=parallel_iterations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  frame_name = _execute.make_str(frame_name, "frame_name")
  if is_constant is None:
    is_constant = False
  is_constant = _execute.make_bool(is_constant, "is_constant")
  if parallel_iterations is None:
    parallel_iterations = 10
  parallel_iterations = _execute.make_int(parallel_iterations, "parallel_iterations")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Enter", data=data, frame_name=frame_name, is_constant=is_constant,
                 parallel_iterations=parallel_iterations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op.get_attr("T"), "frame_name", _op.get_attr("frame_name"),
            "is_constant", _op.get_attr("is_constant"), "parallel_iterations",
            _op.get_attr("parallel_iterations"))
  _execute.record_gradient(
      "Enter", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
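
# A minimal sketch, assuming TF 1.x graph mode: Enter nodes are normally
# created implicitly by tf.while_loop rather than constructed by hand.
import tensorflow as tf

i = tf.constant(0)
loop = tf.while_loop(lambda i: tf.less(i, 10),
                     lambda i: tf.add(i, 1),
                     [i])  # builds Enter/Exit nodes for the loop frame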
Example #25
def generate_vocab_remapping(new_vocab_file,
                             old_vocab_file,
                             new_vocab_offset,
                             num_new_vocab,
                             old_vocab_size=-1,
                             name=None):
    r"""Given a path to new and old vocabulary files, returns a remapping Tensor of

  length `num_new_vocab`, where `remapping[i]` contains the row number in the old

  vocabulary that corresponds to row `i` in the new vocabulary (starting at line

  `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`

  in the new vocabulary is not in the old vocabulary.  The old vocabulary is

  constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the

  default value of -1.

  

  `num_vocab_offset` enables

  use in the partitioned variable case, and should generally be set through

  examining partitioning info.  The format of the files should be a text file,

  with each line containing a single entity within the vocabulary.

  

  For example, with `new_vocab_file` a text file containing each of the following

  elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],

  `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be

  `[0, -1, 2]`.

  

  The op also returns a count of how many entries in the new vocabulary

  were present in the old vocabulary, which is used to calculate the number of

  values to initialize in a weight matrix remapping

  

  This functionality can be used to remap both row vocabularies (typically,

  features) and column vocabularies (typically, classes) from TensorFlow

  checkpoints.  Note that the partitioning logic relies on contiguous vocabularies

  corresponding to div-partitioned variables.  Moreover, the underlying remapping

  uses an IndexTable (as opposed to an inexact CuckooTable), so client code should

  use the corresponding index_table_from_file() as the FeatureColumn framework

  does (as opposed to tf.feature_to_id(), which uses a CuckooTable).

  Args:
    new_vocab_file: A `Tensor` of type `string`. Path to the new vocab file.
    old_vocab_file: A `Tensor` of type `string`. Path to the old vocab file.
    new_vocab_offset: An `int` that is `>= 0`.
      How many entries into the new vocab file to start reading.
    num_new_vocab: An `int` that is `>= 0`.
      Number of entries in the new vocab file to remap.
    old_vocab_size: An optional `int` that is `>= -1`. Defaults to `-1`.
      Number of entries in the old vocab file to consider.  If -1, use the
      entire old vocabulary.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (remapping, num_present).

    remapping: A `Tensor` of type `int64`.
    num_present: A `Tensor` of type `int32`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        new_vocab_offset = _execute.make_int(new_vocab_offset,
                                             "new_vocab_offset")
        num_new_vocab = _execute.make_int(num_new_vocab, "num_new_vocab")
        if old_vocab_size is None:
            old_vocab_size = -1
        old_vocab_size = _execute.make_int(old_vocab_size, "old_vocab_size")
        _, _, _op = _op_def_lib._apply_op_helper(
            "GenerateVocabRemapping",
            new_vocab_file=new_vocab_file,
            old_vocab_file=old_vocab_file,
            new_vocab_offset=new_vocab_offset,
            num_new_vocab=num_new_vocab,
            old_vocab_size=old_vocab_size,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("new_vocab_offset", _op.get_attr("new_vocab_offset"),
                  "num_new_vocab", _op.get_attr("num_new_vocab"),
                  "old_vocab_size", _op.get_attr("old_vocab_size"))
        _execute.record_gradient("GenerateVocabRemapping", _inputs_flat,
                                 _attrs, _result, name)
        _result = _GenerateVocabRemappingOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "GenerateVocabRemapping", name, _ctx._post_execution_callbacks,
                new_vocab_file, old_vocab_file, "new_vocab_offset",
                new_vocab_offset, "num_new_vocab", num_new_vocab,
                "old_vocab_size", old_vocab_size)
            _result = _GenerateVocabRemappingOutput._make(_result)
            return _result
        except _core._FallbackException:
            return generate_vocab_remapping_eager_fallback(
                new_vocab_file,
                old_vocab_file,
                new_vocab_offset=new_vocab_offset,
                num_new_vocab=num_new_vocab,
                old_vocab_size=old_vocab_size,
                name=name,
                ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
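
# A minimal construction sketch, assuming TF 1.x graph mode; the vocab paths
# below are hypothetical one-entity-per-line text files.
import tensorflow as tf

remapping, num_present = generate_vocab_remapping(
    new_vocab_file=tf.constant("/tmp/new_vocab.txt"),
    old_vocab_file=tf.constant("/tmp/old_vocab.txt"),
    new_vocab_offset=0,
    num_new_vocab=100)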
Example #26
def try_rpc(address,
            method,
            request,
            protocol="",
            fail_fast=True,
            timeout_in_ms=0,
            name=None):
    r"""TODO: add doc.

  Args:
    address: A `Tensor` of type `string`.
    method: A `Tensor` of type `string`.
    request: A `Tensor` of type `string`.
    protocol: An optional `string`. Defaults to `""`.
    fail_fast: An optional `bool`. Defaults to `True`.
    timeout_in_ms: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (response, status_code, status_message).

    response: A `Tensor` of type `string`.
    status_code: A `Tensor` of type `int32`.
    status_message: A `Tensor` of type `string`.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "TryRpc", name, _ctx.post_execution_callbacks, address, method,
                request, "protocol", protocol, "fail_fast", fail_fast,
                "timeout_in_ms", timeout_in_ms)
            _result = _TryRpcOutput._make(_result)
            return _result
        except _core._FallbackException:
            try:
                return try_rpc_eager_fallback(address,
                                              method,
                                              request,
                                              protocol=protocol,
                                              fail_fast=fail_fast,
                                              timeout_in_ms=timeout_in_ms,
                                              name=name,
                                              ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
            except (TypeError, ValueError):
                result = _dispatch.dispatch(try_rpc,
                                            address=address,
                                            method=method,
                                            request=request,
                                            protocol=protocol,
                                            fail_fast=fail_fast,
                                            timeout_in_ms=timeout_in_ms,
                                            name=name)
                if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
                    return result
                raise
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if protocol is None:
        protocol = ""
    protocol = _execute.make_str(protocol, "protocol")
    if fail_fast is None:
        fail_fast = True
    fail_fast = _execute.make_bool(fail_fast, "fail_fast")
    if timeout_in_ms is None:
        timeout_in_ms = 0
    timeout_in_ms = _execute.make_int(timeout_in_ms, "timeout_in_ms")
    try:
        _, _, _op = _op_def_lib._apply_op_helper("TryRpc",
                                                 address=address,
                                                 method=method,
                                                 request=request,
                                                 protocol=protocol,
                                                 fail_fast=fail_fast,
                                                 timeout_in_ms=timeout_in_ms,
                                                 name=name)
    except (TypeError, ValueError):
        result = _dispatch.dispatch(try_rpc,
                                    address=address,
                                    method=method,
                                    request=request,
                                    protocol=protocol,
                                    fail_fast=fail_fast,
                                    timeout_in_ms=timeout_in_ms,
                                    name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
            return result
        raise
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("protocol", _op.get_attr("protocol"), "fail_fast",
              _op.get_attr("fail_fast"), "timeout_in_ms",
              _op.get_attr("timeout_in_ms"))
    _execute.record_gradient("TryRpc", _inputs_flat, _attrs, _result, name)
    _result = _TryRpcOutput._make(_result)
    return _result
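
# A minimal construction sketch, assuming TF 1.x graph mode; unlike `rpc`,
# failures surface in the returned status tensors instead of raising. The
# endpoint below is hypothetical.
import tensorflow as tf

response, status_code, status_message = try_rpc(
    address="localhost:8500",
    method="/my.package.Service/MyMethod",
    request=tf.constant("serialized-request-proto"),
    timeout_in_ms=5000)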
Example #27
def nearest_neighbors(points, centers, k, name=None):
    r"""Selects the k nearest centers for each point.

  Rows of points are assumed to be input points. Rows of centers are assumed to be
  the list of candidate centers. For each point, the k centers that have least L2
  distance to it are computed.

  Args:
    points: A `Tensor` of type `float32`.
      Matrix of shape (n, d). Rows are assumed to be input points.
    centers: A `Tensor` of type `float32`.
      Matrix of shape (m, d). Rows are assumed to be centers.
    k: A `Tensor` of type `int64`.
      Scalar. Number of nearest centers to return for each point. If k is larger
      than m, then only m centers are returned.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (nearest_center_indices, nearest_center_distances).

    nearest_center_indices: A `Tensor` of type `int64`. Matrix of shape (n, min(m, k)). Each row contains the
      indices of the centers closest to the corresponding point, ordered by
      increasing distance.
    nearest_center_distances: A `Tensor` of type `float32`. Matrix of shape (n, min(m, k)). Each row contains the
      squared L2 distance to the corresponding center in nearest_center_indices.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        _, _, _op = _op_def_lib._apply_op_helper("NearestNeighbors",
                                                 points=points,
                                                 centers=centers,
                                                 k=k,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = None
        _execute.record_gradient("NearestNeighbors", _inputs_flat, _attrs,
                                 _result, name)
        _result = _NearestNeighborsOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "NearestNeighbors", name, _ctx._post_execution_callbacks,
                points, centers, k)
            _result = _NearestNeighborsOutput._make(_result)
            return _result
        except _core._FallbackException:
            return nearest_neighbors_eager_fallback(points,
                                                    centers,
                                                    k,
                                                    name=name,
                                                    ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
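
A small usage sketch for nearest_neighbors, with toy data chosen so the results are easy to verify by hand (a hedged illustration, not part of the original module):

import tensorflow as tf

# Three 2-D points, two candidate centers, k = 1.
points = tf.constant([[0.0, 0.0],
                      [1.0, 1.0],
                      [5.0, 5.0]])      # shape (3, 2)
centers = tf.constant([[0.0, 0.0],
                       [4.0, 4.0]])     # shape (2, 2)
k = tf.constant(1, dtype=tf.int64)      # must be a scalar int64 tensor

indices, distances = nearest_neighbors(points, centers, k)
# indices   -> [[0], [0], [1]]   (index of the closest center per point)
# distances -> [[0.], [2.], [2.]] (squared L2 distances)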
Example #28
def write_image_summary(writer,
                        step,
                        tag,
                        tensor,
                        bad_color,
                        max_images=3,
                        name=None):
    r"""TODO: add doc.

  Args:
    writer: A `Tensor` of type `resource`.
    step: A `Tensor` of type `int64`.
    tag: A `Tensor` of type `string`.
    tensor: A `Tensor`. Must be one of the following types: `uint8`, `float32`, `half`.
    bad_color: A `Tensor` of type `uint8`. Color to use for pixels with
      non-finite values.
    max_images: An optional `int` that is `>= 1`. Defaults to `3`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "WriteImageSummary", name, _ctx._post_execution_callbacks,
                writer, step, tag, tensor, bad_color, "max_images", max_images)
            return _result
        except _core._FallbackException:
            try:
                return write_image_summary_eager_fallback(
                    writer,
                    step,
                    tag,
                    tensor,
                    bad_color,
                    max_images=max_images,
                    name=name,
                    ctx=_ctx)
            except _core._SymbolicException:
                pass  # Add nodes to the TensorFlow graph.
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    # Add nodes to the TensorFlow graph.
    if max_images is None:
        max_images = 3
    max_images = _execute.make_int(max_images, "max_images")
    _, _, _op = _op_def_lib._apply_op_helper("WriteImageSummary",
                                             writer=writer,
                                             step=step,
                                             tag=tag,
                                             tensor=tensor,
                                             bad_color=bad_color,
                                             max_images=max_images,
                                             name=name)
    return _op
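
The raw op above takes a summary-writer resource tensor, so it is seldom called directly; the public image-summary API produces the same kind of summary. A hedged TF 2.x sketch (the log directory is a placeholder, and `max_outputs` plays the role of `max_images`):

import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/logs")   # placeholder logdir
with writer.as_default():
    image = tf.zeros([1, 8, 8, 3], dtype=tf.float32)  # one 8x8 RGB image
    tf.summary.image("example_image", image, step=0, max_outputs=3)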
Example #29
def dense_to_dense_set_operation(set1,
                                 set2,
                                 set_operation,
                                 validate_indices=True,
                                 name=None):
    r"""Applies a set operation along the last dimension of two `Tensor` inputs.

  See SetOperationOp::SetOperationFromContext for values of `set_operation`.

  Output `result` is a `SparseTensor` represented by `result_indices`,
  `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
  has rank `n` and the same first `n-1` dimensions as `set1` and `set2`. The
  `n`th dimension contains the result of `set_operation` applied to the
  corresponding `[0...n-1]` dimension of `set1` and `set2`.

  Args:
    set1: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
      `Tensor` with rank `n`. The first `n-1` dimensions must be the same as
      `set2`. Dimension `n` contains values in a set; duplicates are allowed
      but ignored.
    set2: A `Tensor`. Must have the same type as `set1`.
      `Tensor` with rank `n`. The first `n-1` dimensions must be the same as
      `set1`. Dimension `n` contains values in a set; duplicates are allowed
      but ignored.
    set_operation: A `string`.
    validate_indices: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (result_indices, result_values, result_shape).

    result_indices: A `Tensor` of type `int64`.
    result_values: A `Tensor`. Has the same type as `set1`.
    result_shape: A `Tensor` of type `int64`.
  """
    _ctx = _context._context
    if _ctx is None or not _ctx._eager_context.is_eager:
        set_operation = _execute.make_str(set_operation, "set_operation")
        if validate_indices is None:
            validate_indices = True
        validate_indices = _execute.make_bool(validate_indices,
                                              "validate_indices")
        _, _, _op = _op_def_lib._apply_op_helper(
            "DenseToDenseSetOperation", set1=set1, set2=set2,
            set_operation=set_operation, validate_indices=validate_indices,
            name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("set_operation", _op.get_attr("set_operation"),
                  "validate_indices", _op.get_attr("validate_indices"), "T",
                  _op.get_attr("T"))
        _execute.record_gradient("DenseToDenseSetOperation", _inputs_flat,
                                 _attrs, _result, name)
        _result = _DenseToDenseSetOperationOutput._make(_result)
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._eager_context.device_name,
                "DenseToDenseSetOperation", name,
                _ctx._post_execution_callbacks, set1, set2, "set_operation",
                set_operation, "validate_indices", validate_indices)
            _result = _DenseToDenseSetOperationOutput._make(_result)
            return _result
        except _core._FallbackException:
            return dense_to_dense_set_operation_eager_fallback(
                set1, set2, set_operation=set_operation,
                validate_indices=validate_indices, name=name, ctx=_ctx)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
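
A hedged usage sketch: the `set_operation` strings accepted by SetOperationOp are "a-b", "b-a", "intersection", and "union". With one integer set per row, intersection keeps the elements common to both rows:

import tensorflow as tf

set1 = tf.constant([[1, 2, 3, 4]], dtype=tf.int64)
set2 = tf.constant([[3, 4, 5, 6]], dtype=tf.int64)

# The three outputs are the components of a SparseTensor.
indices, values, shape = dense_to_dense_set_operation(
    set1, set2, set_operation="intersection")
# values -> [3, 4]; shape -> [1, 2] (one row, at most two common elements)

For dense inputs like these, the public `tf.sets` helpers wrap this op and return an assembled `SparseTensor` instead of raw components.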
Example #30
def unbatch(batched_tensor,
            batch_index,
            id,
            timeout_micros,
            container="",
            shared_name="",
            name=None):
    r"""Reverses the operation of Batch for a single output Tensor.

  An instance of Unbatch either receives an empty batched_tensor, in which case it
  asynchronously waits until the values become available from a concurrently
  running instance of Unbatch with the same container and shared_name, or receives
  a non-empty batched_tensor, in which case it finalizes all other concurrently
  running instances and outputs its own element from the batch.

  batched_tensor: The possibly transformed output of Batch. The size of the first
   dimension should remain unchanged by the transformations for the operation to
   work.
  batch_index: The matching batch_index obtained from Batch.
  id: The id scalar emitted by Batch.
  unbatched_tensor: The Tensor corresponding to this execution.
  timeout_micros: Maximum amount of time (in microseconds) to wait to receive the
   batched input tensor associated with a given invocation of the op.
  container: Container to control resource sharing.
  shared_name: Instances of Unbatch with the same container and shared_name are
   assumed to possibly belong to the same batch. If left empty, the op name will
   be used as the shared name.

  Args:
    batched_tensor: A `Tensor`.
    batch_index: A `Tensor` of type `int64`.
    id: A `Tensor` of type `int64`.
    timeout_micros: An `int`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `batched_tensor`.
  """
    _ctx = _context.context()
    if not _ctx.executing_eagerly():
        timeout_micros = _execute.make_int(timeout_micros, "timeout_micros")
        if container is None:
            container = ""
        container = _execute.make_str(container, "container")
        if shared_name is None:
            shared_name = ""
        shared_name = _execute.make_str(shared_name, "shared_name")
        _, _, _op = _op_def_lib._apply_op_helper("Unbatch",
                                                 batched_tensor=batched_tensor,
                                                 batch_index=batch_index,
                                                 id=id,
                                                 timeout_micros=timeout_micros,
                                                 container=container,
                                                 shared_name=shared_name,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("timeout_micros", _op.get_attr("timeout_micros"),
                  "container", _op.get_attr("container"), "shared_name",
                  _op.get_attr("shared_name"), "T", _op.get_attr("T"))
        _execute.record_gradient("Unbatch", _inputs_flat, _attrs, _result,
                                 name)
        _result, = _result
        return _result

    else:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._handle, _ctx.device_name, "Unbatch", name,
                _ctx._post_execution_callbacks, batched_tensor, batch_index,
                id, "timeout_micros", timeout_micros, "container", container,
                "shared_name", shared_name)
            return _result
        except _core._FallbackException:
            return unbatch_eager_fallback(batched_tensor,
                                          batch_index,
                                          id,
                                          timeout_micros=timeout_micros,
                                          container=container,
                                          shared_name=shared_name,
                                          name=name)
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
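
A hedged, graph-oriented sketch pairing Batch and Unbatch as the docstring describes: the batched tensor is transformed (keeping the first dimension intact) and Unbatch then routes each invocation's rows back to it. All timeout values are illustrative:

import tensorflow as tf

x = tf.constant([[1.0, 2.0]])          # this invocation's contribution
batched, batch_idx, op_id = batch(
    [x],
    num_batch_threads=1,
    max_batch_size=4,
    batch_timeout_micros=100000,       # flush an incomplete batch after 0.1 s
    grad_timeout_micros=100000)
y = batched[0] * 2.0                   # per-batch work; first dim unchanged
out = unbatch(y, batch_idx, op_id, timeout_micros=100000)
# `out` contains only the rows contributed by this invocation of Batch.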