Code example #1
def piecewise_constant(x, boundaries, values, name=None):
  """ Piecewise constant from boundaries and interval values.

  Example: use a learning rate that's 1.0 for the first 100001 steps (that is,
    while `x <= 100000`), 0.5 for steps 100001 to 110000, and 0.1 for any
    additional steps.

  ```python
  global_step = tf.Variable(0, trainable=False)
  boundaries = [100000, 110000]
  values = [1.0, 0.5, 0.1]
  learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)

  # Later, whenever we perform an optimization step, we increment global_step.
  ```

  Args:
    x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
      `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
    boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
      increasing entries, and with all elements having the same type as `x`.
    values: A list of `Tensor`s or `float`s or `int`s that specifies the values
      for the intervals defined by `boundaries`. It should have one more element
      than `boundaries`, and all elements should have the same type.
    name: A string. Optional name of the operation. Defaults to
      'PiecewiseConstant'.

  Returns:
    A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
    `values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
    and `values[-1]` when `x > boundaries[-1]`.
  """

  with ops.name_scope(name, 'PiecewiseConstant',
                      [x, boundaries, values, name]) as name:
    x = ops.convert_to_tensor(x)
    # Avoid explicit conversion to x's dtype. This could result in faulty
    # comparisons, for example if floats are converted to integers.
    boundaries = ops.convert_n_to_tensor(boundaries)
    if not all(b.dtype == x.dtype for b in boundaries):
      raise ValueError('boundaries must have the same dtype as x.')
    # TODO(rdipietro): Ensure that boundaries' elements are strictly increasing.
    values = ops.convert_n_to_tensor(values)
    if not all(v.dtype == values[0].dtype for v in values):
      raise ValueError('values must have elements all with the same dtype.')

    pred_fn_pairs = {}
    pred_fn_pairs[x <= boundaries[0]] = lambda: values[0]
    pred_fn_pairs[x > boundaries[-1]] = lambda: values[-1]
    for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
      # Need to bind v here; can do this with lambda v=v: ...
      pred = (x > low) & (x <= high)
      pred_fn_pairs[pred] = lambda v=v: v

    # The default isn't needed here because our conditions are mutually
    # exclusive and exhaustive, but tf.case requires it.
    default = lambda: values[0]
    return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
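
For context, a minimal sketch of how a schedule like this might be wired into a TF1-style training loop; `tf.compat.v1` is assumed, and the quadratic loss is just a stand-in:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.piecewise_constant(
    global_step, boundaries=[100000, 110000], values=[1.0, 0.5, 0.1])

w = tf.Variable(2.0)
loss = tf.square(w)  # toy loss, only here to drive the optimizer
opt = tf.train.GradientDescentOptimizer(learning_rate)
# Passing global_step makes minimize() increment it on every step,
# which is what advances the schedule.
train_op = opt.minimize(loss, global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(learning_rate))  # 1.0 while global_step <= 100000
```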
Code example #2
 def testFloat(self):
   np.random.seed(12345)
   x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
   tf_x = ops.convert_n_to_tensor(x)
   self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x))
   self.assertAllClose(x[0] * 5,
                       math_ops.accumulate_n([tf_x[0]] * 5))
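
A standalone sketch of what these tests exercise: `accumulate_n` element-wise sums a list of same-shape tensors, and `convert_n_to_tensor` is the list counterpart of `convert_to_tensor` (approximated here with the public API; eager TF 2.x assumed):

```python
import numpy as np
import tensorflow as tf

x = [np.random.random((2, 3)) - 0.5 for _ in range(5)]
# Convert each array in the list to a tensor, then sum them element-wise.
tf_x = [tf.convert_to_tensor(v) for v in x]
total = tf.math.accumulate_n(tf_x)
np.testing.assert_allclose(total.numpy(), sum(x))
```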
Code example #3
  def test_mean(self):
    m = metrics.Mean(name='my_mean')

    # check config
    self.assertEqual(m.name, 'my_mean')
    self.assertTrue(m.stateful)
    self.assertEqual(m.dtype, dtypes.float32)
    self.assertEqual(len(m.variables), 2)
    self.evaluate(variables.global_variables_initializer())

    # check initial state
    self.assertEqual(self.evaluate(m.total), 0)
    self.assertEqual(self.evaluate(m.count), 0)

    # check __call__()
    self.assertEqual(self.evaluate(m(100)), 100)
    self.assertEqual(self.evaluate(m.total), 100)
    self.assertEqual(self.evaluate(m.count), 1)

    # check update_state() and result() + state accumulation + tensor input
    update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
    self.evaluate(update_op)
    self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
    self.assertEqual(self.evaluate(m.total), 106)  # 100 + 1 + 5
    self.assertEqual(self.evaluate(m.count), 3)

    # check reset_states()
    m.reset_states()
    self.assertEqual(self.evaluate(m.total), 0)
    self.assertEqual(self.evaluate(m.count), 0)
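
The same behaviour the test asserts is visible through the public Keras API; a minimal eager-mode sketch, assuming a TF 2.x release where `tf.keras.metrics.Mean` still exposes `reset_states`:

```python
import tensorflow as tf

m = tf.keras.metrics.Mean(name='my_mean')
m.update_state(100)                             # total=100, count=1
m.update_state(tf.convert_to_tensor([1., 5.]))  # total=106, count=3
print(float(m.result()))  # 35.33... == 106 / 3
m.reset_states()
print(float(m.result()))  # 0.0 after reset
```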
Code example #4
 def testInt(self):
   np.random.seed(54321)
   x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
   tf_x = ops.convert_n_to_tensor(x)
   with self.test_session(use_gpu=True):
     self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
     self.assertAllEqual(x[0] * 6, math_ops.accumulate_n([tf_x[0]] * 6).eval())
Code example #5
File: xla_ops.py Project: whqkdhfh13/sswp
def xla_launch_eager_fallback(constants, args, resources, Tresults, function, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function xla_launch
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(resources, (list, tuple)):
    raise TypeError(
        "Expected list for 'resources' argument to "
        "'xla_launch' Op, not %r." % resources)
  _attr_Nresources = len(resources)
  if not isinstance(Tresults, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tresults' argument to "
        "'xla_launch' Op, not %r." % Tresults)
  Tresults = [_execute.make_type(_t, "Tresults") for _t in Tresults]
  _attr_Tconstants, constants = _execute.convert_to_mixed_eager_tensors(constants, _ctx)
  _attr_Targs, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
  resources = _ops.convert_n_to_tensor(resources, _dtypes.resource)
  _inputs_flat = list(constants) + list(args) + list(resources)
  _attrs = ("Tconstants", _attr_Tconstants, "Targs", _attr_Targs,
  "Nresources", _attr_Nresources, "Tresults", Tresults, "function", function)
  _result = _execute.execute(b"XlaLaunch", len(Tresults), inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "XlaLaunch", _inputs_flat, _attrs, _result, name)
  return _result
Code example #6
 def testFloat(self):
   np.random.seed(12345)
   x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
   tf_x = ops.convert_n_to_tensor(x)
   with self.test_session(use_gpu=True):
     self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
     self.assertAllClose(x[0] * 5, math_ops.accumulate_n([tf_x[0]] * 5).eval())
Code example #7
  def testBuild(self):
    graph = graph_pb2.GraphDef()
    node = graph.node.add()
    node.name = "a"
    node.op = "op0"
    node = graph.node.add()
    node.name = "b"
    node.op = "op1"
    inputs = [ops.convert_n_to_tensor([1], dtypes.int64)]
    output_types = [np.int64, np.int64]
    graph_input_node_names = ["a"]
    graph_output_node_names = ["a", "b"]
    executor_name = ""
    serialized_executor_parameters = b""
    default_graph_input_tensor_type_shapes = [[dtypes.int64, [1]]]
    default_graph_output_tensor_type_shapes = [[dtypes.int64, [1]],
                                               [dtypes.int64, [1]]]

    output_nodes = remote_fused_graph_ops.remote_fused_graph_execute(
        inputs, output_types, graph, graph_input_node_names,
        graph_output_node_names, executor_name, serialized_executor_parameters,
        default_graph_input_tensor_type_shapes,
        default_graph_output_tensor_type_shapes)
    self.assertEqual(2, len(output_nodes))
    for output_node in output_nodes:
      with self.test_session(use_gpu=False):
        output_node.eval()
Code example #8
File: gen_training_ops.py Project: whqkdhfh13/sswp
def grow_tree_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate, dropout_seed, max_tree_depth, weak_learner_type, partition_ids, gains, splits, learner_config, center_bias, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function grow_tree_ensemble
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(partition_ids, (list, tuple)):
    raise TypeError(
        "Expected list for 'partition_ids' argument to "
        "'grow_tree_ensemble' Op, not %r." % partition_ids)
  _attr_num_handlers = len(partition_ids)
  if not isinstance(gains, (list, tuple)):
    raise TypeError(
        "Expected list for 'gains' argument to "
        "'grow_tree_ensemble' Op, not %r." % gains)
  if len(gains) != _attr_num_handlers:
    raise ValueError(
        "List argument 'gains' to 'grow_tree_ensemble' Op with length %d "
        "must match length %d of argument 'partition_ids'." %
        (len(gains), _attr_num_handlers))
  if not isinstance(splits, (list, tuple)):
    raise TypeError(
        "Expected list for 'splits' argument to "
        "'grow_tree_ensemble' Op, not %r." % splits)
  if len(splits) != _attr_num_handlers:
    raise ValueError(
        "List argument 'splits' to 'grow_tree_ensemble' Op with length %d "
        "must match length %d of argument 'partition_ids'." %
        (len(splits), _attr_num_handlers))
  learner_config = _execute.make_str(learner_config, "learner_config")
  center_bias = _execute.make_bool(center_bias, "center_bias")
  tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
  stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
  next_stamp_token = _ops.convert_to_tensor(next_stamp_token, _dtypes.int64)
  learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32)
  dropout_seed = _ops.convert_to_tensor(dropout_seed, _dtypes.int64)
  max_tree_depth = _ops.convert_to_tensor(max_tree_depth, _dtypes.int32)
  weak_learner_type = _ops.convert_to_tensor(weak_learner_type, _dtypes.int32)
  partition_ids = _ops.convert_n_to_tensor(partition_ids, _dtypes.int32)
  gains = _ops.convert_n_to_tensor(gains, _dtypes.float32)
  splits = _ops.convert_n_to_tensor(splits, _dtypes.string)
  _inputs_flat = [tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate, dropout_seed, max_tree_depth, weak_learner_type] + list(partition_ids) + list(gains) + list(splits)
  _attrs = ("learner_config", learner_config, "num_handlers",
  _attr_num_handlers, "center_bias", center_bias)
  _result = _execute.execute(b"GrowTreeEnsemble", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _result = None
  return _result
Code example #9
 def testFloat(self):
     np.random.seed(12345)
     x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
     tf_x = ops.convert_n_to_tensor(x)
     with self.test_session(use_gpu=True):
         self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
         self.assertAllClose(x[0] * 5,
                             math_ops.accumulate_n([tf_x[0]] * 5).eval())
Code example #10
 def testInt(self):
     np.random.seed(54321)
     x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
     tf_x = ops.convert_n_to_tensor(x)
     with self.test_session(use_gpu=True):
         self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
         self.assertAllEqual(x[0] * 6,
                             math_ops.accumulate_n([tf_x[0]] * 6).eval())
Code example #11
def grow_tree_ensemble_eager_fallback(tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate, dropout_seed, partition_ids, gains, splits, learner_config, center_bias, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function grow_tree_ensemble
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(partition_ids, (list, tuple)):
    raise TypeError(
        "Expected list for 'partition_ids' argument to "
        "'grow_tree_ensemble' Op, not %r." % partition_ids)
  _attr_num_handlers = len(partition_ids)
  if not isinstance(gains, (list, tuple)):
    raise TypeError(
        "Expected list for 'gains' argument to "
        "'grow_tree_ensemble' Op, not %r." % gains)
  if len(gains) != _attr_num_handlers:
    raise ValueError(
        "List argument 'gains' to 'grow_tree_ensemble' Op with length %d "
        "must match length %d of argument 'partition_ids'." %
        (len(gains), _attr_num_handlers))
  if not isinstance(splits, (list, tuple)):
    raise TypeError(
        "Expected list for 'splits' argument to "
        "'grow_tree_ensemble' Op, not %r." % splits)
  if len(splits) != _attr_num_handlers:
    raise ValueError(
        "List argument 'splits' to 'grow_tree_ensemble' Op with length %d "
        "must match length %d of argument 'partition_ids'." %
        (len(splits), _attr_num_handlers))
  learner_config = _execute.make_str(learner_config, "learner_config")
  center_bias = _execute.make_bool(center_bias, "center_bias")
  tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource)
  stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64)
  next_stamp_token = _ops.convert_to_tensor(next_stamp_token, _dtypes.int64)
  learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32)
  dropout_seed = _ops.convert_to_tensor(dropout_seed, _dtypes.int64)
  partition_ids = _ops.convert_n_to_tensor(partition_ids, _dtypes.int32)
  gains = _ops.convert_n_to_tensor(gains, _dtypes.float32)
  splits = _ops.convert_n_to_tensor(splits, _dtypes.string)
  _inputs_flat = [tree_ensemble_handle, stamp_token, next_stamp_token, learning_rate, dropout_seed] + list(partition_ids) + list(gains) + list(splits)
  _attrs = ("learner_config", learner_config, "num_handlers",
  _attr_num_handlers, "center_bias", center_bias)
  _result = _execute.execute(b"GrowTreeEnsemble", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _result = None
  return _result
Code example #12
    def decayed_lr(x, boundaries, values, name):
        """Helper to recompute learning rate; most helpful in eager-mode."""
        with ops.name_scope(name, "PiecewiseConstant",
                            [x, boundaries, values, name]) as name:
            boundaries = ops.convert_n_to_tensor(boundaries)
            values = ops.convert_n_to_tensor(values)
            x_recomp = ops.convert_to_tensor(x)
            # Avoid explicit conversion to x's dtype. This could result in faulty
            # comparisons, for example if floats are converted to integers.
            for i, b in enumerate(boundaries):
                if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
                    # We can promote int32 boundaries to int64 without loss of precision.
                    # This covers the most common case where the user passes in boundaries
                    # as an array of Python integers.
                    if (b.dtype.base_dtype == dtypes.int32
                            and x_recomp.dtype.base_dtype == dtypes.int64):
                        b = math_ops.cast(b, x_recomp.dtype.base_dtype)
                        boundaries[i] = b
                    else:
                        raise ValueError(
                            "Boundaries (%s) must have the same dtype as x (%s)."
                            % (b.dtype.base_dtype, x_recomp.dtype.base_dtype))
            # TODO(rdipietro): Ensure that boundaries' elements are strictly increasing.
            for v in values[1:]:
                if v.dtype.base_dtype != values[0].dtype.base_dtype:
                    raise ValueError(
                        "Values must have elements all with the same dtype (%s vs %s)."
                        % (values[0].dtype.base_dtype, v.dtype.base_dtype))
            pred_fn_pairs = []
            pred_fn_pairs.append(
                (x_recomp <= boundaries[0], lambda: values[0]))
            pred_fn_pairs.append(
                (x_recomp > boundaries[-1], lambda: values[-1]))
            for low, high, v in zip(boundaries[:-1], boundaries[1:],
                                    values[1:-1]):
                # Need to bind v here; can do this with lambda v=v: ...
                pred = (x_recomp > low) & (x_recomp <= high)
                pred_fn_pairs.append((pred, lambda v=v: v))

            # The default isn't needed here because our conditions are mutually
            # exclusive and exhaustive, but tf.case requires it.
            default = lambda: values[0]
            return control_flow_ops.case(pred_fn_pairs,
                                         default,
                                         exclusive=True)
Code example #13
 def testFloat(self):
   np.random.seed(12345)
   for num_inputs in range(1, 10):
     x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(num_inputs)]
     tf_x = ops.convert_n_to_tensor(x)
     with test_util.use_gpu():
       self.assertAllClose(sum(x), math_ops.add_n(tf_x))
       self.assertAllClose(x[0] * num_inputs,
                           math_ops.add_n([tf_x[0]] * num_inputs))
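
`math_ops.add_n` is exposed publicly as `tf.add_n`; a minimal sketch of the two properties the test checks (summing a list, and summing n copies of one tensor):

```python
import tensorflow as tf

t = tf.constant([1.0, 2.0, 3.0])
# add_n sums a list of tensors of identical shape and dtype.
print(tf.add_n([t, t, t, t]).numpy())  # [ 4.  8. 12.]  == t * 4
```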
Code example #14
 def testFloat(self):
   np.random.seed(12345)
   for num_inputs in range(1, 10):
     x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(num_inputs)]
     tf_x = ops.convert_n_to_tensor(x)
     with self.test_session(use_gpu=True):
       self.assertAllClose(sum(x), math_ops.add_n(tf_x).eval())
       self.assertAllClose(x[0] * num_inputs,
                           math_ops.add_n([tf_x[0]] * num_inputs).eval())
Code example #15
 def testFloat(self):
   np.random.seed(12345)
   for num_inputs in range(1, 10):
     x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(num_inputs)]
     tf_x = ops.convert_n_to_tensor(x)
     with self.cached_session(use_gpu=True):
       self.assertAllClose(sum(x), math_ops.add_n(tf_x).eval())
       self.assertAllClose(x[0] * num_inputs,
                           math_ops.add_n([tf_x[0]] * num_inputs).eval())
Code example #16
File: math_ops_test.py Project: govindap/tensorflow
 def testFloat(self):
   np.random.seed(12345)
   x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
   tf_x = ops.convert_n_to_tensor(x)
   for u in tf_x:
     print("shape=%s" % u.get_shape())
   with self.test_session():
     self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
     self.assertAllClose(x[0] * 5, math_ops.accumulate_n([tf_x[0]] * 5).eval())
Code example #17
File: math_ops_test.py Project: igorbb/tensorflow-1
 def testFloat(self):
   np.random.seed(12345)
   x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
   tf_x = ops.convert_n_to_tensor(x)
   for u in tf_x:
     print("shape=%s" % u.get_shape())
   with self.test_session():
     self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
     self.assertAllClose(x[0] * 5, math_ops.accumulate_n([tf_x[0]] * 5).eval())
Code example #18
def ragged_cross_eager_fallback(ragged_values, ragged_row_splits, sparse_indices, sparse_values, sparse_shape, dense_inputs, input_order, hashed_output, num_buckets, hash_key, out_values_type, out_row_splits_type, name, ctx):
  if not isinstance(sparse_indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_indices' argument to "
        "'ragged_cross' Op, not %r." % sparse_indices)
  _attr_Nsparse = len(sparse_indices)
  if not isinstance(sparse_shape, (list, tuple)):
    raise TypeError(
        "Expected list for 'sparse_shape' argument to "
        "'ragged_cross' Op, not %r." % sparse_shape)
  if len(sparse_shape) != _attr_Nsparse:
    raise ValueError(
        "List argument 'sparse_shape' to 'ragged_cross' Op with length %d "
        "must match length %d of argument 'sparse_indices'." %
        (len(sparse_shape), _attr_Nsparse))
  input_order = _execute.make_str(input_order, "input_order")
  hashed_output = _execute.make_bool(hashed_output, "hashed_output")
  num_buckets = _execute.make_int(num_buckets, "num_buckets")
  hash_key = _execute.make_int(hash_key, "hash_key")
  out_values_type = _execute.make_type(out_values_type, "out_values_type")
  out_row_splits_type = _execute.make_type(out_row_splits_type, "out_row_splits_type")
  _attr_ragged_values_types, ragged_values = _execute.convert_to_mixed_eager_tensors(ragged_values, ctx)
  _attr_ragged_splits_types, ragged_row_splits = _execute.convert_to_mixed_eager_tensors(ragged_row_splits, ctx)
  _attr_sparse_values_types, sparse_values = _execute.convert_to_mixed_eager_tensors(sparse_values, ctx)
  _attr_dense_types, dense_inputs = _execute.convert_to_mixed_eager_tensors(dense_inputs, ctx)
  sparse_indices = _ops.convert_n_to_tensor(sparse_indices, _dtypes.int64)
  sparse_shape = _ops.convert_n_to_tensor(sparse_shape, _dtypes.int64)
  _inputs_flat = list(ragged_values) + list(ragged_row_splits) + list(sparse_indices) + list(sparse_values) + list(sparse_shape) + list(dense_inputs)
  _attrs = ("Nsparse", _attr_Nsparse, "input_order", input_order,
  "hashed_output", hashed_output, "num_buckets", num_buckets, "hash_key",
  hash_key, "ragged_values_types", _attr_ragged_values_types,
  "ragged_splits_types", _attr_ragged_splits_types, "sparse_values_types",
  _attr_sparse_values_types, "dense_types", _attr_dense_types,
  "out_values_type", out_values_type, "out_row_splits_type",
  out_row_splits_type)
  _result = _execute.execute(b"RaggedCross", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "RaggedCross", _inputs_flat, _attrs, _result)
  _result = _RaggedCrossOutput._make(_result)
  return _result
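
For orientation, `tf.ragged.cross` is the public entry point that bottoms out in this generated wrapper; a minimal sketch of what the op computes:

```python
import tensorflow as tf

a = tf.ragged.constant([["a1", "a2"], ["a3"]])
b = tf.ragged.constant([["b1"], ["b2", "b3"]])
# Row-wise feature cross: one value from each input per combination.
print(tf.ragged.cross([a, b]).to_list())
# [[b'a1_X_b1', b'a2_X_b1'], [b'a3_X_b2', b'a3_X_b3']]
```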
Code example #19
 def testInt(self):
     np.random.seed(54321)
     for num_inputs in range(1, 10):
         x = [
             np.random.randint(-128, 128, (5, 4, 3, 2, 1))
             for _ in range(num_inputs)
         ]
         tf_x = ops.convert_n_to_tensor(x)
         with test_util.use_gpu():
             self.assertAllEqual(sum(x), math_ops.add_n(tf_x))
             self.assertAllEqual(x[0] * num_inputs,
                                 math_ops.add_n([tf_x[0]] * num_inputs))
Code example #20
 def testInt(self):
   np.random.seed(54321)
   for num_inputs in range(1, 10):
     x = [
         np.random.randint(-128, 128, (5, 4, 3, 2, 1))
         for _ in range(num_inputs)
     ]
     tf_x = ops.convert_n_to_tensor(x)
     with self.cached_session(use_gpu=True):
       self.assertAllEqual(sum(x), math_ops.add_n(tf_x).eval())
       self.assertAllEqual(x[0] * num_inputs,
                           math_ops.add_n([tf_x[0]] * num_inputs).eval())
Code example #21
 def testInt(self):
   np.random.seed(54321)
   for num_inputs in range(1, 10):
     x = [
         np.random.randint(-128, 128, (5, 4, 3, 2, 1))
         for _ in range(num_inputs)
     ]
     tf_x = ops.convert_n_to_tensor(x)
     with self.test_session(use_gpu=True):
       self.assertAllEqual(sum(x), math_ops.add_n(tf_x).eval())
       self.assertAllEqual(x[0] * num_inputs,
                           math_ops.add_n([tf_x[0]] * num_inputs).eval())
Code example #22
def _merge_summary(inputs, name=None):
    r"""Merges summaries.

  This op creates a
  [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
  protocol buffer that contains the union of all the values in the input
  summaries.

  When the Op is run, it reports an `InvalidArgument` error if multiple values
  in the summaries to merge use the same tag.

  Args:
    inputs: A list of at least 1 `Tensor` objects with type `string`.
      Can be of any shape.  Each must contain serialized `Summary` protocol
      buffers.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`. Scalar. Serialized `Summary` protocol buffer.
  """
    if not isinstance(inputs, (list, tuple)):
        raise TypeError("Expected list for 'inputs' argument to "
                        "'merge_summary' Op, not %r." % inputs)
    _attr_N = len(inputs)
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("MergeSummary",
                                                 inputs=inputs,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("N", _op.get_attr("N"))
    else:
        inputs = _ops.convert_n_to_tensor(inputs, _dtypes.string)
        _inputs_flat = list(inputs)
        _attrs = ("N", _attr_N)
        _result = _execute.execute(b"MergeSummary",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("MergeSummary", _inputs_flat, _attrs, _result,
                             name)
    _result, = _result
    return _result
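
A hedged sketch of the public graph-mode path that reaches this wrapper, assuming the `tf.compat.v1.summary` API:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

s1 = tf.summary.scalar("loss", tf.constant(0.5))
s2 = tf.summary.scalar("accuracy", tf.constant(0.9))
# merge() invokes the MergeSummary op; the tags ("loss", "accuracy")
# must be unique or the op raises InvalidArgument at run time.
merged = tf.summary.merge([s1, s2])

with tf.Session() as sess:
    serialized = sess.run(merged)  # a serialized Summary protocol buffer
```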
Code example #23
  def decayed_lr(x, boundaries, values, name):
    """Helper to recompute learning rate; most helpful in eager-mode."""
    with ops.name_scope(name, "PiecewiseConstant",
                        [x, boundaries, values, name]) as name:
      boundaries = ops.convert_n_to_tensor(boundaries)
      values = ops.convert_n_to_tensor(values)
      x_recomp = ops.convert_to_tensor(x)
      # Avoid explicit conversion to x's dtype. This could result in faulty
      # comparisons, for example if floats are converted to integers.
      for i, b in enumerate(boundaries):
        if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
          # We can promote int32 boundaries to int64 without loss of precision.
          # This covers the most common case where the user passes in boundaries
          # as an array of Python integers.
          if (b.dtype.base_dtype == dtypes.int32 and
              x_recomp.dtype.base_dtype == dtypes.int64):
            b = math_ops.cast(b, x_recomp.dtype.base_dtype)
            boundaries[i] = b
          else:
            raise ValueError(
                "Boundaries (%s) must have the same dtype as x (%s)." %
                (b.dtype.base_dtype, x_recomp.dtype.base_dtype))
      # TODO(rdipietro): Ensure that boundaries' elements are strictly increasing.
      for v in values[1:]:
        if v.dtype.base_dtype != values[0].dtype.base_dtype:
          raise ValueError(
              "Values must have elements all with the same dtype (%s vs %s)." %
              (values[0].dtype.base_dtype, v.dtype.base_dtype))
      pred_fn_pairs = []
      pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
      pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
      for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
        # Need to bind v here; can do this with lambda v=v: ...
        pred = (x_recomp > low) & (x_recomp <= high)
        pred_fn_pairs.append((pred, lambda v=v: v))

      # The default isn't needed here because our conditions are mutually
      # exclusive and exhaustive, but tf.case requires it.
      default = lambda: values[0]
      return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
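
The `lambda v=v: v` idiom in the loop above works around Python's late binding of closure variables; a small self-contained demonstration of the pitfall it avoids:

```python
# Without the default-argument trick, every lambda sees the *final* v.
fns_late = [lambda: v for v in range(3)]
print([f() for f in fns_late])   # [2, 2, 2] -- all closures share one v

# Binding v as a default argument captures its value at definition time.
fns_bound = [lambda v=v: v for v in range(3)]
print([f() for f in fns_bound])  # [0, 1, 2]
```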
Code example #24
def sparse_feature_cross_v2_eager_fallback(indices, values, shapes, dense, hashed_output, num_buckets, hash_key, out_type, internal_type, name=None):
  r"""This is the slowpath function for Eager mode.
  This is for function sparse_feature_cross_v2
  """
  _ctx = _context.context()
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_feature_cross_v2' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  hashed_output = _execute.make_bool(hashed_output, "hashed_output")
  num_buckets = _execute.make_int(num_buckets, "num_buckets")
  hash_key = _execute.make_int(hash_key, "hash_key")
  out_type = _execute.make_type(out_type, "out_type")
  internal_type = _execute.make_type(internal_type, "internal_type")
  _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
  _attr_dense_types, dense = _execute.convert_to_mixed_eager_tensors(dense, _ctx)
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
  shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
  _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense)
  _attrs = ("N", _attr_N, "hashed_output", hashed_output, "num_buckets",
  num_buckets, "hash_key", hash_key, "sparse_types", _attr_sparse_types,
  "dense_types", _attr_dense_types, "out_type", out_type, "internal_type",
  internal_type)
  _result = _execute.execute(b"SparseFeatureCrossV2", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SparseFeatureCrossV2", _inputs_flat, _attrs, _result, name)
  _result = _SparseFeatureCrossV2Output._make(_result)
  return _result
Code example #25
def sparse_feature_cross_v2_eager_fallback(indices, values, shapes, dense, hashed_output, num_buckets, hash_key, out_type, internal_type, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function sparse_feature_cross_v2
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_feature_cross_v2' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_feature_cross_v2' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  hashed_output = _execute.make_bool(hashed_output, "hashed_output")
  num_buckets = _execute.make_int(num_buckets, "num_buckets")
  hash_key = _execute.make_int(hash_key, "hash_key")
  out_type = _execute.make_type(out_type, "out_type")
  internal_type = _execute.make_type(internal_type, "internal_type")
  _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(values, _ctx)
  _attr_dense_types, dense = _execute.convert_to_mixed_eager_tensors(dense, _ctx)
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
  shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
  _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense)
  _attrs = ("N", _attr_N, "hashed_output", hashed_output, "num_buckets",
  num_buckets, "hash_key", hash_key, "sparse_types", _attr_sparse_types,
  "dense_types", _attr_dense_types, "out_type", out_type, "internal_type",
  internal_type)
  _result = _execute.execute(b"SparseFeatureCrossV2", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SparseFeatureCrossV2", _inputs_flat, _attrs, _result, name)
  _result = _SparseFeatureCrossV2Output._make(_result)
  return _result
Code example #26
  def __call__(self, step):
    with ops.name_scope_v2(self.name or "PiecewiseConstant"):
      boundaries = ops.convert_n_to_tensor(self.boundaries)
      values = ops.convert_n_to_tensor(self.values)
      x_recomp = ops.convert_to_tensor_v2(step)
      for i, b in enumerate(boundaries):
        if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
          # We cast the boundaries to have the same type as the step
          b = math_ops.cast(b, x_recomp.dtype.base_dtype)
          boundaries[i] = b
      pred_fn_pairs = []
      pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
      pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
      for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
        # Need to bind v here; can do this with lambda v=v: ...
        pred = (x_recomp > low) & (x_recomp <= high)
        pred_fn_pairs.append((pred, lambda v=v: v))

      # The default isn't needed here because our conditions are mutually
      # exclusive and exhaustive, but tf.case requires it.
      default = lambda: values[0]
      return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
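
This `__call__` corresponds to what is exposed publicly as `tf.keras.optimizers.schedules.PiecewiseConstantDecay`; a minimal TF 2.x sketch:

```python
import tensorflow as tf

schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[100000, 110000], values=[1.0, 0.5, 0.1])

print(float(schedule(0)))       # 1.0 (step <= 100000)
print(float(schedule(100500)))  # 0.5
print(float(schedule(200000)))  # 0.1

# A schedule can be handed directly to an optimizer as the learning rate.
opt = tf.keras.optimizers.SGD(learning_rate=schedule)
```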
Code example #27
    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")  # single tensor, so convert_to_tensor, not convert_n_to_tensor
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError(
                "keep_prob must be a scalar tensor or a float in the range"
                "(0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob,
                                          dtype=x.dtype,
                                          name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(
            x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape,
                                                   seed=seed,
                                                   dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

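        # Note: fixedPointMean and fixedPointVar are not defined in this
        # snippet; in the original SELU-dropout code they come from the
        # enclosing scope (typically 0.0 and 1.0, the SELU fixed point).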
        a = math_ops.sqrt(
            fixedPointVar /
            (keep_prob *
             ((1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) +
              fixedPointVar)))

        b = fixedPointMean - a * (keep_prob * fixedPointMean +
                                  (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret
Code example #28
def dense_table_init_eager_fallback(vars, table_handle, name, ctx):
  if not isinstance(vars, (list, tuple)):
    raise TypeError(
        "Expected list for 'vars' argument to "
        "'dense_table_init' Op, not %r." % vars)
  _attr_N = len(vars)
  table_handle = _execute.make_int(table_handle, "table_handle")
  vars = _ops.convert_n_to_tensor(vars, _dtypes.resource)
  _inputs_flat = list(vars)
  _attrs = ("table_handle", table_handle, "N", _attr_N)
  _result = _execute.execute(b"DenseTableInit", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None
  return _result
Code example #29
def string_join(inputs, separator="", name=None):
    r"""Joins the strings in the given list of string tensors into one tensor;

  with the given separator (default is an empty separator).

  Args:
    inputs: A list of at least 1 `Tensor` objects with type `string`.
      A list of string tensors.  The tensors must all have the same shape,

      or be scalars.  Scalars may be mixed in; these will be broadcast to the shape

      of non-scalar inputs.
    separator: An optional `string`. Defaults to `""`.
      string, an optional join separator.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
    if not isinstance(inputs, (list, tuple)):
        raise TypeError("Expected list for 'inputs' argument to "
                        "'string_join' Op, not %r." % inputs)
    _attr_N = len(inputs)
    if separator is None:
        separator = ""
    separator = _execute.make_str(separator, "separator")
    _ctx = _context.context()
    if _ctx.in_graph_mode():
        _, _, _op = _op_def_lib._apply_op_helper("StringJoin",
                                                 inputs=inputs,
                                                 separator=separator,
                                                 name=name)
        _result = _op.outputs[:]
        _inputs_flat = _op.inputs
        _attrs = ("N", _op.get_attr("N"), "separator",
                  _op.get_attr("separator"))
    else:
        inputs = _ops.convert_n_to_tensor(inputs, _dtypes.string)
        _inputs_flat = list(inputs)
        _attrs = ("N", _attr_N, "separator", separator)
        _result = _execute.execute(b"StringJoin",
                                   1,
                                   inputs=_inputs_flat,
                                   attrs=_attrs,
                                   ctx=_ctx,
                                   name=name)
    _execute.record_gradient("StringJoin", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
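
The public counterpart of this wrapper is `tf.strings.join`; a minimal eager sketch, including the scalar broadcasting the docstring describes:

```python
import tensorflow as tf

joined = tf.strings.join(["abc", "def", "ghi"], separator="-")
print(joined.numpy())  # b'abc-def-ghi'

# Scalars broadcast against same-shape tensor inputs.
mixed = tf.strings.join([tf.constant(["a", "b"]), "X"], separator="/")
print(mixed.numpy())   # [b'a/X' b'b/X']
```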
Code example #30
def dense_table_push_pull_eager_fallback(vars, grads, table_handle, name, ctx):
  if not isinstance(vars, (list, tuple)):
    raise TypeError(
        "Expected list for 'vars' argument to "
        "'dense_table_push_pull' Op, not %r." % vars)
  _attr_N = len(vars)
  if not isinstance(grads, (list, tuple)):
    raise TypeError(
        "Expected list for 'grads' argument to "
        "'dense_table_push_pull' Op, not %r." % grads)
  if len(grads) != _attr_N:
    raise ValueError(
        "List argument 'grads' to 'dense_table_push_pull' Op with length %d "
        "must match length %d of argument 'vars'." %
        (len(grads), _attr_N))
  table_handle = _execute.make_int(table_handle, "table_handle")
  vars = _ops.convert_n_to_tensor(vars, _dtypes.resource)
  grads = _ops.convert_n_to_tensor(grads, _dtypes.float32)
  _inputs_flat = list(vars) + list(grads)
  _attrs = ("table_handle", table_handle, "N", _attr_N)
  _result = _execute.execute(b"DenseTablePushPull", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None
  return _result
Code example #31
def merge_summary_eager_fallback(inputs, name, ctx):
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'merge_summary' Op, not %r." % inputs)
  _attr_N = len(inputs)
  inputs = _ops.convert_n_to_tensor(inputs, _dtypes.string)
  _inputs_flat = list(inputs)
  _attrs = ("N", _attr_N)
  _result = _execute.execute(b"MergeSummary", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MergeSummary", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
Code example #32
    def apply_op(self, op_type_name, name=None, **keywords):
        # pylint: disable=g-doc-args
        """Add a node invoking a registered Op to a graph.

    Example usage:
       # input1 and input2 can be Tensors or anything ops.convert_to_tensor()
       # will convert to a Tensor.
       op_def_library.apply_op("op", input1=input1, input2=input2)
       # Can specify a node name.
       op_def_library.apply_op("op", input1=input1, name="node_name")
       # Must use keyword arguments, with the names specified in the OpDef.
       op_def_library.apply_op("op", input_name=input, attr_name=attr)

    All attrs must either be inferred from an input or specified.
    (If inferred, the attr must not be specified.)  If an attr has a default
    value specified in the Op's OpDef, then you may pass None as the value
    of that attr to get the default.

    Args:
      op_type_name: string. Must match the name field of a registered Op.
      name: string. Optional name of the created op.
      **keywords: input Tensor and attr arguments specified by name,
        and optional parameters to pass when constructing the Operation.

    Returns:
      The Tensor(s) representing the output of the operation, or the Operation
      itself if there are no outputs.

    Raises:
      RuntimeError: On some errors.
      TypeError: On some errors.
      ValueError: On some errors.
    """
        output_structure, is_stateful, op = self._apply_op_helper(
            op_type_name, name, **keywords)
        if output_structure:
            outputs = op.outputs
            res = _Restructure(ops.convert_n_to_tensor(outputs),
                               output_structure)
            if isinstance(res, list) and not res and is_stateful:
                return op
            else:
                return res
        else:
            return op
Code example #33
  def apply_op(self, op_type_name, name=None, **keywords):
    # pylint: disable=g-doc-args
    """Add a node invoking a registered Op to a graph.

    Example usage:
       # input1 and input2 can be Tensors or anything ops.convert_to_tensor()
       # will convert to a Tensor.
       op_def_library.apply_op("op", input1=input1, input2=input2)
       # Can specify a node name.
       op_def_library.apply_op("op", input1=input1, name="node_name")
       # Must use keyword arguments, with the names specified in the OpDef.
       op_def_library.apply_op("op", input_name=input, attr_name=attr)

    All attrs must either be inferred from an input or specified.
    (If inferred, the attr must not be specified.)  If an attr has a default
    value specified in the Op's OpDef, then you may pass None as the value
    of that attr to get the default.

    Args:
      op_type_name: string. Must match the name field of a registered Op.
      name: string. Optional name of the created op.
      **keywords: input Tensor and attr arguments specified by name,
        and optional parameters to pass when constructing the Operation.

    Returns:
      The Tensor(s) representing the output of the operation, or the Operation
      itself if there are no outputs.

    Raises:
      RuntimeError: On some errors.
      TypeError: On some errors.
      ValueError: On some errors.
    """
    output_structure, is_stateful, op = self._apply_op_helper(
        op_type_name, name, **keywords)
    if output_structure:
      outputs = op.outputs
      res = _Restructure(ops.convert_n_to_tensor(outputs), output_structure)
      if isinstance(res, list) and not res and is_stateful:
        return op
      else:
        return res
    else:
      return op
Code example #34
def merge_summary_eager_fallback(inputs, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function merge_summary
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'merge_summary' Op, not %r." % inputs)
  _attr_N = len(inputs)
  inputs = _ops.convert_n_to_tensor(inputs, _dtypes.string)
  _inputs_flat = list(inputs)
  _attrs = ("N", _attr_N)
  _result = _execute.execute(b"MergeSummary", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MergeSummary", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Code example #35
def ragged_tensor_to_sparse_eager_fallback(rt_nested_splits, rt_dense_values, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function ragged_tensor_to_sparse
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(rt_nested_splits, (list, tuple)):
    raise TypeError(
        "Expected list for 'rt_nested_splits' argument to "
        "'ragged_tensor_to_sparse' Op, not %r." % rt_nested_splits)
  _attr_RAGGED_RANK = len(rt_nested_splits)
  _attr_T, (rt_dense_values,) = _execute.args_to_matching_eager([rt_dense_values], _ctx)
  rt_nested_splits = _ops.convert_n_to_tensor(rt_nested_splits, _dtypes.int64)
  _inputs_flat = list(rt_nested_splits) + [rt_dense_values]
  _attrs = ("RAGGED_RANK", _attr_RAGGED_RANK, "T", _attr_T)
  _result = _execute.execute(b"RaggedTensorToSparse", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "RaggedTensorToSparse", _inputs_flat, _attrs, _result, name)
  _result = _RaggedTensorToSparseOutput._make(_result)
  return _result
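
The public route to this op is `tf.RaggedTensor.to_sparse`; a minimal sketch:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [], [3]])
sp = rt.to_sparse()  # backed by the RaggedTensorToSparse op
print(sp.indices.numpy())      # [[0 0] [0 1] [2 0]]
print(sp.values.numpy())       # [1 2 3]
print(sp.dense_shape.numpy())  # [3 2]
```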
Code example #36
def string_join_eager_fallback(inputs, separator="", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function string_join
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'string_join' Op, not %r." % inputs)
  _attr_N = len(inputs)
  if separator is None:
    separator = ""
  separator = _execute.make_str(separator, "separator")
  inputs = _ops.convert_n_to_tensor(inputs, _dtypes.string)
  _inputs_flat = list(inputs)
  _attrs = ("N", _attr_N, "separator", separator)
  _result = _execute.execute(b"StringJoin", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "StringJoin", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
Code example #37
def xla_launch_eager_fallback(constants, args, resources, Tresults, function, name, ctx):
  if not isinstance(resources, (list, tuple)):
    raise TypeError(
        "Expected list for 'resources' argument to "
        "'xla_launch' Op, not %r." % resources)
  _attr_Nresources = len(resources)
  if not isinstance(Tresults, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tresults' argument to "
        "'xla_launch' Op, not %r." % Tresults)
  Tresults = [_execute.make_type(_t, "Tresults") for _t in Tresults]
  _attr_Tconstants, constants = _execute.convert_to_mixed_eager_tensors(constants, ctx)
  _attr_Targs, args = _execute.convert_to_mixed_eager_tensors(args, ctx)
  resources = _ops.convert_n_to_tensor(resources, _dtypes.resource)
  _inputs_flat = list(constants) + list(args) + list(resources)
  _attrs = ("Tconstants", _attr_Tconstants, "Targs", _attr_Targs,
  "Nresources", _attr_Nresources, "Tresults", Tresults, "function", function)
  _result = _execute.execute(b"XlaLaunch", len(Tresults), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaLaunch", _inputs_flat, _attrs, _result)
  return _result
Code example #38
    def test_mean(self):
        m = metrics.Mean(name='my_mean')

        # check config
        self.assertEqual(m.name, 'my_mean')
        self.assertTrue(m.stateful)
        self.assertEqual(m.dtype, dtypes.float32)
        self.assertEqual(len(m.variables), 2)
        self.evaluate(variables.variables_initializer(m.variables))

        # check initial state
        self.assertEqual(self.evaluate(m.total), 0)
        self.assertEqual(self.evaluate(m.count), 0)

        # check __call__()
        self.assertEqual(self.evaluate(m(100)), 100)
        self.assertEqual(self.evaluate(m.total), 100)
        self.assertEqual(self.evaluate(m.count), 1)

        # check update_state() and result() + state accumulation + tensor input
        update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
        self.evaluate(update_op)
        self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
        self.assertEqual(self.evaluate(m.total), 106)  # 100 + 1 + 5
        self.assertEqual(self.evaluate(m.count), 3)

        # check reset_states()
        m.reset_states()
        self.assertEqual(self.evaluate(m.total), 0)
        self.assertEqual(self.evaluate(m.count), 0)

        # Check save and restore config
        m2 = metrics.Mean.from_config(m.get_config())
        self.assertEqual(m2.name, 'my_mean')
        self.assertTrue(m2.stateful)
        self.assertEqual(m2.dtype, dtypes.float32)
        self.assertEqual(len(m2.variables), 2)
Code example #39
def ragged_gather_eager_fallback(params_nested_splits,
                                 params_dense_values,
                                 indices,
                                 OUTPUT_RAGGED_RANK,
                                 name=None,
                                 ctx=None):
    r"""This is the slowpath function for Eager mode.
  This is for function ragged_gather
  """
    _ctx = ctx if ctx else _context.context()
    if not isinstance(params_nested_splits, (list, tuple)):
        raise TypeError("Expected list for 'params_nested_splits' argument to "
                        "'ragged_gather' Op, not %r." % params_nested_splits)
    _attr_PARAMS_RAGGED_RANK = len(params_nested_splits)
    OUTPUT_RAGGED_RANK = _execute.make_int(OUTPUT_RAGGED_RANK,
                                           "OUTPUT_RAGGED_RANK")
    _attr_Tvalues, (params_dense_values, ) = _execute.args_to_matching_eager(
        [params_dense_values], _ctx)
    _attr_Tindices, (indices, ) = _execute.args_to_matching_eager([indices],
                                                                  _ctx)
    params_nested_splits = _ops.convert_n_to_tensor(params_nested_splits,
                                                    _dtypes.int64)
    _inputs_flat = list(params_nested_splits) + [params_dense_values, indices]
    _attrs = ("Tvalues", _attr_Tvalues, "Tindices", _attr_Tindices,
              "PARAMS_RAGGED_RANK", _attr_PARAMS_RAGGED_RANK,
              "OUTPUT_RAGGED_RANK", OUTPUT_RAGGED_RANK)
    _result = _execute.execute(b"RaggedGather",
                               OUTPUT_RAGGED_RANK + 1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("RaggedGather", _inputs_flat, _attrs, _result,
                             name)
    _result = [_result[:OUTPUT_RAGGED_RANK]] + _result[OUTPUT_RAGGED_RANK:]
    _result = _RaggedGatherOutput._make(_result)
    return _result
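
Publicly, `tf.gather` dispatches to this op when `params` is a `tf.RaggedTensor`; a minimal sketch:

```python
import tensorflow as tf

params = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
# Gathering rows of a RaggedTensor runs the RaggedGather op underneath.
print(tf.gather(params, [2, 0]).to_list())  # [[4, 5, 6], [1, 2]]
```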
Code example #40
File: readers.py Project: jfreedman0/tensorflow
  def __init__(self,
               filenames,
               record_defaults,
               buffer_size=None,
               header=False,
               field_delim=",",
               use_quote_delim=True,
               na_value="",
               select_cols=None):
    """Creates a `CsvDataset` by reading and decoding CSV files.

    The elements of this dataset correspond to records from the file(s).
    RFC 4180 format is expected for CSV files
    (https://tools.ietf.org/html/rfc4180).
    Note that we allow leading and trailing spaces in int and float fields.


    For example, suppose we have a file 'my_file0.csv' with four CSV columns of
    different data types:
    ```
    abcdefg,4.28E10,5.55E6,12
    hijklmn,-5.3E14,,2
    ```

    We can construct a CsvDataset from it as follows:
    ```python
    dataset = tf.contrib.data.CsvDataset(
      "my_file*.csv",
      [tf.float32,  # Required field, use dtype or empty tensor
       tf.constant([0.0], dtype=tf.float32),  # Optional field, default to 0.0
       tf.int32,  # Required field, use dtype or empty tensor
       ],
      select_cols=[1,2,3]  # Only parse last three columns
    )
    ```

    The expected output of iterating over this dataset is:
    ```python
    nxt = dataset.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
      while True:
        try:
          print(sess.run(nxt))
        except tf.errors.OutOfRangeError:
          break

    >> (4.28e10, 5.55e6, 12)
    >> (-5.3e14, 0.0, 2)
    ```

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      record_defaults: A list of default values for the CSV fields. Each item in
        the list is either a valid CSV `DType` (float32, float64, int32, int64,
        string), or a `Tensor` object with one of the above types. One per
        column of CSV data, with either a scalar `Tensor` default value for the
        column if it is optional, or `DType` or empty `Tensor` if required. If
        both this and `select_cols` are specified, these must have the same
        lengths, and `record_defaults` is assumed to be sorted in order of
        increasing column index.
      buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
        to buffer while reading files. Defaults to 4MB.
      header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
        have header line(s) that should be skipped when parsing. Defaults to
        `False`.
      field_delim: (Optional.) A `tf.string` scalar containing the delimiter
        character that separates fields in a record. Defaults to `","`.
      use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats
        double quotation marks as regular characters inside of string fields
        (ignoring RFC 4180, Section 2, Bullet 5). Defaults to `True`.
      na_value: (Optional.) A `tf.string` scalar indicating a value that will
        be treated as NA/NaN.
      select_cols: (Optional.) A sorted list of column indices to select from
        the input data. If specified, only this subset of columns will be
        parsed. Defaults to parsing all columns.
    """
    super(CsvDataset, self).__init__()
    self._filenames = ops.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    record_defaults = [
        constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
        for x in record_defaults
    ]
    self._record_defaults = ops.convert_n_to_tensor(
        record_defaults, name="record_defaults")
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
    self._header = ops.convert_to_tensor(
        header, dtype=dtypes.bool, name="header")
    self._field_delim = ops.convert_to_tensor(
        field_delim, dtype=dtypes.string, name="field_delim")
    self._use_quote_delim = ops.convert_to_tensor(
        use_quote_delim, dtype=dtypes.bool, name="use_quote_delim")
    self._na_value = ops.convert_to_tensor(
        na_value, dtype=dtypes.string, name="na_value")
    self._select_cols = convert.optional_param_to_tensor(
        "select_cols",
        select_cols,
        argument_default=[],
        argument_dtype=dtypes.int64,
    )
    self._output_shapes = tuple(
        tensor_shape.scalar() for _ in range(len(record_defaults)))
    self._output_types = tuple(d.dtype for d in self._record_defaults)
    self._output_classes = tuple(
        ops.Tensor for _ in range(len(record_defaults)))
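
In later releases this class surfaced as `tf.data.experimental.CsvDataset`; a hedged eager-mode sketch, where `my_file0.csv` is the hypothetical file from the docstring above:

```python
import tensorflow as tf

dataset = tf.data.experimental.CsvDataset(
    "my_file0.csv",  # hypothetical file from the docstring example
    record_defaults=[tf.float32, tf.constant([0.0]), tf.int32],
    select_cols=[1, 2, 3])
for row in dataset:  # eager iteration replaces the one-shot iterator
    print(tuple(t.numpy() for t in row))
```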
Code example #41
  def apply_op(self, op_type_name, name=None, **keywords):
    # pylint: disable=g-doc-args
    """Add a node invoking a registered Op to a graph.

    Example usage:
       # input1 and input2 can be Tensors or anything ops.convert_to_tensor()
       # will convert to a Tensor.
       op_def_library.apply_op("op", input1=input1, input2=input2)
       # Can specify a node name.
       op_def_library.apply_op("op", input1=input1, name="node_name")
       # Must use keyword arguments, with the names specified in the OpDef.
       op_def_library.apply_op("op", input_name=input, attr_name=attr)

    All attrs must either be inferred from an input or specified.
    (If inferred, the attr must not be specified.)  If an attr has a default
    value specified in the Op's OpDef, then you may pass None as the value
    of that attr to get the default.

    Args:
      op_type_name: string. Must match the name field of a registered Op.
      name: string. Optional name of the created op.
      **keywords: input Tensor and attr arguments specified by name,
        and optional parameters to pass when constructing the Operation.

    Returns:
      The Tensor(s) representing the output of the operation, or the Operation
      itself if there are no outputs.

    Raises:
      RuntimeError: On some errors.
      TypeError: On some errors.
      ValueError: On some errors.
    """
    op_info = self._ops.get(op_type_name, None)
    if op_info is None:
      raise RuntimeError("Unrecognized Op name " + op_type_name)
    op_def = op_info.op_def

    # Determine the graph context.
    try:
      # Need to flatten all the arguments into a list.
      # pylint: disable=protected-access
      g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
      # pylint: enable=protected-access
    except AssertionError as e:
      raise RuntimeError(
          "Cannot determine graph for Op '%s' due to: %s"
          % (op_type_name, e.message))

    # Default name if not specified.
    if name is None:
      name = op_type_name

    # Check for deprecation
    deprecation_version = op_def.deprecation.version
    if deprecation_version:
      producer = g.graph_def_versions.producer
      if producer >= deprecation_version:
        raise NotImplementedError(
            ("Op %s is not available in GraphDef version %d. "
             "It has been removed in version %d. %s.") %
            (op_type_name, producer, deprecation_version,
             op_def.deprecation.explanation))

    # Fill in the list of default types for all "type" attrs.  This
    # will be used to choose a preferred dtype to convert to in the
    # absence of input type information.
    #
    # TODO(b/31302892): Currently the defaults don't work in the right
    # way if you have two inputs, one of whose type resolution depends
    # on the other.  Handling this will require restructuring this code
    # significantly.
    default_type_attr_map = {}
    for attr_def in op_def.attr:
      if attr_def.type != "type":
        continue
      key = attr_def.name
      if attr_def.HasField("default_value"):
        default_type_attr_map[key] = dtypes.as_dtype(
            attr_def.default_value.type)
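        # For example, an OpDef attr declared as `T: type = DT_FLOAT` adds
        # {"T": dtypes.float32} to default_type_attr_map.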

    # Requires that op_def has passed validation (using the C++
    # ValidateOpDef() from ../framework/op_def_util.h).
    attrs = {}
    inputs = []
    input_types = []
    with g.as_default(), ops.name_scope(name) as scope:

      # Perform input type inference
      inferred_from = {}
      for input_arg in op_def.input_arg:
        input_name = input_arg.name
        if input_name in keywords:
          values = keywords.pop(input_name)
        elif input_name + "_" in keywords:
          # Handle the case where the name is a keyword or built-in
          # for Python so we use the name + _ instead.
          input_name += "_"
          values = keywords.pop(input_name)
        else:
          raise TypeError("No argument for input " + input_name)

        # Goals:
        # * Convert values to Tensors if it contains constants.
        # * Verify that values is a list if that matches the input_arg's
        #   type.
        # * If the input_arg's type is determined by attrs, either set
        #   those attrs and validate those attr values are legal (if
        #   they have not yet been set) or validate the input matches
        #   the type indicated by the attrs (if they have already been
        #   inferred via an earlier input).
        # * If the input_arg has an explicit type, make sure the input
        #   conforms.

        if _IsListParameter(input_arg):
          if not _IsListValue(values):
            raise TypeError(
                "Expected list for '%s' argument to '%s' Op, not %s." %
                (input_name, op_type_name, values))
          # In cases where we expect all elements of the list to have the
          # same dtype, try to cast non-Tensor elements to that type.
          dtype = None
          default_dtype = None
          if input_arg.type != types_pb2.DT_INVALID:
            dtype = input_arg.type
          elif input_arg.number_attr:
            if input_arg.type_attr in attrs:
              dtype = attrs[input_arg.type_attr]
            else:
              for t in values:
                if isinstance(t, ops.Tensor):
                  dtype = t.dtype
                  break

            # dtype still not found, prefer using the default dtype
            # from the attr.
            if dtype is None and input_arg.type_attr in default_type_attr_map:
              default_dtype = default_type_attr_map[input_arg.type_attr]

          try:
            if not input_arg.is_ref and dtype:
              dtype = dtypes.as_dtype(dtype).base_dtype
            values = ops.convert_n_to_tensor(
                values,
                name=input_arg.name,
                dtype=dtype if dtype else None,
                preferred_dtype=default_dtype,
                as_ref=input_arg.is_ref)
            if input_arg.number_attr and len(
                set(v.dtype.base_dtype for v in values)) > 1:
              raise TypeError()  # All types should match.
          except (TypeError, ValueError):
            # What types does the conversion function think values have?
            observed_types = []
            for value in values:
              try:
                converted_value = ops.convert_to_tensor(
                    value, as_ref=input_arg.is_ref)
                observed_types.append(converted_value.dtype.base_dtype.name)
              except (TypeError, ValueError):
                observed_types.append("<NOT CONVERTIBLE TO TENSOR>")
            observed = ", ".join(observed_types)

            prefix = (
                "Tensors in list passed to '%s' of '%s' Op have types [%s]" %
                (input_name, op_type_name, observed))
            if input_arg.number_attr:
              if input_arg.type != types_pb2.DT_INVALID:
                raise TypeError("%s that do not match expected type %s." %
                                (prefix, dtype.name))
              elif input_arg.type_attr in attrs:
                raise TypeError("%s that do not match type %s inferred from "
                                "earlier arguments." %
                                (prefix, dtype.name))
              else:
                raise TypeError("%s that don't all match." % prefix)
            else:
              raise TypeError("%s that are invalid." % prefix)

          types = [x.dtype for x in values]
          inputs.extend(values)
        else:
          # In cases where we have an expected type, try to convert non-Tensor
          # arguments to that type.
          dtype = None
          default_dtype = None
          if input_arg.type != types_pb2.DT_INVALID:
            dtype = input_arg.type
          elif input_arg.type_attr in attrs:
            dtype = attrs[input_arg.type_attr]
          elif input_arg.type_attr in default_type_attr_map:
            # The dtype could not be inferred solely from the inputs,
            # so we prefer the attr's default so that code adding a new
            # attr with a default stays backwards compatible.
            default_dtype = default_type_attr_map[input_arg.type_attr]

          try:
            values = ops.convert_to_tensor(
                values,
                name=input_arg.name,
                dtype=dtype,
                as_ref=input_arg.is_ref,
                preferred_dtype=default_dtype)
          except ValueError:
            # What type does convert_to_tensor think it has?
            observed = ops.convert_to_tensor(values,
                                             as_ref=input_arg.is_ref).dtype.name
            prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
                      (input_name, op_type_name, observed))
            if input_arg.type != types_pb2.DT_INVALID:
              raise TypeError("%s expected type of %s." %
                              (prefix, dtypes.as_dtype(input_arg.type).name))
            else:
              # Update the maps with the default, if needed.
              k = input_arg.type_attr
              if k in default_type_attr_map:
                if k not in attrs:
                  attrs[k] = default_type_attr_map[k]
                  if k not in inferred_from:
                    inferred_from[k] = "Default in OpDef"

              raise TypeError(
                  "%s type %s of argument '%s'." %
                  (prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
                   inferred_from[input_arg.type_attr]))

          types = [values.dtype]
          inputs.append(values)
        base_types = [x.base_dtype for x in types]

        if input_arg.number_attr:
          # <number-attr> * <type> or <number-attr> * <type-attr>
          if input_arg.number_attr in attrs:
            if len(values) != attrs[input_arg.number_attr]:
              raise ValueError(
                  "List argument '%s' to '%s' Op with length %d must match "
                  "length %d of argument '%s'." %
                  (input_name, op_type_name, len(values),
                   attrs[input_arg.number_attr],
                   inferred_from[input_arg.number_attr]))
          else:
            attrs[input_arg.number_attr] = len(values)
            inferred_from[input_arg.number_attr] = input_name
            num_attr = _Attr(op_def, input_arg.number_attr)
            if num_attr.has_minimum and len(values) < num_attr.minimum:
              raise ValueError(
                  "List argument '%s' to '%s' Op with length %d shorter "
                  "than minimum length %d." %
                  (input_name, op_type_name, len(values), num_attr.minimum))
          # All tensors must have the same base type.
          if any(bt != base_types[0] for bt in base_types):
            raise TypeError(
                "All tensors passed to '%s' of '%s' Op "
                "must have the same type." %
                (input_name, op_type_name))
          if input_arg.type != types_pb2.DT_INVALID:
            # <number-attr> * <type> case
            if base_types and base_types[0] != input_arg.type:
              assert False, "Unreachable"
          elif input_arg.type_attr in attrs:
            # <number-attr> * <type-attr> case, where <type-attr> already
            # has an inferred value.
            if base_types and base_types[0] != attrs[input_arg.type_attr]:
              assert False, "Unreachable"
          else:
            # <number-attr> * <type-attr> case, where we are now setting
            # the <type-attr> based on this input
            if not base_types:
              raise TypeError(
                  "Don't know how to infer type variable from empty input "
                  "list passed to input '%s' of '%s' Op." %
                  (input_name, op_type_name))
            attrs[input_arg.type_attr] = base_types[0]
            inferred_from[input_arg.type_attr] = input_name
            type_attr = _Attr(op_def, input_arg.type_attr)
            _SatisfiesTypeConstraint(base_types[0], type_attr)
        elif input_arg.type_attr:
          # <type-attr>
          attr_value = base_types[0]
          if input_arg.type_attr in attrs:
            if attrs[input_arg.type_attr] != attr_value:
              assert False, "Unreachable"
          else:
            for base_type in base_types:
              _SatisfiesTypeConstraint(base_type,
                                       _Attr(op_def, input_arg.type_attr))
            attrs[input_arg.type_attr] = attr_value
            inferred_from[input_arg.type_attr] = input_name
        elif input_arg.type_list_attr:
          # <type-list-attr>
          attr_value = base_types
          if input_arg.type_list_attr in attrs:
            if attrs[input_arg.type_list_attr] != attr_value:
              raise TypeError(
                  "Input '%s' of '%s' Op has type list of %s that does not "
                  "match type list %s of argument '%s'." %
                  (input_name, op_type_name,
                   ", ".join(dtypes.as_dtype(x).name for x in attr_value),
                   ", ".join(dtypes.as_dtype(x).name
                             for x in attrs[input_arg.type_list_attr]),
                   inferred_from[input_arg.type_list_attr]))
          else:
            for base_type in base_types:
              _SatisfiesTypeConstraint(base_type,
                                       _Attr(op_def, input_arg.type_list_attr))
            attrs[input_arg.type_list_attr] = attr_value
            inferred_from[input_arg.type_list_attr] = input_name
        else:
          # single Tensor with specified type
          if base_types[0] != input_arg.type:
            assert False, "Unreachable"

        if input_arg.is_ref:
          if not all(x.is_ref_dtype for x in types):
            raise TypeError(
                "Input '%s' of '%s' Op requires l-value input" %
                (input_name, op_type_name))
          input_types.extend(types)
        else:
          input_types.extend(base_types)

      # Process remaining attrs
      for attr in op_def.attr:
        # Skip attrs that have already had their values inferred
        if attr.name in attrs:
          if attr.name in keywords:
            raise TypeError(
                "Should not specify value for inferred attr '%s'." % attr.name)
          continue
        if attr.name in keywords:
          attrs[attr.name] = keywords.pop(attr.name)
        elif attr.name + "_" in keywords:
          # Attrs whose names match Python keywords have an extra '_'
          # appended, so we must check for that as well.
          attrs[attr.name] = keywords.pop(attr.name + "_")
        else:
          raise TypeError("No argument for attr " + attr.name)

      # Convert attr values to AttrValue protos.
      attr_protos = {}
      for attr_def in op_def.attr:
        key = attr_def.name
        value = attrs[key]
        attr_value = attr_value_pb2.AttrValue()
        if attr_def.HasField("default_value") and value is None:
          attr_value.CopyFrom(attr_def.default_value)
          attr_protos[key] = attr_value
          continue
        if attr_def.type.startswith("list("):
          if not _IsListValue(value):
            raise TypeError("Expected list for attr " + key)
          if attr_def.has_minimum:
            if len(value) < attr_def.minimum:
              raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
                               "less than minimum %d." %
                               (key, op_type_name, len(value),
                                attr_def.minimum))
          attr_value.list.SetInParent()
        if attr_def.type == "string":
          attr_value.s = _MakeStr(value, key)
          if attr_def.HasField("allowed_values"):
            if attr_value.s not in attr_def.allowed_values.list.s:
              raise ValueError(
                  "Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
                  (key, op_type_name, compat.as_text(attr_value.s),
                   '", "'.join(map(compat.as_text,
                                   attr_def.allowed_values.list.s))))
        elif attr_def.type == "list(string)":
          attr_value.list.s.extend([_MakeStr(x, key) for x in value])
          if attr_def.HasField("allowed_values"):
            for x in attr_value.list.s:
              if x not in attr_def.allowed_values.list.s:
                raise ValueError(
                    "Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
                    (key, op_type_name, compat.as_text(x),
                     '", "'.join(map(compat.as_text,
                                     attr_def.allowed_values.list.s))))
        elif attr_def.type == "int":
          attr_value.i = _MakeInt(value, key)
          if attr_def.has_minimum:
            if attr_value.i < attr_def.minimum:
              raise ValueError(
                  "Attr '%s' of '%s' Op passed %d less than minimum %d." %
                  (key, op_type_name, attr_value.i, attr_def.minimum))
        elif attr_def.type == "list(int)":
          attr_value.list.i.extend([_MakeInt(x, key) for x in value])
        elif attr_def.type == "float":
          attr_value.f = _MakeFloat(value, key)
        elif attr_def.type == "list(float)":
          attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
        elif attr_def.type == "bool":
          attr_value.b = _MakeBool(value, key)
        elif attr_def.type == "list(bool)":
          attr_value.list.b.extend([_MakeBool(x, key) for x in value])
        elif attr_def.type == "type":
          attr_value.type = _MakeType(value, attr_def)
        elif attr_def.type == "list(type)":
          attr_value.list.type.extend(
              [_MakeType(x, attr_def) for x in value])
        elif attr_def.type == "shape":
          attr_value.shape.CopyFrom(_MakeShape(value, key))
        elif attr_def.type == "list(shape)":
          attr_value.list.shape.extend(
              [_MakeShape(x, key) for x in value])
        elif attr_def.type == "tensor":
          attr_value.tensor.CopyFrom(_MakeTensor(value, key))
        elif attr_def.type == "list(tensor)":
          attr_value.list.tensor.extend(
              [_MakeTensor(x, key) for x in value])
        elif attr_def.type == "func":
          if isinstance(value, compat.bytes_or_text_types):
            attr_value.func.name = value
          else:
            value.add_to_graph(ops.get_default_graph())
            attr_value.func.name = value.name
        else:
          raise TypeError("Unrecognized Attr type " + attr_def.type)

        attr_protos[key] = attr_value
      del attrs  # attrs is no longer authoritative, use attr_protos instead

      # Determine output types (possibly using attrs)
      output_types = []
      output_structure = []
      for arg in op_def.output_arg:
        types = []
        if arg.number_attr:
          n = _AttrValue(attr_protos, arg.number_attr).i
          if arg.type_attr:
            types = [_AttrValue(attr_protos, arg.type_attr).type] * n
          else:
            types = [arg.type] * n
          output_structure.append(n)
        elif arg.type_attr:
          t = _AttrValue(attr_protos, arg.type_attr)
          types = [t.type]
          output_structure.append(None)
        elif arg.type_list_attr:
          t = _AttrValue(attr_protos, arg.type_list_attr)
          types = t.list.type
          output_structure.append(len(types))
        else:
          types = [arg.type]
          output_structure.append(None)
        if arg.is_ref:
          types = [dtypes.as_dtype(x).as_ref for x in types]
        output_types.extend(types)

      if keywords:
        raise TypeError("apply_op() got unexpected keyword arguments: " +
                        ", ".join(sorted(keywords.keys())))

      # NOTE(mrry): We add an explicit colocation constraint between
      # the newly created op and any of its reference-typed inputs.
      must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
                              if arg.is_ref]
      with _MaybeColocateWith(must_colocate_inputs):
        # Add Op to graph
        op = g.create_op(op_type_name, inputs, output_types, name=scope,
                         input_types=input_types, attrs=attr_protos,
                         op_def=op_def)
        if output_structure:
          outputs = op.outputs
          res = _Restructure(ops.convert_n_to_tensor(outputs), output_structure)
          if isinstance(res, list) and not res and op_def.is_stateful:
            return op
          else:
            return res
        else:
          return op
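A hedged sketch of how a caller typically drives `apply_op`; the TF1.x-era `op_def_registry.get_registered_ops()` lookup and the use of the standard `Add` op are assumptions for illustration, not taken from the snippet above.

```python
from tensorflow.python.framework import op_def_library
from tensorflow.python.framework import op_def_registry

# Populate a library with the registered "Add" OpDef (generated op
# wrappers instead populate it from an embedded OpList proto).
op_def_lib = op_def_library.OpDefLibrary()
op_def_lib.add_op(op_def_registry.get_registered_ops()["Add"])

# Inputs may be Tensors or anything ops.convert_to_tensor() accepts.
# The "T" type attr of Add is inferred from the converted inputs, so it
# must not be passed explicitly.
total = op_def_lib.apply_op("Add", x=1, y=2, name="add_ints")
```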
Code example #42
def pipeline(computational_stages,
             pipeline_depth=None,
             gradient_accumulation_count=None,
             repeat_count=1,
             batch_serialization_iterations=1,
             inputs=None,
             infeed_queue=None,
             outfeed_queue=None,
             optimizer_function=None,
             device_mapping=None,
             pipeline_schedule=None,
             forward_propagation_stages_poplar_options=None,
             backward_propagation_stages_poplar_options=None,
             weight_update_poplar_options=None,
             offload_weight_update_variables=None,
             replicated_optimizer_state_sharding=False,
             offload_activations=None,
             offload_gradient_accumulation_buffers=None,
             replicated_weight_sharding=None,
             offload_weights=None,
             continuous_weight_updates=False,
             outfeed_loss=False,
             name=None):
  """
  Sets up a series of computational stages, where the outputs of one stage are
  the inputs to the next one. These stages are then executed in parallel across
  multiple IPUs. This approach can be used to split the model where layer(s)
  are executed on different IPUs.

  The first stage takes the `inputs` and the `infeed_queue` (if provided) as
  its inputs. If the `infeed_queue` is provided, it is automatically dequeued
  (similar to the ipu.loops API), so care must be taken to ensure that the
  signature of the first pipeline stage matches both the arguments from
  `inputs` and the `infeed_queue`; otherwise an error is thrown.

  All tensors used in the pipeline which are not TensorFlow Variables need to
  be explicitly passed as inputs to the pipeline. If an input does not change
  its value during the execution of the pipeline op
  (for example hyperparameters such as learning rate), it needs to be passed
  as part of `inputs`. Alternatively, if these values change during execution
  (for example the model processes different batches of data) the input should
  be passed through the `infeed_queue`
  (see :class:`~tensorflow.python.ipu.ipu_infeed_queue.IPUInfeedQueue`).

  When training a model, an optional `optimizer_function` function can be
  provided. This function takes all the outputs from the last computational
  stage as inputs, and returns an instance of `OptimizerFunctionOutput` that
  is used to generate the backwards pass of the model using the TensorFlow
  Optimizer API. This will internally create corresponding backpropagation
  pipeline stages for each pipeline stage and colocate them such that the
  activations and weights required for the gradient calculation and
  application stay on the device in order to minimise the number of copies
  between IPUs.

  Note that the gradients, which are calculated by the `compute_gradients`
  function, will be accumulated automatically during the execution of the
  pipeline, unless `continuous_weight_updates` is enabled.

  If the last computational stage has any outputs, then an `outfeed_queue`
  (see :class:`~tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue`)
  is required and all the outputs from the last computational stage are enqueued
  to the `outfeed_queue`.

  Note that pipelining also supports recomputation; to enable it, use the
  `tensorflow.ipu.utils.set_recomputation_options()` function when configuring
  the device.

  For example a simple inference network for the MNIST can be split across two
  IPUs:

  .. code-block:: python

    from tensorflow import keras

    # Create the dataset
    #...

    # Create the data queues from/to IPU.
    infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, "infeed")
    outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("outfeed")

    # Create a pipelined model which is split across two stages.
    def stage1(image):
      partial = keras.layers.Dense(256, activation=tf.nn.relu)(image)
      partial = keras.layers.Dense(128, activation=tf.nn.relu)(partial)
      return partial

    def stage2(partial):
      logits = keras.layers.Dense(10)(partial)
      probabilities = tf.nn.softmax(logits)
      classes = tf.argmax(input=logits, axis=1)
      return probabilities, classes

    def model():
      with variable_scope.variable_scope("vs", use_resource=True):
        pipeline_op = pipelining_ops.pipeline(
                          computational_stages=[stage1, stage2],
                          gradient_accumulation_count=250,
                          repeat_count=2,
                          inputs=[],
                          infeed_queue=infeed_queue,
                          outfeed_queue=outfeed_queue,
                          device_mapping=[3,1],
                          name="Pipeline")
      return pipeline_op

    with ops.device("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model, inputs=[])

    outfeed_op = outfeed_queue.dequeue()
    with tf.Session() as sess:
      result = sess.run(compiled_model)
      probabilities, classes = sess.run(outfeed_op)

  In this setup, the model is split across two IPUs. By default, the first two
  layers would be executed on the first IPU, and the third layer together with
  the probabilities and classes on the second IPU. Here, however,
  `device_mapping` overrides the default IPU allocation, so the first two
  layers are executed on the fourth IPU while the third layer, probabilities
  and classes are placed on the second IPU.

  This creates a pipeline of depth 250 (specified by the
  `gradient_accumulation_count`), which means each pipeline stage is executed
  250 times.

  This pipeline is then executed 2 times (specified by the `repeat_count`).
  The results of the pipeline (probabilities and classes) are returned to the
  host by the outfeed queue.

  We can also train this network by providing `optimizer_function`:

  .. code-block:: python

    from tensorflow import keras

    # Create the dataset
    #...

    # Create the data queues from/to IPU.
    infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, "infeed")
    outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue("outfeed")

    # Create a pipelined model which is split across two stages.
    def stage1(lr, images, labels):
      partial = keras.layers.Dense(256, activation=tf.nn.relu)(images)
      partial = keras.layers.Dense(128, activation=tf.nn.relu)(partial)
      return lr, partial, labels

    def stage2(lr, partial, labels):
      logits = keras.layers.Dense(10)(partial)
      cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                            labels=labels, logits=logits)
      loss = tf.reduce_mean(cross_entropy)
      return lr, loss

    def optimizer_function(lr, loss):
      optimizer = tf.train.GradientDescentOptimizer(lr)
      return pipelining_ops.OptimizerFunctionOutput(optimizer, loss)

    def model(lr):
      with variable_scope.variable_scope("vs", use_resource=True):
        pipeline_op = pipelining_ops.pipeline(
                          computational_stages=[stage1, stage2],
                          gradient_accumulation_count=128,
                          repeat_count=10,
                          inputs=[lr],
                          infeed_queue=infeed_queue,
                          outfeed_queue=outfeed_queue,
                          optimizer_function=optimizer_function,
                          name="Pipeline")
      return pipeline_op

    with ops.device('cpu'):
      lr = tf.placeholder(np.float16, [])

    with ops.device("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model, inputs=[lr])

    outfeed_op = outfeed_queue.dequeue()
    with tf.Session() as sess:
      result = sess.run(compiled_model, {lr: 0.01})
      losses = sess.run(outfeed_op)

  Here the `tf.train.GradientDescentOptimizer` generates the pipeline stages
  which calculate the gradients and apply them to the weights. Note how the
  loss is returned to the host by the outfeed queue.

  If a model requires multiple computational pipeline stages to access the same
  `tf.Variable`, then all of these computational stages need to be placed on the
  same IPU using the `device_mapping` argument.

  Note that modifying `tf.Variable` values in a pipeline stage and/or during the
  gradient calculation will result in undefined behavior. These variables can
  only be modified by the `apply_gradients` member function of the applied
  Optimizer.

  Args:
    computational_stages: a list of python functions, where each function
      represents a computational pipeline stage. The function takes the
      outputs of the previous pipeline stage as its inputs.
    gradient_accumulation_count: the number of times each pipeline stage will
      be executed.
    repeat_count: the number of times the pipeline will be executed.
    batch_serialization_iterations: number of times a loop executes to compute a
      batch on each pipeline stage execution. Currently only supported with the
      `PipelineSchedule.Sequential`.
    inputs: arguments passed to the first pipeline stage.
    infeed_queue: optional IPUInfeedQueue, if passed, it is dequeued and
      passed as an input in the first pipeline stage.
    outfeed_queue: IPUOutfeedQueue, required if the last computational stage
      has any outputs. The outputs of these are enqueued to this queue and
      they can be accessed on the host.
    optimizer_function: optional Python function which takes the output of the
      last computational stage as parameters and returns an instance of
      `pipelining_ops.OptimizerFunctionOutput` in order to generate the
      back-propagation and weight-update parts of the model suitable for
      training.
    device_mapping: If provided, a list of length equal to the number of
      computational stages. An element at index `i` in the list represents which
      IPU the computational stage `computational_stages[i]` should reside on.
      This can be used to make sure computational stages which share
      `tf.Variable`s are resident on the same IPU.
    pipeline_schedule: Which scheduling algorithm to use for pipeline
      lowering. Defaults to `PipelineSchedule.Grouped`.
    forward_propagation_stages_poplar_options: If provided, a list of length
      equal to the number of computational stages. Each element is a
      PipelineStageOptions object which allows for fine grain control of the
      Poplar options for a given forward propagation computational stage.
    backward_propagation_stages_poplar_options: If provided, a list of length
      equal to the number of computational stages. Each element is a
      PipelineStageOptions object which allows for fine grained control of the
      Poplar options for a given backward propagation computational stage.
    weight_update_poplar_options: If provided, a PipelineStageOptions object
      which allows for fine grained control of the Poplar options for the
      weight update stage.
    offload_weight_update_variables: When enabled, any `tf.Variable` which is
      only used by the weight update of the pipeline (for example the
      accumulator variable when using the `tf.MomentumOptimizer`), will be
      stored in the remote memory. During the weight update this variable will
      be streamed onto the device and then streamed back to the remote memory
      after it has been updated. Requires the machine to be configured with
      support for `Poplar remote buffers`. Offloading variables into remote
      memory can reduce maximum memory liveness, but can also increase the
      computation time of the weight update.
      When set to `None` the variables will be placed in either in-processor or
      remote memory automatically based on the current best placement strategy.
      Note that this option has no effect for inference only pipelines.
    replicated_optimizer_state_sharding: If True, any `tf.Variable` which is
      offloaded (for example the accumulator variable when using the
      `tf.MomentumOptimizer`), will be partitioned across the replicas. This
      can exploit the additional bandwidth of the IPU-Links to improve overall
      throughput.
      Note that this option has no effect for inference only pipelines.
    offload_activations: When enabled, all the activations for the batches which
      are not being executed by the pipeline stages at the given time are stored
      in remote memory. Requires the machine to be configured with support for
      `Poplar remote buffers`. Offloading activations into remote memory can
      reduce maximum memory liveness, but can also increase the computation time
      as activations have to be copied from/to the device(s).
      When set to `None`, the activations might be offloaded when beneficial.
      This feature is currently only supported when the pipeline schedule is
      `PipelineSchedule.Sequential` and `batch_serialization_iterations > 1`.
    offload_gradient_accumulation_buffers: When enabled, all the gradient
      accumulation buffers are stored in remote memory. Offloading gradient
      accumulation buffers into remote memory can reduce maximum memory
      liveness, but can also increase the computation time as the buffers have
      to be copied to the device, updated and the copied off the device.
      Requires the machine to be configured with support for `Poplar remote
      buffers`.
      When set to `None`, the gradient accumulation buffers might be
      offloaded when beneficial.
      Note that this option has no effect for inference only pipelines.
    replicated_weight_sharding: When enabled and running a replicated model, any
      `tf.Variable`s used by the pipeline stage computations (excluding those
      only used by the weight update), will be partitioned across the replicas.
      Whenever a partitioned `tf.Variable` is accessed, it will first be
      all-gathered across replicas to make sure each replica has access to the
      whole `tf.Variable`. This can exploit the additional bandwidth of the
      IPU-Links to improve overall throughput.
      When set to `None`, the weights might be partitioned when beneficial.
      This feature is enabled by default when the pipeline schedule is
      `PipelineSchedule.Sequential` and `batch_serialization_iterations > 1`,
      where this option can reduce the memory usage at the cost of extra
      communication.
    offload_weights: When enabled and `replicated_weight_sharding` is enabled,
      any `tf.Variable` which are partitioned across replicas will be stored in
      `Poplar remote buffers`.  Offloading variables into remote memory can
      further reduce maximum memory liveness, but can also increase the
      computation time due to extra communication. When set to `None` the
      variables will be placed in either in-processor or remote memory
      automatically based on the current best placement strategy.
    continuous_weight_updates: ** CURRENTLY UNIMPLEMENTED ** When training,
      this option will apply the gradients to the resource variables
      immediately, rather than accumulating the gradients and applying them
      at the end of each execution of the pipeline.
    outfeed_loss: If True, the loss given by the `optimizer_function` will
      be enqueued on the outfeed, instead of the outputs from the last
      computational stage.
    name: name of this pipeline.

  Returns:
    An `Operation` that executes the pipeline.

  """
  name = name if name else "pipeline"

  if pipeline_depth:
    gradient_accumulation_count = pipeline_depth

  if not gradient_accumulation_count:
    raise ValueError("gradient_accumulation_count must be specified.")

  # Ensure inputs is a list, without casting inputs to a boolean. Casting
  # a tf.Tensor to a boolean will be interpreted as an operation in the
  # graph by Autograph.
  inputs = inputs if inputs is not None else []
  inputs = functional_ops._convert_to_list(inputs)  # pylint: disable=protected-access
  inputs = ops.convert_n_to_tensor(inputs)

  if continuous_weight_updates:
    raise NotImplementedError(
        "Continuous weight updates are currently not supported.")

  for i, input in enumerate(inputs):
    if input.dtype == dtypes.resource:
      logging.warn("Passing tensor {} by value.".format(str(input)))
      inputs[i] = input.value()

  if pipeline_schedule is None:
    pipeline_schedule = (PipelineSchedule.Sequential
                         if batch_serialization_iterations > 1 else
                         PipelineSchedule.Grouped)

  if not isinstance(pipeline_schedule, PipelineSchedule):
    raise TypeError("The given pipeline_schedule is not a member of the "
                    "PipelineSchedule enumeration.")

  if (batch_serialization_iterations > 1
      and pipeline_schedule != PipelineSchedule.Sequential):
    raise NotImplementedError("Batch serialization is only supported with the "
                              "`Sequential` schedule.")

  if offload_activations and (
      batch_serialization_iterations < 2
      or pipeline_schedule != PipelineSchedule.Sequential):
    raise NotImplementedError("Activation offloading is only supported with "
                              "the `Sequential` schedule and when "
                              "`batch_serialization_iterations > 1`.")

  if device_mapping is None:
    if batch_serialization_iterations > 1:
      device_mapping = [0] * len(computational_stages)
    else:
      device_mapping = list(range(len(computational_stages)))

  if not isinstance(computational_stages, (list, tuple)):
    raise TypeError(
        "computational_stages argument needs to be a list or a tuple.")

  if infeed_queue:
    if not isinstance(infeed_queue, ipu_infeed_queue.IPUInfeedQueue):
      raise TypeError("infeed_queue is not an instance of "
                      "ipu_infeed_queue.IPUInfeedQueue")

  if outfeed_queue:
    if not isinstance(outfeed_queue, ipu_outfeed_queue.IPUOutfeedQueue):
      raise TypeError("outfeed_queue is not an instance of "
                      "ipu_outfeed_queue.IPUOutfeedQueue")

  # We expect at least two stages.
  if len(computational_stages) < 2:
    raise ValueError("Pipeline requires at least two computational stages.")

  if not isinstance(device_mapping, (list, tuple)):
    raise TypeError("device_mapping argument needs to be a list or a tuple.")

  if len(device_mapping) != len(computational_stages):
    raise ValueError(
        "Each stage must be mapped to an IPU: %d mappings != %d stages" %
        (len(device_mapping), len(computational_stages)))

  # TODO(T18660) interleaved schedule does not support multiple stages on the
  # same IPU during training.
  if pipeline_schedule == PipelineSchedule.Interleaved and len(
      device_mapping) != len(set(device_mapping)) and optimizer_function:
    raise NotImplementedError(
        "The pipelining schedule 'Interleaved' does not currently support "
        "multiple pipeline stages on the same device for training graphs. "
        "Please use a different pipeline schedule.")

  if (pipeline_schedule == PipelineSchedule.Sequential
      and batch_serialization_iterations > 1
      and len(set(device_mapping)) != 1):
    raise NotImplementedError(
        "When using batch serialization, all the pipeline stages need to be "
        "mapped to a single IPU.")

  def bool_to_three_state(value, default=None):
    if value is None:
      return default if default else backend_config_pb2.ThreeState.Name(
          backend_config_pb2.THREESTATE_UNDEFINED)
    elif value:
      return backend_config_pb2.ThreeState.Name(
          backend_config_pb2.THREESTATE_ON)
    return backend_config_pb2.ThreeState.Name(
        backend_config_pb2.THREESTATE_OFF)
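  # For example, with no `default` and the standard ThreeState value names:
  #   bool_to_three_state(None)  -> "THREESTATE_UNDEFINED"
  #   bool_to_three_state(True)  -> "THREESTATE_ON"
  #   bool_to_three_state(False) -> "THREESTATE_OFF"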

  # Convert some of the binary options into three states.
  offload_weight_update_variables = bool_to_three_state(
      offload_weight_update_variables)
  replicated_optimizer_state_sharding = bool_to_three_state(
      replicated_optimizer_state_sharding,
      default=offload_weight_update_variables)
  offload_activations = bool_to_three_state(offload_activations)
  offload_gradient_accumulation_buffers = bool_to_three_state(
      offload_gradient_accumulation_buffers)
  replicated_weight_sharding = bool_to_three_state(replicated_weight_sharding)
  offload_weights = bool_to_three_state(offload_weights,
                                        default=replicated_weight_sharding)

  # Function for setting up and validating the per stage Poplar options.
  def validate_stage_options_and_populate_proto(stages_poplar_options,
                                                proto_list, name):
    if stages_poplar_options is None:
      stages_poplar_options = [
          PipelineStageOptions() for _ in range(len(computational_stages))
      ]

    if not isinstance(stages_poplar_options, (list, tuple)):
      raise TypeError(
          "%s must be a list or a tuple of PipelineStageOptions objects." %
          (name))

    if len(stages_poplar_options) != len(computational_stages):
      raise ValueError(
          "%s must be a list or a tuple of PipelineStageOptions objects of "
          "length %d (same number as the number of computational stages) but "
          "is %d." %
          (name, len(computational_stages), len(stages_poplar_options)))

    for stage_options in stages_poplar_options:
      if not isinstance(stage_options, PipelineStageOptions):
        raise TypeError(
            "Expected all elements of %s to be of type PipelineStageOptions, "
            "but got %s instead." % (name, str(stage_options)))

    for stage_options in stages_poplar_options:
      proto_list.append(stage_options.get_proto())

  pipeline_poplar_config = pipeline_config_pb2.PipelinePoplarConfig()

  validate_stage_options_and_populate_proto(
      forward_propagation_stages_poplar_options,
      pipeline_poplar_config.forward_stages,
      "forward_propagation_stages_poplar_options")

  if optimizer_function:
    validate_stage_options_and_populate_proto(
        backward_propagation_stages_poplar_options,
        pipeline_poplar_config.backward_stages,
        "backward_propagation_stages_poplar_options")

    if weight_update_poplar_options is None:
      weight_update_poplar_options = PipelineStageOptions()

    if not isinstance(weight_update_poplar_options, PipelineStageOptions):
      raise TypeError(
          "weight_update_poplar_options to be of type PipelineStageOptions, "
          "but got %s instead." % (str(weight_update_poplar_options)))

    pipeline_poplar_config.resource_update.CopyFrom(
        weight_update_poplar_options.get_proto())

  if outfeed_loss and not optimizer_function:
    raise ValueError(
        "An optimizer_function must be provided when outfeed_loss is True")

  control_outputs = []

  def _pipeline(*args):
    outputs = args
    for stage_id, stage in enumerate(computational_stages):
      stage_infeed_queue = infeed_queue if stage_id == 0 else None
      if stage_id == len(computational_stages) - 1 and not optimizer_function:
        stage_outfeed_queue = outfeed_queue
      else:
        stage_outfeed_queue = None

      stage_name = name + "_stage_" + str(stage_id)
      outputs = _pipeline_stage(stage,
                                stage_id,
                                device_mapping[stage_id],
                                outputs,
                                training=optimizer_function is not None,
                                infeed_queue=stage_infeed_queue,
                                outfeed_queue=stage_outfeed_queue,
                                name=stage_name)

    if optimizer_function:
      outputs = functional_ops._convert_to_list(outputs)  # pylint: disable=protected-access

      # Get the output from the optimizer function
      opt_fn = optimizer_function(*outputs)
      loss = opt_fn.loss
      opt = opt_fn.opt

      # Enqueue loss or any output tensors to the outfeed.
      if outfeed_loss:
        if not outfeed_queue:
          raise ValueError(
              "An outfeed_queue must be provided when outfeed_loss is True")
        control_outputs.append(outfeed_queue.enqueue(opt_fn.loss))
      elif outputs:
        if not outfeed_queue:
          raise ValueError(
              "The last computational stage has tensor outputs: %s, but no"
              " outfeed_queue has been provided." %
              (', '.join(str(t) for t in outputs)))
        control_outputs.append(outfeed_queue.enqueue(outputs))

      # Call the compute gradients function - this will be automatically put
      # into pipeline stages.
      grads_and_vars = opt.compute_gradients(loss)
      # Insert gradient accumulation ops.
      accumulated_grads_and_vars = []
      for grad, var in grads_and_vars:
        if grad is not None:
          with ops.colocate_with(grad):
            # Create an accumulator - variable is used as reference for shape/layout.
            accumulator = gen_poputil_ops.gradient_accumulator_create(var)
            # Add the gradients to the accumulator.
            accumulator = gen_poputil_ops.gradient_accumulator_add(
                accumulator, grad)
            # Sink the accumulators.
            grad = gen_poputil_ops.gradient_accumulator_sink(
                accumulator, num_mini_batches=gradient_accumulation_count)
        # Use the accumulated gradients.
        accumulated_grads_and_vars.append((grad, var))

      # Create an explicit function call for the apply gradients - note that we
      # allow external captures here.
      apply_grad_ops = []

      def resource_update_():
        apply_grads = opt.apply_gradients(accumulated_grads_and_vars)
        apply_grad_ops.append(apply_grads)

      with ops.name_scope(name + "/WU") as scope:
        func_graph, captured_args = functional_ops._compile_function(  # pylint: disable=protected-access
            resource_update_, [], scope, apply_grad_ops, True)

      # Create the pipeline resource update stage and lower the function into XLA.
      with ops.control_dependencies(list(func_graph.control_captures)):
        outputs = gen_functional_ops.resource_update(
            captured_args,
            to_apply=util.create_new_tf_function(func_graph),
            Tout=func_graph.output_types,
            output_shapes=func_graph.output_shapes,
            offload_weight_update_variables=offload_weight_update_variables,
            replicated_optimizer_state_sharding=
            replicated_optimizer_state_sharding,
            num_batches_to_accumulate=gradient_accumulation_count)

    if not isinstance(outputs, ops.Operation):
      if not outfeed_queue:
        raise ValueError(
            "The last computational stage has tensor outputs: %s, but no"
            " outfeed_queue has been provided." % (', '.join(
                str(t) for t in functional_ops._convert_to_list(outputs))))  # pylint: disable=protected-access

      else:
        raise ValueError(
            "Expected the pipeline resource update stage to output a "
            "tf.Operation, got %s instead." % (str(output)))

    control_outputs.append(outputs)

  with ops.name_scope(name) as scope:
    # pylint: disable=protected-access
    try:
      func_graph, captured_args = functional_ops._compile_function(
          _pipeline, inputs, scope, control_outputs)
    except functional_ops._InvalidCaptureException as e:
      raise ValueError(
          "Trying to capture the tensor %s which is not a resource. This tensor"
          " needs to be passed as either part of the `input` or `infeed_queue`"
          " of the pipeline." % (str(e)))
    # pylint: enable=protected-access

    # Create the pipeline and lower the function into XLA.
    with ops.control_dependencies(list(func_graph.control_captures)):
      output = gen_functional_ops.pipeline(
          captured_args,
          to_apply=util.create_new_tf_function(func_graph),
          Tout=func_graph.output_types,
          output_shapes=func_graph.output_shapes,
          gradient_accumulation_count=gradient_accumulation_count,
          batch_serialization_iterations=batch_serialization_iterations,
          repeat_count=repeat_count,
          schedule=int(pipeline_schedule),
          pipeline_poplar_config=json_format.MessageToJson(
              pipeline_poplar_config),
          offload_activations=offload_activations,
          offload_gradient_accumulation_buffers=
          offload_gradient_accumulation_buffers,
          replicated_weight_sharding=replicated_weight_sharding,
          offload_weights=offload_weights)
    if not isinstance(output, ops.Operation):
      raise ValueError(
          "Expected the pipeline to output a tf.Operation, got %s instead." %
          (str(output)))

    return output
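The accumulate-then-apply behaviour that the pipeline builds out of `gradient_accumulator_create`/`_add`/`_sink` can be sketched independently of the IPU ops. This is a plain-NumPy illustration of the semantics only, not the implementation:

```python
import numpy as np

def accumulated_update(var, grads_per_batch, lr, gradient_accumulation_count):
  # Sum the gradients of `gradient_accumulation_count` mini-batches
  # (create + repeated add), then apply them in one weight update (sink).
  accumulator = np.zeros_like(var)
  for grad in grads_per_batch[:gradient_accumulation_count]:
    accumulator += grad
  return var - lr * accumulator

weights = np.ones(4, dtype=np.float32)
batch_grads = [np.full(4, 0.1, dtype=np.float32) for _ in range(8)]
print(accumulated_update(weights, batch_grads, lr=0.01,
                         gradient_accumulation_count=8))
```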
Code example #43
def gradient_trees_prediction_eager_fallback(tree_ensemble_handle,
                                             seed,
                                             dense_float_features,
                                             sparse_float_feature_indices,
                                             sparse_float_feature_values,
                                             sparse_float_feature_shapes,
                                             sparse_int_feature_indices,
                                             sparse_int_feature_values,
                                             sparse_int_feature_shapes,
                                             learner_config,
                                             apply_dropout,
                                             apply_averaging,
                                             center_bias,
                                             reduce_dim,
                                             use_locking=False,
                                             name=None):
    r"""This is the slowpath function for Eager mode.
  This is for function gradient_trees_prediction
  """
    _ctx = _context.context()
    if not isinstance(dense_float_features, (list, tuple)):
        raise TypeError("Expected list for 'dense_float_features' argument to "
                        "'gradient_trees_prediction' Op, not %r." %
                        dense_float_features)
    _attr_num_dense_float_features = len(dense_float_features)
    if not isinstance(sparse_float_feature_indices, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_float_feature_indices' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_float_feature_indices)
    _attr_num_sparse_float_features = len(sparse_float_feature_indices)
    if not isinstance(sparse_float_feature_values, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_float_feature_values' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_float_feature_values)
    if len(sparse_float_feature_values) != _attr_num_sparse_float_features:
        raise ValueError(
            "List argument 'sparse_float_feature_values' to 'gradient_trees_prediction' Op with length %d "
            "must match length %d of argument 'sparse_float_feature_indices'."
            % (len(sparse_float_feature_values),
               _attr_num_sparse_float_features))
    if not isinstance(sparse_float_feature_shapes, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_float_feature_shapes' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_float_feature_shapes)
    if len(sparse_float_feature_shapes) != _attr_num_sparse_float_features:
        raise ValueError(
            "List argument 'sparse_float_feature_shapes' to 'gradient_trees_prediction' Op with length %d "
            "must match length %d of argument 'sparse_float_feature_indices'."
            % (len(sparse_float_feature_shapes),
               _attr_num_sparse_float_features))
    if not isinstance(sparse_int_feature_indices, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_int_feature_indices' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_int_feature_indices)
    _attr_num_sparse_int_features = len(sparse_int_feature_indices)
    if not isinstance(sparse_int_feature_values, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_int_feature_values' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_int_feature_values)
    if len(sparse_int_feature_values) != _attr_num_sparse_int_features:
        raise ValueError(
            "List argument 'sparse_int_feature_values' to 'gradient_trees_prediction' Op with length %d "
            "must match length %d of argument 'sparse_int_feature_indices'." %
            (len(sparse_int_feature_values), _attr_num_sparse_int_features))
    if not isinstance(sparse_int_feature_shapes, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_int_feature_shapes' argument to "
            "'gradient_trees_prediction' Op, not %r." %
            sparse_int_feature_shapes)
    if len(sparse_int_feature_shapes) != _attr_num_sparse_int_features:
        raise ValueError(
            "List argument 'sparse_int_feature_shapes' to 'gradient_trees_prediction' Op with length %d "
            "must match length %d of argument 'sparse_int_feature_indices'." %
            (len(sparse_int_feature_shapes), _attr_num_sparse_int_features))
    learner_config = _execute.make_str(learner_config, "learner_config")
    apply_dropout = _execute.make_bool(apply_dropout, "apply_dropout")
    apply_averaging = _execute.make_bool(apply_averaging, "apply_averaging")
    center_bias = _execute.make_bool(center_bias, "center_bias")
    reduce_dim = _execute.make_bool(reduce_dim, "reduce_dim")
    if use_locking is None:
        use_locking = False
    use_locking = _execute.make_bool(use_locking, "use_locking")
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle,
                                                  _dtypes.resource)
    seed = _ops.convert_to_tensor(seed, _dtypes.int64)
    dense_float_features = _ops.convert_n_to_tensor(dense_float_features,
                                                    _dtypes.float32)
    sparse_float_feature_indices = _ops.convert_n_to_tensor(
        sparse_float_feature_indices, _dtypes.int64)
    sparse_float_feature_values = _ops.convert_n_to_tensor(
        sparse_float_feature_values, _dtypes.float32)
    sparse_float_feature_shapes = _ops.convert_n_to_tensor(
        sparse_float_feature_shapes, _dtypes.int64)
    sparse_int_feature_indices = _ops.convert_n_to_tensor(
        sparse_int_feature_indices, _dtypes.int64)
    sparse_int_feature_values = _ops.convert_n_to_tensor(
        sparse_int_feature_values, _dtypes.int64)
    sparse_int_feature_shapes = _ops.convert_n_to_tensor(
        sparse_int_feature_shapes, _dtypes.int64)
    _inputs_flat = [
        tree_ensemble_handle, seed
    ] + list(dense_float_features) + list(sparse_float_feature_indices) + list(
        sparse_float_feature_values
    ) + list(sparse_float_feature_shapes) + list(
        sparse_int_feature_indices) + list(sparse_int_feature_values) + list(
            sparse_int_feature_shapes)
    _attrs = ("learner_config", learner_config, "num_dense_float_features",
              _attr_num_dense_float_features, "num_sparse_float_features",
              _attr_num_sparse_float_features, "num_sparse_int_features",
              _attr_num_sparse_int_features, "use_locking", use_locking,
              "apply_dropout", apply_dropout, "apply_averaging",
              apply_averaging, "center_bias", center_bias, "reduce_dim",
              reduce_dim)
    _result = _execute.execute(b"GradientTreesPrediction",
                               2,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("GradientTreesPrediction", _inputs_flat, _attrs,
                             _result, name)
    _result = _GradientTreesPredictionOutput._make(_result)
    return _result
Code example #44
def gradient_trees_partition_examples_eager_fallback(
        tree_ensemble_handle,
        dense_float_features,
        sparse_float_feature_indices,
        sparse_float_feature_values,
        sparse_float_feature_shapes,
        sparse_int_feature_indices,
        sparse_int_feature_values,
        sparse_int_feature_shapes,
        use_locking=False,
        name=None):
    r"""This is the slowpath function for Eager mode.
  This is for function gradient_trees_partition_examples
  """
    _ctx = _context.context()
    if not isinstance(dense_float_features, (list, tuple)):
        raise TypeError("Expected list for 'dense_float_features' argument to "
                        "'gradient_trees_partition_examples' Op, not %r." %
                        dense_float_features)
    _attr_num_dense_float_features = len(dense_float_features)
    if not isinstance(sparse_float_feature_indices, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_float_feature_indices' argument to "
            "'gradient_trees_partition_examples' Op, not %r." %
            sparse_float_feature_indices)
    _attr_num_sparse_float_features = len(sparse_float_feature_indices)
    if not isinstance(sparse_float_feature_values, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_float_feature_values' argument to "
            "'gradient_trees_partition_examples' Op, not %r." %
            sparse_float_feature_values)
    if len(sparse_float_feature_values) != _attr_num_sparse_float_features:
        raise ValueError(
            "List argument 'sparse_float_feature_values' to 'gradient_trees_partition_examples' Op with length %d "
            "must match length %d of argument 'sparse_float_feature_indices'."
            % (len(sparse_float_feature_values),
               _attr_num_sparse_float_features))
    if not isinstance(sparse_float_feature_shapes, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_float_feature_shapes' argument to "
            "'gradient_trees_partition_examples' Op, not %r." %
            sparse_float_feature_shapes)
    if len(sparse_float_feature_shapes) != _attr_num_sparse_float_features:
        raise ValueError(
            "List argument 'sparse_float_feature_shapes' to 'gradient_trees_partition_examples' Op with length %d "
            "must match length %d of argument 'sparse_float_feature_indices'."
            % (len(sparse_float_feature_shapes),
               _attr_num_sparse_float_features))
    if not isinstance(sparse_int_feature_indices, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_int_feature_indices' argument to "
            "'gradient_trees_partition_examples' Op, not %r." %
            sparse_int_feature_indices)
    _attr_num_sparse_int_features = len(sparse_int_feature_indices)
    if not isinstance(sparse_int_feature_values, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_int_feature_values' argument to "
            "'gradient_trees_partition_examples' Op, not %r." %
            sparse_int_feature_values)
    if len(sparse_int_feature_values) != _attr_num_sparse_int_features:
        raise ValueError(
            "List argument 'sparse_int_feature_values' to 'gradient_trees_partition_examples' Op with length %d "
            "must match length %d of argument 'sparse_int_feature_indices'." %
            (len(sparse_int_feature_values), _attr_num_sparse_int_features))
    if not isinstance(sparse_int_feature_shapes, (list, tuple)):
        raise TypeError(
            "Expected list for 'sparse_int_feature_shapes' argument to "
            "'gradient_trees_partition_examples' Op, not %r." %
            sparse_int_feature_shapes)
    if len(sparse_int_feature_shapes) != _attr_num_sparse_int_features:
        raise ValueError(
            "List argument 'sparse_int_feature_shapes' to 'gradient_trees_partition_examples' Op with length %d "
            "must match length %d of argument 'sparse_int_feature_indices'." %
            (len(sparse_int_feature_shapes), _attr_num_sparse_int_features))
    if use_locking is None:
        use_locking = False
    use_locking = _execute.make_bool(use_locking, "use_locking")
    tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle,
                                                  _dtypes.resource)
    dense_float_features = _ops.convert_n_to_tensor(dense_float_features,
                                                    _dtypes.float32)
    sparse_float_feature_indices = _ops.convert_n_to_tensor(
        sparse_float_feature_indices, _dtypes.int64)
    sparse_float_feature_values = _ops.convert_n_to_tensor(
        sparse_float_feature_values, _dtypes.float32)
    sparse_float_feature_shapes = _ops.convert_n_to_tensor(
        sparse_float_feature_shapes, _dtypes.int64)
    sparse_int_feature_indices = _ops.convert_n_to_tensor(
        sparse_int_feature_indices, _dtypes.int64)
    sparse_int_feature_values = _ops.convert_n_to_tensor(
        sparse_int_feature_values, _dtypes.int64)
    sparse_int_feature_shapes = _ops.convert_n_to_tensor(
        sparse_int_feature_shapes, _dtypes.int64)
    _inputs_flat = ([tree_ensemble_handle] + list(dense_float_features) +
                    list(sparse_float_feature_indices) +
                    list(sparse_float_feature_values) +
                    list(sparse_float_feature_shapes) +
                    list(sparse_int_feature_indices) +
                    list(sparse_int_feature_values) +
                    list(sparse_int_feature_shapes))
    _attrs = ("num_dense_float_features", _attr_num_dense_float_features,
              "num_sparse_float_features", _attr_num_sparse_float_features,
              "num_sparse_int_features", _attr_num_sparse_int_features,
              "use_locking", use_locking)
    _result = _execute.execute(b"GradientTreesPartitionExamples",
                               1,
                               inputs=_inputs_flat,
                               attrs=_attrs,
                               ctx=_ctx,
                               name=name)
    _execute.record_gradient("GradientTreesPartitionExamples", _inputs_flat,
                             _attrs, _result, name)
    _result, = _result
    return _result
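The generated fallback above follows a fixed pattern: each list argument is type-checked, its length is recorded as an op attribute, sibling lists are checked for matching lengths, and every list is then converted element-wise (internally via `_ops.convert_n_to_tensor`) and flattened into a single input list. A rough sketch of that validation-and-conversion idiom using only public TensorFlow APIs; the function and argument names below are illustrative, not part of the generated module:

```python
import tensorflow as tf

def _convert_feature_lists(indices_list, values_list):
  """Illustrative sketch of the list-validation pattern used by the
  generated eager fallbacks; all names here are hypothetical."""
  if not isinstance(indices_list, (list, tuple)):
    raise TypeError("Expected list for 'indices_list', not %r." % indices_list)
  num_features = len(indices_list)  # becomes the N attr of the op
  if len(values_list) != num_features:
    raise ValueError(
        "List argument 'values_list' with length %d must match length %d of "
        "'indices_list'." % (len(values_list), num_features))
  # Convert every element of each list to a tensor of the expected dtype
  # (the public equivalent of convert_n_to_tensor), then flatten all lists
  # into one input list, as the fallback does with _inputs_flat.
  indices = [tf.convert_to_tensor(t, dtype=tf.int64) for t in indices_list]
  values = [tf.convert_to_tensor(t, dtype=tf.float32) for t in values_list]
  return list(indices) + list(values), num_features
```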
Code example #45
  def __init__(self,
               filenames,
               record_defaults,
               compression_type=None,
               buffer_size=None,
               header=False,
               field_delim=",",
               use_quote_delim=True,
               na_value="",
               select_cols=None):
    """Creates a `CsvDataset` by reading and decoding CSV files.

    The elements of this dataset correspond to records from the file(s).
    The files are expected to follow the RFC 4180 format
    (https://tools.ietf.org/html/rfc4180). Note that we allow leading and
    trailing spaces in int and float fields.

    For example, suppose we have a file 'my_file0.csv' with four CSV columns of
    different data types:
    ```
    abcdefg,4.28E10,5.55E6,12
    hijklmn,-5.3E14,,2
    ```

    We can construct a CsvDataset from it as follows:

    ```python
    tf.compat.v1.enable_eager_execution()

    dataset = tf.data.experimental.CsvDataset(
        "my_file*.csv",
        [tf.float32,  # Required field, use dtype or empty tensor
         tf.constant([0.0], dtype=tf.float32),  # Optional field, default to 0.0
         tf.int32,  # Required field, use dtype or empty tensor
         ],
        select_cols=[1,2,3]  # Only parse last three columns
    )
    ```

    The expected output of iterating over this dataset is:

    ```python
    for element in dataset:
      print(element)

    >> (4.28e10, 5.55e6, 12)
    >> (-5.3e14, 0.0, 2)
    ```

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      record_defaults: A list of default values for the CSV fields. Each item in
        the list is either a valid CSV `DType` (float32, float64, int32, int64,
        string), or a `Tensor` object with one of the above types. One per
        column of CSV data, with either a scalar `Tensor` default value for the
        column if it is optional, or a `DType` or empty `Tensor` if required.
        If both this and `select_cols` are specified, they must have the same
        length, and `record_defaults` is assumed to be sorted in order of
        increasing column index.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no
        compression.
      buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
        to buffer while reading files. Defaults to 4MB.
      header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
        have header line(s) that should be skipped when parsing. Defaults to
        `False`.
      field_delim: (Optional.) A `tf.string` scalar containing the delimiter
        character that separates fields in a record. Defaults to `","`.
      use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats
        double quotation marks as regular characters inside of string fields
        (ignoring RFC 4180, Section 2, Bullet 5). Defaults to `True`.
      na_value: (Optional.) A `tf.string` scalar indicating a value that will
        be treated as NA/NaN.
      select_cols: (Optional.) A sorted list of column indices to select from
        the input data. If specified, only this subset of columns will be
        parsed. Defaults to parsing all columns.
    """
    self._filenames = ops.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)
    record_defaults = [
        constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
        for x in record_defaults
    ]
    self._record_defaults = ops.convert_n_to_tensor(
        record_defaults, name="record_defaults")
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
    self._header = ops.convert_to_tensor(
        header, dtype=dtypes.bool, name="header")
    self._field_delim = ops.convert_to_tensor(
        field_delim, dtype=dtypes.string, name="field_delim")
    self._use_quote_delim = ops.convert_to_tensor(
        use_quote_delim, dtype=dtypes.bool, name="use_quote_delim")
    self._na_value = ops.convert_to_tensor(
        na_value, dtype=dtypes.string, name="na_value")
    self._select_cols = convert.optional_param_to_tensor(
        "select_cols",
        select_cols,
        argument_default=[],
        argument_dtype=dtypes.int64,
    )
    self._structure = structure.NestedStructure(
        tuple(structure.TensorStructure(d.dtype, [])
              for d in self._record_defaults))
    variant_tensor = gen_experimental_dataset_ops.experimental_csv_dataset(
        filenames=self._filenames,
        record_defaults=self._record_defaults,
        buffer_size=self._buffer_size,
        header=self._header,
        output_shapes=self._structure._flat_shapes,  # pylint: disable=protected-access
        field_delim=self._field_delim,
        use_quote_delim=self._use_quote_delim,
        na_value=self._na_value,
        select_cols=self._select_cols,
        compression_type=self._compression_type)
    super(CsvDatasetV2, self).__init__(variant_tensor)
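For completeness, here is roughly how the docstring's example plays out end to end. The file name and contents come straight from the docstring; eager execution is assumed (TF 1.x users would call `tf.compat.v1.enable_eager_execution()` first):

```python
import tensorflow as tf

# Recreate the docstring's sample file (file name taken from the docstring).
with open("my_file0.csv", "w") as f:
  f.write("abcdefg,4.28E10,5.55E6,12\n")
  f.write("hijklmn,-5.3E14,,2\n")

dataset = tf.data.experimental.CsvDataset(
    "my_file0.csv",
    [tf.float32,                            # required float column
     tf.constant([0.0], dtype=tf.float32),  # optional column, defaults to 0.0
     tf.int32],                             # required int column
    select_cols=[1, 2, 3])                  # skip the string column 0

for element in dataset:
  print(element)
# Expected, per the docstring:
#   (4.28e10, 5.55e6, 12)
#   (-5.3e14, 0.0, 2)
```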
Code example #46
def piecewise_constant(x, boundaries, values, name=None):
    """Piecewise constant from boundaries and interval values.

  Example: use a learning rate that's 1.0 for the first 100000 steps, 0.5
    for steps 100001 to 110000, and 0.1 for any additional steps.

  ```python
  global_step = tf.Variable(0, trainable=False)
  boundaries = [100000, 110000]
  values = [1.0, 0.5, 0.1]
  learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)

  # Later, whenever we perform an optimization step, we increment global_step.
  ```

  Args:
    x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
      `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
    boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
      increasing entries, and with all elements having the same type as `x`.
    values: A list of `Tensor`s or float`s or `int`s that specifies the values
      for the intervals defined by `boundaries`. It should have one more element
      than `boundaries`, and all elements should have the same type.
    name: A string. Optional name of the operation. Defaults to
      'PiecewiseConstant'.

  Returns:
    A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
    `values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
    and values[-1] when `x > boundaries[-1]`.

  Raises:
    ValueError: if types of `x` and `boundaries` do not match, or types of all
        `values` do not match or
        the number of elements in the lists does not match.
  """
    if len(boundaries) != len(values) - 1:
        raise ValueError(
            "The length of boundaries should be 1 less than the length of values"
        )
    with ops.name_scope(name, "PiecewiseConstant",
                        [x, boundaries, values, name]) as name:
        x = ops.convert_to_tensor(x)
        # Avoid explicit conversion to x's dtype. This could result in faulty
        # comparisons, for example if floats are converted to integers.
        boundaries = ops.convert_n_to_tensor(boundaries)
        for i, b in enumerate(boundaries):
            if b.dtype.base_dtype != x.dtype.base_dtype:
                # We can promote int32 boundaries to int64 without loss of precision.
                # This covers the most common case where the user passes in boundaries
                # as an array of Python integers.
                if (b.dtype.base_dtype == dtypes.int32
                        and x.dtype.base_dtype == dtypes.int64):
                    b = math_ops.cast(b, x.dtype.base_dtype)
                    boundaries[i] = b
                else:
                    raise ValueError(
                        "Boundaries (%s) must have the same dtype as x (%s)." %
                        (b.dtype.base_dtype, x.dtype.base_dtype))
        # TODO(rdipietro): Ensure that boundaries' elements are strictly increasing.
        values = ops.convert_n_to_tensor(values)
        for v in values[1:]:
            if v.dtype.base_dtype != values[0].dtype.base_dtype:
                raise ValueError(
                    "Values must have elements all with the same dtype (%s vs %s)."
                    % (values[0].dtype.base_dtype, v.dtype.base_dtype))
        pred_fn_pairs = {}
        pred_fn_pairs[x <= boundaries[0]] = lambda: values[0]
        pred_fn_pairs[x > boundaries[-1]] = lambda: values[-1]
        for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
            # Need to bind v here; can do this with lambda v=v: ...
            pred = (x > low) & (x <= high)
            pred_fn_pairs[pred] = lambda v=v: v

        # The default isn't needed here because our conditions are mutually
        # exclusive and exhaustive, but tf.case requires it.
        default = lambda: values[0]
        return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
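A minimal check of the schedule described in the docstring, assuming the TF 1.x graph-mode API (`tf.compat.v1.train.piecewise_constant`); the step values probed here are arbitrary:

```python
import tensorflow as tf

global_step = tf.compat.v1.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate = tf.compat.v1.train.piecewise_constant(
    global_step, boundaries, values)

with tf.compat.v1.Session() as sess:
  sess.run(tf.compat.v1.global_variables_initializer())
  # Probe the schedule on both sides of each boundary.
  for step in (0, 100000, 100001, 110000, 110001):
    sess.run(global_step.assign(step))
    print(step, sess.run(learning_rate))
# Expected: 1.0, 1.0, 0.5, 0.5, 0.1
```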
Code example #47
def piecewise_constant(x, boundaries, values, name=None):
  """Piecewise constant from boundaries and interval values.

  Example: use a learning rate that's 1.0 for the first 100000 steps, 0.5
    for steps 100001 to 110000, and 0.1 for any additional steps.

  ```python
  global_step = tf.Variable(0, trainable=False)
  boundaries = [100000, 110000]
  values = [1.0, 0.5, 0.1]
  learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)

  # Later, whenever we perform an optimization step, we increment global_step.
  ```

  Args:
    x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
      `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
    boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
      increasing entries, and with all elements having the same type as `x`.
    values: A list of `Tensor`s, `float`s, or `int`s that specifies the values
      for the intervals defined by `boundaries`. It should have one more
      element than `boundaries`, and all elements should have the same type.
    name: A string. Optional name of the operation. Defaults to
      'PiecewiseConstant'.

  Returns:
    A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
    `values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
    and `values[-1]` when `x > boundaries[-1]`.

  Raises:
    ValueError: if the types of `x` and `boundaries` do not match, if the
        types of all `values` do not match, or if the number of elements in
        the lists does not match.
  """
  if len(boundaries) != len(values) - 1:
    raise ValueError(
        "The length of boundaries should be 1 less than the length of values")
  with ops.name_scope(name, "PiecewiseConstant",
                      [x, boundaries, values, name]) as name:
    x = ops.convert_to_tensor(x)
    # Avoid explicit conversion to x's dtype. This could result in faulty
    # comparisons, for example if floats are converted to integers.
    boundaries = ops.convert_n_to_tensor(boundaries)
    for i, b in enumerate(boundaries):
      if b.dtype.base_dtype != x.dtype.base_dtype:
        # We can promote int32 boundaries to int64 without loss of precision.
        # This covers the most common case where the user passes in boundaries
        # as an array of Python integers.
        if (b.dtype.base_dtype == dtypes.int32 and
            x.dtype.base_dtype == dtypes.int64):
          b = math_ops.cast(b, x.dtype.base_dtype)
          boundaries[i] = b
        else:
          raise ValueError(
              "Boundaries (%s) must have the same dtype as x (%s)." % (
                  b.dtype.base_dtype, x.dtype.base_dtype))
    # TODO(rdipietro): Ensure that boundaries' elements are strictly increasing.
    values = ops.convert_n_to_tensor(values)
    for v in values[1:]:
      if v.dtype.base_dtype != values[0].dtype.base_dtype:
        raise ValueError(
            "Values must have elements all with the same dtype (%s vs %s)." % (
                values[0].dtype.base_dtype, v.dtype.base_dtype))
    pred_fn_pairs = []
    pred_fn_pairs.append((x <= boundaries[0], lambda: values[0]))
    pred_fn_pairs.append((x > boundaries[-1], lambda: values[-1]))
    for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
      # Need to bind v here; can do this with lambda v=v: ...
      pred = (x > low) & (x <= high)
      pred_fn_pairs.append((pred, lambda v=v: v))

    # The default isn't needed here because our conditions are mutually
    # exclusive and exhaustive, but tf.case requires it.
    default = lambda: values[0]
    return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
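The only substantive difference from the previous example is that `pred_fn_pairs` is built as an ordered list of `(predicate, fn)` pairs rather than a dict, which avoids relying on dict iteration order. A minimal standalone illustration of that `tf.case` usage; the constants here are arbitrary:

```python
import tensorflow as tf

x = tf.constant(5)
pred_fn_pairs = [
    (x <= 3, lambda: tf.constant(1.0)),
    ((x > 3) & (x <= 10), lambda: tf.constant(0.5)),
    (x > 10, lambda: tf.constant(0.1)),
]
# The predicates are mutually exclusive and exhaustive, but tf.case still
# requires a default; exclusive=True evaluates all predicates and raises
# if more than one is true.
result = tf.case(pred_fn_pairs, default=lambda: tf.constant(1.0),
                 exclusive=True)  # yields 0.5 for x == 5
```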