Code example #1
File: compat_test.py Project: AnishShah/tensorflow
  def test_decorator(self):
    compatibility_date = self._compatibility_date()
    one_day_after = self._n_days_after(1)
    with compat.forward_compatibility_horizon(*one_day_after):
      self.assertTrue(compat.forward_compatible(*compatibility_date))
      self.assertFalse(compat.forward_compatible(*one_day_after))

    # After exiting context manager, value should be reset.
    self.assertFalse(compat.forward_compatible(*compatibility_date))
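The test above exercises TensorFlow's forward-compatibility horizon. Below is a minimal standalone sketch of the same idea using the public tf.compat API of TF 2.x; the dates are arbitrary placeholders, not values from the test.

import tensorflow as tf

# forward_compatible(y, m, d) is True once the library's forward-compatibility
# horizon (roughly the date of the running build) has passed the given date.
print(tf.compat.forward_compatible(2019, 1, 1))         # True on current builds

# Temporarily push the horizon forward to exercise a not-yet-enabled code path.
with tf.compat.forward_compatibility_horizon(2099, 1, 1):
    print(tf.compat.forward_compatible(2098, 12, 31))   # True inside the context
print(tf.compat.forward_compatible(2098, 12, 31))       # False again after exit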
Code example #2
File: loss_ops.py Project: ThunderQi/tensorflow
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary to avoid situations where a zero loss would cause NaNs to creep
  into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  if compat.forward_compatible(2018, 11, 1):
    return math_ops.div_no_nan(numerator, denominator, name=name)
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator,
                   array_ops.where(
                       math_ops.equal(denominator, 0),
                       array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
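For reference, div_no_nan behaves like the array_ops.where fallback above. A small sketch with the public API (a rough equivalent for illustration, not the library code itself):

import tensorflow as tf

num = tf.constant([1.0, 2.0, 3.0])
den = tf.constant([2.0, 0.0, 4.0])

# Returns 0 wherever the denominator is 0, instead of inf/NaN.
print(tf.math.divide_no_nan(num, den))   # [0.5, 0.0, 0.75]
print(tf.math.divide(num, den))          # [0.5, inf, 0.75] for comparison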
Code example #3
File: xent_op_test.py Project: aritratony/tensorflow
  def testSecondGradient(self):
    with self.cached_session() as sess:
      l = constant_op.constant(
          [
              0.0, 0.0, 1.0 / 3, 0.0, 1.0 / 3, 0.0, 0.0, 0.0, 0.0, 0.5 / 3, 0.0,
              0.5 / 3
          ],
          shape=[12],
          dtype=dtypes.float64,
          name="l")
      f = constant_op.constant(
          [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
          shape=[12],
          dtype=dtypes.float64,
          name="f")
      x = nn_ops.softmax_cross_entropy_with_logits(
          labels=l, logits=f, name="xent")
      loss = math_ops.reduce_sum(x)

      gradients = gradients_impl.gradients(loss, [f])[0]

      err = gradient_checker.compute_gradient_error(f, [12], gradients, [12])

      # Check that the second derivative is calculated
      # (it shows up as a `BatchMatMul` op in the graph because of how the
      # cross-entropy gradient is implemented).
      op_names = [
          op.op_def.name for op in sess.graph.get_operations() if op.op_def
      ]
      if compat.forward_compatible(2019, 4, 25):
        self.assertIn("BatchMatMulV2", op_names)
      else:
        self.assertIn("BatchMatMul", op_names)

    print("cross entropy hessian err = ", err)
    self.assertLess(err, 5e-8)
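The gradient-checker call above verifies that a second derivative exists for the cross-entropy loss. A minimal eager sketch of the same idea with nested tf.GradientTape; the values are arbitrary and only illustrative.

import tensorflow as tf

logits = tf.Variable([0.1, 0.2, 0.3, 0.4], dtype=tf.float64)
labels = tf.constant([0.0, 0.0, 0.5, 0.5], dtype=tf.float64)

with tf.GradientTape() as outer:
    with tf.GradientTape() as inner:
        loss = tf.reduce_sum(
            tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    grad = inner.gradient(loss, logits)
# Differentiating the gradient again exercises the second derivative
# (row sums of the Hessian of the loss with respect to the logits).
print(outer.gradient(grad, logits))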
Code example #4
File: lookup_ops.py Project: aeverall/tensorflow
  def initialize(self, table):
    """Initializes the given `table` with `keys` and `values` tensors.

    Args:
      table: The table to initialize.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
      key and value data types.
    """
    _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
    with ops.name_scope(
        self._name, values=(table.resource_handle, self._keys,
                            self._values)) as scope:
      if context.executing_eagerly():
        # Ensure a unique name when eager execution is enabled to avoid spurious
        # sharing issues.
        scope += str(ops.uid())
      if fwd_compat.forward_compatible(2018, 9, 19):
        init_op = gen_lookup_ops.lookup_table_import_v2(
            table.resource_handle, self._keys, self._values, name=scope)
      else:
        # To maintain forward compatibility, use the old implementation.
        init_op = gen_lookup_ops.initialize_table_v2(
            table.resource_handle, self._keys, self._values, name=scope)
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
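The initializer above backs the public lookup-table API. A minimal usage sketch with tf.lookup (public API; the keys and values here are made up):

import tensorflow as tf

keys = tf.constant(["a", "b", "c"])
values = tf.constant([0, 1, 2], dtype=tf.int64)

# KeyValueTensorInitializer is the public counterpart of the initializer above.
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1)
print(table.lookup(tf.constant(["b", "z"])))  # [1, -1]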
Code example #5
File: readers.py Project: aeverall/tensorflow
  def _as_variant_tensor(self):
    if (self._compression_type is not None or
        compat.forward_compatible(2018, 11, 30)):
      return gen_dataset_ops.fixed_length_record_dataset_v2(
          self._filenames, self._header_bytes, self._record_bytes,
          self._footer_bytes, self._buffer_size, self._compression_type)
    else:
      return gen_dataset_ops.fixed_length_record_dataset(
          self._filenames, self._header_bytes, self._record_bytes,
          self._footer_bytes, self._buffer_size)
Code example #6
  def testGradientAtSingularity(self):
    if not compat.forward_compatible(2019, 6, 14):
      self.skipTest("Skipping test for future functionality.")

    ops_and_singularity = [
        (gen_math_ops.reciprocal, (0.,)),
        (gen_math_ops.rsqrt, (0.,)),
        (gen_math_ops.sqrt, (0.,)),
        (gen_math_ops.sqrt_grad, (
            0.,
            0.,
        )),
        (gen_math_ops.reciprocal_grad, (
            1.,
            0.,
        )),
        (gen_math_ops.tan, (np.pi / 2,)),
        (gen_math_ops.log, (0.,)),
        (gen_math_ops.log1p, (-1.,)),
        (gen_math_ops.acosh, (0.,)),
        (gen_math_ops.asin, (1.,)),
        (gen_math_ops.acos, (1.,)),
        (gen_math_ops.atan2, (0., 0.)),
        (gen_math_ops.div, (1., 0.)),
        (gen_math_ops.div_no_nan, (1., 0.)),
        (gen_math_ops.real_div, (1., 0.)),
        (math_ops.pow, (0., -1.)),
    ]
    for op, singularity in ops_and_singularity:
      for dtype in (dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
                    dtypes_lib.complex64, dtypes_lib.complex128):
        if dtype.is_complex and op in [
            gen_math_ops.asin, gen_math_ops.acos, gen_math_ops.atan2
        ]:
          continue
        if dtype == dtypes_lib.half and op in [
            gen_math_ops.acosh, gen_math_ops.asin, gen_math_ops.acos,
            gen_math_ops.atan2
        ]:
          continue
        with self.cached_session():
          print("op = ", op, ", singularity = ", singularity, ", type = ",
                dtype)
          args = [constant_op.constant(s, dtype=dtype) for s in singularity]
          grad_y = constant_op.constant(0, dtype=dtype)
          y = op(*args)
          g = gradients_impl.gradients(y, args, grad_ys=grad_y)
          g_val = self.evaluate(g)
          self.assertAllEqual(g_val, np.zeros(len(singularity)))
Code example #7
File: compat_test.py Project: AnishShah/tensorflow
  def test_decorator_with_failure(self):
    compatibility_date = self._compatibility_date()
    one_day_after = self._n_days_after(1)

    class DummyError(Exception):
      pass

    try:
      with compat.forward_compatibility_horizon(*one_day_after):
        raise DummyError()
    except DummyError:
      pass  # silence DummyError

    # After exiting context manager, value should be reset.
    self.assertFalse(compat.forward_compatible(*compatibility_date))
Code example #8
File: readers.py Project: kylin9872/tensorflow
  def __init__(self,
               filenames,
               record_bytes,
               header_bytes=None,
               footer_bytes=None,
               buffer_size=None,
               compression_type=None):
    """Creates a `FixedLengthRecordDataset`.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      record_bytes: A `tf.int64` scalar representing the number of bytes in
        each record.
      header_bytes: (Optional.) A `tf.int64` scalar representing the number of
        bytes to skip at the start of a file.
      footer_bytes: (Optional.) A `tf.int64` scalar representing the number of
        bytes to ignore at the end of a file.
      buffer_size: (Optional.) A `tf.int64` scalar representing the number of
        bytes to buffer when reading.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`.
    """
    self._filenames = ops.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    self._record_bytes = ops.convert_to_tensor(
        record_bytes, dtype=dtypes.int64, name="record_bytes")

    self._header_bytes = convert.optional_param_to_tensor(
        "header_bytes", header_bytes)
    self._footer_bytes = convert.optional_param_to_tensor(
        "footer_bytes", footer_bytes)
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)
    if (self._compression_type is not None or
        compat.forward_compatible(2018, 11, 30)):
      variant_tensor = gen_dataset_ops.fixed_length_record_dataset_v2(
          self._filenames, self._header_bytes, self._record_bytes,
          self._footer_bytes, self._buffer_size, self._compression_type)
    else:
      variant_tensor = gen_dataset_ops.fixed_length_record_dataset(
          self._filenames, self._header_bytes, self._record_bytes,
          self._footer_bytes, self._buffer_size)
    super(FixedLengthRecordDatasetV2, self).__init__(variant_tensor)
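A short sketch of how the resulting dataset is used through the public tf.data.FixedLengthRecordDataset class; the temporary file and its contents are made up for illustration.

import os
import tempfile
import tensorflow as tf

# Write three fixed-length (4-byte) records to a temporary file.
path = os.path.join(tempfile.mkdtemp(), "records.bin")
with open(path, "wb") as f:
    f.write(b"aaaabbbbcccc")

dataset = tf.data.FixedLengthRecordDataset(path, record_bytes=4)
for record in dataset:
    print(record.numpy())  # b'aaaa', b'bbbb', b'cccc'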
Code example #9
File: math_test.py Project: aritratony/tensorflow
  def test_batch_matmul_broadcast(self):
    if not compat.forward_compatible(2019, 4, 25):
      self.skipTest("Skipping test for future functionality.")
    for broadcast_a in (True, False):
      for broadcast_b in (True, False):
        for stack_a in (True, False):
          for stack_b in (True, False):
            shape_a = (2, 3, 5) if broadcast_a else (4, 2, 3, 5)
            shape_b = (2, 5, 7) if broadcast_b else (4, 2, 5, 7)
            shape_a = (2,) + shape_a if stack_a else shape_a
            shape_b = (2,) + shape_b if stack_b else shape_b
            x = random_ops.random_uniform(shape_a)
            y = random_ops.random_uniform(shape_b)

            # pylint: disable=cell-var-from-loop
            def loop_fn(i):
              a = array_ops.gather(x, i) if stack_a else x
              b = array_ops.gather(y, i) if stack_b else y
              return math_ops.matmul(a, b)

            # pylint: enable=cell-var-from-loop
            self._test_loop_fn(loop_fn, 2)
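The test targets the batch-dimension broadcasting introduced with BatchMatMulV2. A minimal public-API sketch, assuming a TensorFlow build new enough that tf.linalg.matmul broadcasts batch dimensions:

import tensorflow as tf

a = tf.random.uniform((4, 2, 3, 5))   # batched operand
b = tf.random.uniform((2, 5, 7))      # fewer batch dims, broadcast against `a`
print(tf.linalg.matmul(a, b).shape)   # (4, 2, 3, 7)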
Code example #10
    def testMultipleWhileLoopsWithFunc(self):
        if compat.forward_compatible(2019, 8, 23):
            x = constant_op.constant(2.)

            @def_function.function
            def Fn():
                ret1 = while_loop_v2(lambda v: v < 4.,
                                     lambda v: v * v, [x],
                                     return_same_structure=False,
                                     name="while_1")  # x**2
                ret2 = while_loop_v2(lambda v: v < 16.,
                                     lambda v: v * v, [x],
                                     return_same_structure=False,
                                     name="while_2")  # x**4
                return ret1, ret2

            concrete_fn = Fn.get_concrete_function()
            while_1 = concrete_fn.graph.get_operation_by_name("while_1")
            while_2 = concrete_fn.graph.get_operation_by_name("while_2")
            self.assertEqual(while_1.type, "StatelessWhile")
            self.assertEqual(while_2.type, "StatelessWhile")
            self.assertEmpty(while_1.control_inputs)
            self.assertEmpty(while_2.control_inputs)
Code example #11
def cardinality(dataset):
    """Returns the cardinality of `dataset`, if known.

  The operation returns the cardinality of `dataset`. The operation may return
  `tf.data.experimental.INFINITE_CARDINALITY` if `dataset` contains an infinite
  number of elements or `tf.data.experimental.UNKNOWN_CARDINALITY` if the
  analysis fails to determine the number of elements in `dataset` (e.g. when the
  dataset source is a file).

  Args:
    dataset: A `tf.data.Dataset` for which to determine cardinality.

  Returns:
    A scalar `tf.int64` `Tensor` representing the cardinality of `dataset`. If
    the cardinality is infinite or unknown, the operation returns the named
    constant `INFINITE_CARDINALITY` or `UNKNOWN_CARDINALITY`, respectively.
  """

    if compat.forward_compatible(2019, 8, 3):
        return ged_ops.dataset_cardinality(dataset._variant_tensor)  # pylint: disable=protected-access
    else:
        return ged_ops.experimental_dataset_cardinality(
            dataset._variant_tensor)  # pylint: disable=protected-access
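The same functionality is exposed publicly as tf.data.experimental.cardinality. A small usage sketch:

import tensorflow as tf

finite = tf.data.Dataset.range(10)
infinite = finite.repeat()

print(tf.data.experimental.cardinality(finite).numpy())   # 10
print(tf.data.experimental.cardinality(infinite) ==
      tf.data.experimental.INFINITE_CARDINALITY)          # True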
Code example #12
  def _testSquareBatch(self, dtype):
    with self.cached_session(use_gpu=True):
      v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
      mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
                            [[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
                             [0.0, 0.0, 6.0]]]).astype(dtype)
      self.assertEqual(mat_batch.shape, (2, 3, 3))
      mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
      self.assertEqual((2, 3), mat_batch_diag.get_shape())
      self.assertAllEqual(mat_batch_diag.eval(), v_batch)

      if compat.forward_compatible(2019, 7, 4):
        # Diagonal bands with padding.
        mat, tests = square_cases()
        for padding in [0, 555, -11]:
          for diags, pair in tests.items():
            solution, _ = pair
            mat_batch_diag = array_ops.matrix_diag_part(
                mat.astype(dtype), k=diags, padding=padding)
            mask = solution == 0
            solution = (solution + padding * mask).astype(dtype)
            self.assertEqual(mat_batch_diag.get_shape(), solution.shape)
            self.assertAllEqual(mat_batch_diag.eval(), solution)
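The k and padding arguments tested above correspond to the public tf.linalg.diag_part API (the public keyword is padding_value in current releases). A minimal sketch, assuming a TF version where the k argument is available:

import tensorflow as tf

m = tf.constant([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])

print(tf.linalg.diag_part(m))             # [1, 5, 9], the main diagonal
print(tf.linalg.diag_part(m, k=1))        # [2, 6], the first superdiagonal
print(tf.linalg.diag_part(m, k=(-1, 1)))  # band of three diagonals, padded to 3x3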
Code example #13
  def testRectangularBatch(self):
    with self.session(use_gpu=True):
      v_batch = np.array([[-1.0, -2.0], [-4.0, -5.0]])
      mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
                            [[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]])

      mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],
                                     [[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]])
      output = array_ops.matrix_set_diag(mat_batch, v_batch)
      self.assertEqual((2, 2, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))

      if compat.forward_compatible(2019, 7, 4):
        # Diagonal bands.
        for _, tests in [tall_cases(), fat_cases()]:
          for diags, pair in tests.items():
            vecs, banded_mat = pair
            mask = banded_mat == 0
            input_mat = np.random.randint(10, size=mask.shape)
            solution = input_mat * mask + banded_mat
            output = array_ops.matrix_set_diag(input_mat, vecs, k=diags)
            self.assertEqual(output.get_shape(), solution.shape)
            self.assertAllEqual(output.eval(), solution)
Code example #14
  def _testGrad(self, input_shape, diag_shape, diags):
    with self.session(use_gpu=True):
      x = constant_op.constant(
          np.random.rand(*input_shape), dtype=dtypes_lib.float32)
      x_diag = constant_op.constant(
          np.random.rand(*diag_shape), dtype=dtypes_lib.float32)

      # LINT.IfChange
      if compat.forward_compatible(2019, 11, 30):
      # LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
        y = array_ops.matrix_set_diag(x, x_diag, k=diags)
      else:
        y = array_ops.matrix_set_diag(x, x_diag)
      error_x = gradient_checker.compute_gradient_error(x,
                                                        x.get_shape().as_list(),
                                                        y,
                                                        y.get_shape().as_list())
      self.assertLess(error_x, 1e-4)
      error_x_diag = gradient_checker.compute_gradient_error(
          x_diag,
          x_diag.get_shape().as_list(), y,
          y.get_shape().as_list())
      self.assertLess(error_x_diag, 1e-4)
Code example #15
    def testSecondGradient(self):
        with self.cached_session() as sess:
            l = constant_op.constant([
                0.0, 0.0, 1.0 / 3, 0.0, 1.0 / 3, 0.0, 0.0, 0.0, 0.0, 0.5 / 3,
                0.0, 0.5 / 3
            ],
                                     shape=[12],
                                     dtype=dtypes.float64,
                                     name="l")
            f = constant_op.constant(
                [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
                shape=[12],
                dtype=dtypes.float64,
                name="f")
            x = nn_ops.softmax_cross_entropy_with_logits(labels=l,
                                                         logits=f,
                                                         name="xent")
            loss = math_ops.reduce_sum(x)

            gradients = gradients_impl.gradients(loss, [f])[0]

            err = gradient_checker.compute_gradient_error(
                f, [12], gradients, [12])

            # Check that the second derivative is calculated
            # (it shows up as a `BatchMatMul` op in the graph because of how
            # the cross-entropy gradient is implemented).
            op_names = [
                op.op_def.name for op in sess.graph.get_operations()
                if op.op_def
            ]
            if compat.forward_compatible(2019, 4, 25):
                self.assertIn("BatchMatMulV2", op_names)
            else:
                self.assertIn("BatchMatMul", op_names)

        print("cross entropy hessian err = ", err)
        self.assertLess(err, 5e-8)
Code example #16
File: readers.py Project: xutianming/tensorflow
    def __init__(self,
                 filenames,
                 compression_type=None,
                 buffer_size=None,
                 name=None):
        """Creates a `TextLineDataset`.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`.
      buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
        to buffer. A value of 0 results in the default buffering values chosen
        based on the compression type.
      name: (Optional.) A name for the tf.data operation.
    """
        self._filenames = filenames
        self._compression_type = convert.optional_param_to_tensor(
            "compression_type",
            compression_type,
            argument_default="",
            argument_dtype=dtypes.string)
        self._buffer_size = convert.optional_param_to_tensor(
            "buffer_size",
            buffer_size,
            argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)
        self._metadata = dataset_metadata_pb2.Metadata()
        if name:
            self._metadata.name = dataset_ops._validate_and_encode(name)
        kwargs = {}
        if name or compat.forward_compatible(2021, 9, 30):
            kwargs["metadata"] = self._metadata.SerializeToString()

        variant_tensor = gen_dataset_ops.text_line_dataset(
            self._filenames, self._compression_type, self._buffer_size,
            **kwargs)
        super(_TextLineDataset, self).__init__(variant_tensor)
Code example #17
 def testNewStatePhilox(self):
     """Tests that the new state is correct (for Philox).
 """
     if compat.forward_compatible(2020, 10, 25):
         self.skipTest(
             "The expected values in this test is inconsistent with "
             "CPU/GPU. testXLAEqualsCPU has the correct checks of the "
             "new states for the new version.")
     with ops.device(xla_device_name()):
         counter_low = 57
         counter_high = 283
         key = 0x1234
         size = 47
         state = [counter_low, counter_high, key]
         gen = random.Generator(state=state, alg=random.RNG_ALG_PHILOX)
         gen.uniform_full_int(shape=(size, ), dtype=dtypes.uint32)
         self.assertAllEqual(
             [counter_low + (size + 3) // 4, counter_high, key],
             gen.state.read_value())
         gen.reset(state)
         gen.uniform_full_int(shape=(size, ), dtype=dtypes.uint64)
         self.assertAllEqual(
             [counter_low + (size + 1) // 2, counter_high, key],
             gen.state.read_value())
          # Tests that a large counter_low correctly overflows into counter_high.
         counter_low = -1  # same as 0xffffffffffffffff
         counter_high = 283
         size = 47
         state = [counter_low, counter_high, key]
         gen = random.Generator(state=state, alg=random.RNG_ALG_PHILOX)
         gen.uniform_full_int(shape=(size, ), dtype=dtypes.uint32)
         self.assertAllEqual([(size + 3) // 4 - 1, counter_high + 1, key],
                             gen.state.read_value())
         gen.reset(state)
         gen.uniform_full_int(shape=(size, ), dtype=dtypes.uint64)
         self.assertAllEqual([(size + 1) // 2 - 1, counter_high + 1, key],
                             gen.state.read_value())
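The expected counter deltas in the test follow from the Philox block size: each counter increment yields 128 random bits, i.e. four uint32 or two uint64 values. A quick arithmetic sketch of the expectations asserted above:

# Philox produces 128 bits (four uint32 / two uint64 values) per counter step,
# so generating `size` samples advances the counter by ceil(size / 4) or
# ceil(size / 2) respectively, matching the assertions in the test above.
size = 47
print((size + 3) // 4)  # 12 counter steps for 47 uint32 samples
print((size + 1) // 2)  # 24 counter steps for 47 uint64 samples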
Code example #18
 def testXLAEqualsCPU(self, dtype):
   """Tests that XLA and CPU kernels generate the same integers."""
   seed = 1234
   shape = [315, 49]
   if compat.forward_compatible(2020, 10, 25):
     with ops.device("/device:CPU:0"):
       cpu_gen = random.Generator.from_seed(
           seed=seed, alg=random.RNG_ALG_PHILOX)
     with ops.device(xla_device_name()):
       xla_gen = random.Generator.from_seed(
           seed=seed, alg=random.RNG_ALG_PHILOX)
      # Repeat multiple times to make sure that the states after
      # number generation are the same between CPU and XLA.
     for _ in range(5):
       with ops.device("/device:CPU:0"):
         # Test both number-generation and skip
         cpu = cpu_gen.uniform_full_int(shape=shape, dtype=dtype)
         cpu_gen.skip(100)
       with ops.device(xla_device_name()):
         xla = xla_gen.uniform_full_int(shape=shape, dtype=dtype)
         xla_gen.skip(100)
       self.assertAllEqual(cpu, xla)
       self.assertAllEqual(cpu_gen.state, xla_gen.state)
   else:
     # The old version doesn't guarantee that CPU and XLA are in the same state
     # after number-generation, which is a bug.
     with ops.device("/device:CPU:0"):
       cpu = (
           random.Generator.from_seed(
               seed=seed, alg=random.RNG_ALG_PHILOX).uniform_full_int(
                   shape=shape, dtype=dtype))
     with ops.device(xla_device_name()):
       xla = (
           random.Generator.from_seed(
               seed=seed, alg=random.RNG_ALG_PHILOX).uniform_full_int(
                   shape=shape, dtype=dtype))
     self.assertAllEqual(cpu, xla)
Code example #19
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))

        acc = self.get_slot(var, 'accumulator')
        if compat.forward_compatible(2019, 8, 20):
            return training_ops.resource_sparse_apply_adagrad_v2(
                var.handle,
                acc.handle,
                coefficients['lr_t'],
                coefficients['epsilon'],
                grad,
                indices,
                use_locking=self._use_locking)
        with ops.control_dependencies([
                resource_variable_ops.resource_scatter_add(
                    acc.handle, indices, math_ops.square(grad))
        ]):
            acc_t_slice = acc.sparse_read(indices)
        var_update = resource_variable_ops.resource_scatter_add(
            var.handle, indices, coefficients['neg_lr_t'] * grad /
            (math_ops.sqrt(acc_t_slice) + coefficients['epsilon']))
        return var_update
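For reference, the fallback branch above applies the standard Adagrad update to the gathered rows. A tiny NumPy sketch of that update rule (values are arbitrary, not from the optimizer code):

import numpy as np

lr, epsilon = 0.1, 1e-7
var = np.array([1.0, 2.0])
acc = np.array([0.1, 0.1])
grad = np.array([0.5, -0.25])

# accumulator += grad**2; var -= lr * grad / (sqrt(accumulator) + epsilon)
acc += grad ** 2
var -= lr * grad / (np.sqrt(acc) + epsilon)
print(var, acc)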
Code example #20
 def _irfft(input_tensor, fft_length=None, name=None):
     """Wrapper irfft* that infers fft_length argument."""
     with _ops.name_scope(name, default_name,
                          [input_tensor, fft_length]) as name:
         input_tensor = _ops.convert_to_tensor(
             input_tensor, preferred_dtype=_dtypes.complex64)
         input_tensor.shape.with_rank_at_least(fft_rank)
         complex_dtype = input_tensor.dtype
         real_dtype = complex_dtype.real_dtype
         if fft_length is None:
             fft_length = _infer_fft_length_for_irfft(
                 input_tensor, fft_rank)
         else:
             fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
         input_tensor = _maybe_pad_for_rfft(input_tensor,
                                            fft_rank,
                                            fft_length,
                                            is_reverse=True)
         if not compat.forward_compatible(2019, 10, 12):
             return ifft_fn(input_tensor, fft_length, name=name)
         return ifft_fn(input_tensor,
                        fft_length,
                        Treal=real_dtype,
                        name=name)
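The wrapper above infers fft_length when it is not supplied. The public tf.signal.irfft behaves the same way; a short round-trip sketch:

import tensorflow as tf

x = tf.random.uniform([8])
spectrum = tf.signal.rfft(x)           # 8 // 2 + 1 == 5 complex bins
roundtrip = tf.signal.irfft(spectrum)  # fft_length inferred as 2 * (5 - 1) == 8
print(roundtrip.shape)                 # (8,)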
Code example #21
  def testSquare(self):
    with self.session(use_gpu=True):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.diag(v)
      mat_diag = array_ops.matrix_diag_part(mat)
      self.assertEqual((3,), mat_diag.get_shape())
      self.assertAllEqual(mat_diag.eval(), v)

      # LINT.IfChange
      if compat.forward_compatible(2019, 11, 30):
      # LINT.ThenChange(//tensorflow/python/ops/array_ops.py)
        for offset in [-2, 3]:
          mat = np.diag(v, offset)
          mat_diag = array_ops.matrix_diag_part(mat, k=offset)
          self.assertEqual((3,), mat_diag.get_shape())
          self.assertAllEqual(mat_diag.eval(), v)

        # Diagonal bands.
        mat, tests = square_cases()
        for diags, pair in tests.items():
          solution, _ = pair
          mat_diag = array_ops.matrix_diag_part(mat[0], k=diags)
          self.assertEqual(mat_diag.get_shape(), solution[0].shape)
          self.assertAllEqual(mat_diag.eval(), solution[0])
Code example #22
File: writers.py Project: tftuner/tf_fast_recovery
  def write(self, dataset):
    """Returns a `tf.Operation` to write a dataset to a file.

    Args:
      dataset: a `tf.data.Dataset` whose elements are to be written to a file

    Returns:
      A `tf.Operation` that, when run, writes contents of `dataset` to a file.
    """
    if not isinstance(dataset, dataset_ops.DatasetV2):
      raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
    if not dataset_ops.get_structure(dataset).is_compatible_with(
        structure.TensorStructure(dtypes.string, [])):
      raise TypeError(
          "`dataset` must produce scalar `DT_STRING` tensors whereas it "
          "produces shape {0} and types {1}".format(
              dataset_ops.get_legacy_output_shapes(dataset),
              dataset_ops.get_legacy_output_types(dataset)))
    if compat.forward_compatible(2019, 8, 3):
      return gen_experimental_dataset_ops.dataset_to_tf_record(
          dataset._variant_tensor, self._filename, self._compression_type)  # pylint: disable=protected-access
    else:
      return gen_experimental_dataset_ops.experimental_dataset_to_tf_record(
          dataset._variant_tensor, self._filename, self._compression_type)  # pylint: disable=protected-access
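A usage sketch for the writer above through the public tf.data.experimental.TFRecordWriter API, assuming eager execution and a TF 2.x release that provides Dataset.as_numpy_iterator; the file path and records are placeholders.

import os
import tempfile
import tensorflow as tf

path = os.path.join(tempfile.mkdtemp(), "data.tfrecord")
dataset = tf.data.Dataset.from_tensor_slices([b"a", b"b", b"c"])  # scalar strings

tf.data.experimental.TFRecordWriter(path).write(dataset)          # writes the file
print(list(tf.data.TFRecordDataset(path).as_numpy_iterator()))    # [b'a', b'b', b'c']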
Code example #23
File: string_ops.py Project: qwerzou1/shibie
def regex_full_match(input, pattern, name=None):
  r"""Match elements of `input` with regex `pattern`.

  Args:
    input: string `Tensor`, the source strings to process.
    pattern: string or scalar string `Tensor`, regular expression to use,
      see more details at https://github.com/google/re2/wiki/Syntax
    name: Name of the op.

  Returns:
    bool `Tensor` of the same shape as `input` with match results.
  """
  # TODO(b/112455102): Remove compat.forward_compatible once past the horizon.
  if not compat.forward_compatible(2018, 11, 10):
    return gen_string_ops.regex_full_match(
        input=input, pattern=pattern, name=name)
  if isinstance(pattern, util_compat.bytes_or_text_types):
    # When `pattern` is static through the life of the op we can
    # use a version which performs the expensive regex compilation once at
    # creation time.
    return gen_string_ops.static_regex_full_match(
        input=input, pattern=pattern, name=name)
  return gen_string_ops.regex_full_match(
      input=input, pattern=pattern, name=name)
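A public-API usage sketch of the same op via tf.strings.regex_full_match:

import tensorflow as tf

s = tf.constant(["abc123", "xyz", "123"])
# With a static (Python string) pattern the static kernel can be used internally.
print(tf.strings.regex_full_match(s, r"[a-z]+[0-9]+"))  # [True, False, False]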
Code example #24
def safe_div(numerator, denominator):
    """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary to avoid situations where a zero loss would cause NaNs to creep
  into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
    if compat.forward_compatible(2018, 11, 1):
        return math_ops.div_no_nan(numerator, denominator)
    return array_ops.where(
        math_ops.greater(denominator, 0),
        math_ops.div(
            numerator,
            array_ops.where(math_ops.equal(denominator, 0),
                            array_ops.ones_like(denominator), denominator)),
        array_ops.zeros_like(numerator))
Code example #25
def regex_replace(source, pattern, rewrite, replace_global=True):
    r"""Replace elements of `source` matching regex `pattern` with `rewrite`.

  Args:
    source: string `Tensor`, the source strings to process.
    pattern: string or scalar string `Tensor`, regular expression to use,
      see more details at https://github.com/google/re2/wiki/Syntax
    rewrite: string or scalar string `Tensor`, value to use in match
      replacement; supports backslash-escaped digits (\1 to \9), which can be
      used to insert text matching the corresponding parenthesized group.
    replace_global: `bool`, if `True` replace all non-overlapping matches,
      else replace only the first match.

  Returns:
    string `Tensor` of the same shape as `source` with specified replacements.
  """
    # TODO(b/112455102): Remove compat.forward_compatible once past the horizon.
    if not compat.forward_compatible(2018, 10, 10):
        return gen_string_ops.regex_replace(input=source,
                                            pattern=pattern,
                                            rewrite=rewrite,
                                            replace_global=replace_global)
    if (isinstance(pattern, util_compat.bytes_or_text_types)
            and isinstance(rewrite, util_compat.bytes_or_text_types)):
        # When `pattern` and `rewrite` are static through the life of the op we can
        # use a version which performs the expensive regex compilation once at
        # creation time.
        return gen_string_ops.static_regex_replace(
            input=source,
            pattern=pattern,
            rewrite=rewrite,
            replace_global=replace_global)
    return gen_string_ops.regex_replace(input=source,
                                        pattern=pattern,
                                        rewrite=rewrite,
                                        replace_global=replace_global)
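A corresponding public-API sketch with tf.strings.regex_replace:

import tensorflow as tf

s = tf.constant(["one 1 two 2"])
print(tf.strings.regex_replace(s, r"[0-9]", "#"))                        # ['one # two #']
print(tf.strings.regex_replace(s, r"[0-9]", "#", replace_global=False))  # ['one # two 2']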
Code example #26
  def testSquare(self):
    with self.session(use_gpu=True):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])
      mat_set_diag = np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0],
                               [1.0, 1.0, 3.0]])
      output = array_ops.matrix_set_diag(mat, v)
      self.assertEqual((3, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag, self.evaluate(output))

      # LINT.IfChange
      if compat.forward_compatible(2019, 11, 30):
      # LINT.ThenChange(//tensorflow/python/ops/array_ops.py)

        # Diagonal bands.
        _, tests = square_cases()
        for diags, pair in tests.items():
          vecs, banded_mat = pair
          mask = banded_mat[0] == 0
          input_mat = np.random.randint(10, size=mask.shape)
          solution = input_mat * mask + banded_mat[0]
          output = array_ops.matrix_set_diag(input_mat, vecs[0], k=diags)
          self.assertEqual(output.get_shape(), solution.shape)
          self.assertAllEqual(output.eval(), solution)
Code example #27
def _parse_example_raw(serialized, names, params, name):
  """Parses `Example` protos.

  Args:
    serialized: A vector (1-D Tensor) of strings, a batch of binary
      serialized `Example` protos.
    names: A vector (1-D Tensor) of strings (optional), the names of
      the serialized protos.
    params: A `ParseOpParams` containing the parameters for the parse op.
    name: A name for this operation (optional).

  Returns:
    A `dict` mapping keys to `Tensor`s and `SparseTensor`s and `RaggedTensor`s.

  """
  if params.num_features == 0:
    raise ValueError("Must provide at least one feature key")
  with ops.name_scope(name, "ParseExample", [serialized, names]):
    names = [] if names is None else names
    if compat.forward_compatible(2019, 10, 16) or params.ragged_keys:
      serialized = ops.convert_to_tensor(serialized, name="serialized")
      if params.ragged_keys and serialized.shape.ndims is None:
        raise ValueError("serialized must have statically-known rank to "
                         "parse ragged features.")
      outputs = gen_parsing_ops.parse_example_v2(
          serialized=serialized,
          names=names,
          sparse_keys=params.sparse_keys,
          dense_keys=params.dense_keys,
          ragged_keys=params.ragged_keys,
          dense_defaults=params.dense_defaults_vec,
          num_sparse=len(params.sparse_keys),
          sparse_types=params.sparse_types,
          ragged_value_types=params.ragged_value_types,
          ragged_split_types=params.ragged_split_types,
          dense_shapes=params.dense_shapes_as_proto,
          name=name)
      (sparse_indices, sparse_values, sparse_shapes, dense_values,
       ragged_values, ragged_row_splits) = outputs
      # pylint: disable=protected-access
      ragged_tensors = parsing_config._build_ragged_tensors(
          serialized.shape, ragged_values, ragged_row_splits)
    else:
      outputs = gen_parsing_ops.parse_example(
          serialized=serialized,
          names=names,
          dense_defaults=params.dense_defaults_vec,
          sparse_keys=params.sparse_keys,
          sparse_types=params.sparse_types,
          dense_keys=params.dense_keys,
          dense_shapes=params.dense_shapes_as_proto,
          name=name)

      (sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs
      ragged_tensors = []

    sparse_tensors = [
        sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
        in zip(sparse_indices, sparse_values, sparse_shapes)]

    return dict(
        zip(params.sparse_keys + params.dense_keys + params.ragged_keys,
            sparse_tensors + dense_values + ragged_tensors))
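A minimal end-to-end sketch of parsing through the public tf.io.parse_example API; the feature name "x" and its values are made up for illustration.

import tensorflow as tf

# Build and serialize a single Example proto with one float feature "x".
example = tf.train.Example(features=tf.train.Features(feature={
    "x": tf.train.Feature(float_list=tf.train.FloatList(value=[1.0, 2.0])),
}))
serialized = tf.constant([example.SerializeToString()])

parsed = tf.io.parse_example(
    serialized, {"x": tf.io.FixedLenFeature([2], tf.float32)})
print(parsed["x"])  # [[1. 2.]]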
Code example #28
File: logging_ops.py Project: aritratony/tensorflow
def print_v2(*inputs, **kwargs):
  """Print the specified inputs.

  Returns an operator that prints the specified inputs to a desired
  output stream or logging level. The inputs may be dense or sparse Tensors,
  primitive python objects, data structures that contain Tensors, and printable
  python objects. Printed tensors will recursively show the first and last
  `summarize` elements of each dimension.

  With eager execution enabled and/or inside a `tf.contrib.eager.defun` this
  operator will automatically execute, and users only need to call `tf.print`
  without using the return value. When constructing graphs outside of a
  `tf.contrib.eager.defun`, one must either include the returned op
  in the input to `session.run`, or use the operator as a control dependency for
  executed ops by specifying `with tf.control_dependencies([print_op])`.

  @compatibility(python2)
  In python 2.7, make sure to import the following:
  `from __future__ import print_function`
  @end_compatibility

  Example:
    Single-input usage:
    ```python
    tf.compat.v1.enable_eager_execution()
    tensor = tf.range(10)
    tf.print(tensor, output_stream=sys.stderr)
    ```
    (This prints "[0 1 2 ... 7 8 9]" to sys.stderr)

    Multi-input usage:
    ```python
    tf.compat.v1.enable_eager_execution()
    tensor = tf.range(10)
    tf.print("tensors:", tensor, {2: tensor * 2}, output_stream=sys.stdout)
    ```
    (This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to
    sys.stdout)

    Usage in a defun:
    ```python
    tf.compat.v1.enable_eager_execution()

    @tf.contrib.eager.defun
    def f():
        tensor = tf.range(10)
        tf.print(tensor, output_stream=sys.stderr)
        return tensor

    range_tensor = f()
    ```
    (This prints "[0 1 2 ... 7 8 9]" to sys.stderr)

    Usage when constructing graphs:
    ```python
    sess = tf.compat.v1.Session()
    with sess.as_default():
        tensor = tf.range(10)
        print_op = tf.print("tensors:", tensor, {2: tensor * 2},
                            output_stream=sys.stdout)
        with tf.control_dependencies([print_op]):
          tripled_tensor = tensor * 3
        sess.run(tripled_tensor)
    ```
    (This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to
    sys.stdout)

  Note: In Jupyter notebooks and colabs, this operator prints to the notebook
    cell outputs. It will not write to the notebook kernel's console logs.

  Args:
    *inputs: Positional arguments that are the inputs to print. Inputs in the
      printed output will be separated by spaces. Inputs may be python
      primitives, tensors, data structures such as dicts and lists that may
      contain tensors (with the data structures possibly nested in arbitrary
      ways), and printable python objects.
    output_stream: The output stream, logging level, or file to print to.
      Defaults to sys.stderr, but sys.stdout, tf.compat.v1.logging.info,
      tf.compat.v1.logging.warning, and tf.compat.v1.logging.error are also
      supported. To print to a file, pass a string starting with "file://"
      followed by the file path, e.g., "file:///tmp/foo.out".
    summarize: The first and last `summarize` elements within each dimension are
      recursively printed per Tensor. If None, then the first 3 and last 3
      elements of each dimension are printed for each tensor. If set to -1, it
      will print all elements of every tensor.
    sep: The string to use to separate the inputs. Defaults to " ".
    end: End character that is appended at the end of the printed string.
      Defaults to the newline character.
    name: A name for the operation (optional).

  Returns:
    A print operator that prints the specified inputs in the specified output
    stream or logging level.

  Raises:
    ValueError: If an unsupported output stream is specified.
  """
  # Because we are using arbitrary-length positional arguments, python 2
  # does not support explicitly specifying the keyword arguments in the
  # function definition. So, we manually get the keyword arguments w/ default
  # values here.
  output_stream = kwargs.pop("output_stream", sys.stderr)
  name = kwargs.pop("name", None)
  summarize = kwargs.pop("summarize", 3)
  sep = kwargs.pop("sep", " ")
  end = kwargs.pop("end", os.linesep)
  if kwargs:
    raise ValueError("Unrecognized keyword arguments for tf.print: %s" % kwargs)
  format_name = None
  if name:
    format_name = name + "_format"

  # Match the C++ string constants representing the different output streams.
  # Keep this updated!
  output_stream_to_constant = {
      sys.stdout: "stdout",
      sys.stderr: "stderr",
      tf_logging.INFO: "log(info)",
      tf_logging.info: "log(info)",
      tf_logging.WARN: "log(warning)",
      tf_logging.warning: "log(warning)",
      tf_logging.warn: "log(warning)",
      tf_logging.ERROR: "log(error)",
      tf_logging.error: "log(error)",
  }

  if _is_filepath(output_stream):
    output_stream_string = output_stream
  else:
    output_stream_string = output_stream_to_constant.get(output_stream)
    if not output_stream_string:
      raise ValueError("Unsupported output stream, logging level, or file." +
                       str(output_stream) +
                       ". Supported streams are sys.stdout, "
                       "sys.stderr, tf.logging.info, "
                       "tf.logging.warning, tf.logging.error. " +
                       "File needs to be in the form of 'file://<filepath>'.")

  # If we are only printing a single string scalar, there is no need to format
  if (len(inputs) == 1 and tensor_util.is_tensor(inputs[0]) and
      (not isinstance(inputs[0], sparse_tensor.SparseTensor)) and
      (inputs[0].shape.ndims == 0) and (inputs[0].dtype == dtypes.string)):
    formatted_string = inputs[0]
  # Otherwise, we construct an appropriate template for the tensors we are
  # printing, and format the template using those tensors.
  else:
    # For each input to this print function, we extract any nested tensors,
    # and construct an appropriate template to format representing the
    # printed input.
    templates = []
    tensors = []
    tensor_free_structure = nest.map_structure(
        lambda x: "" if tensor_util.is_tensor(x) else x, inputs)
    tensor_free_template = " ".join(
        pprint.pformat(x) for x in tensor_free_structure)
    placeholder = _generate_placeholder_string(tensor_free_template)

    for input_ in inputs:
      placeholders = []
      # Use the nest utilities to flatten & process any nested elements in this
      # input. The placeholder for a tensor in the template should be the
      # placeholder string, and the placeholder for a non-tensor can just be
      # the printed value of the non-tensor itself.
      for x in nest.flatten(input_):
        # support sparse tensors
        if isinstance(x, sparse_tensor.SparseTensor):
          tensors.extend([x.indices, x.values, x.dense_shape])
          placeholders.append(
              "SparseTensor(indices={}, values={}, shape={})".format(
                  placeholder, placeholder, placeholder))
        elif tensor_util.is_tensor(x):
          tensors.append(x)
          placeholders.append(placeholder)
        else:
          placeholders.append(x)

      if isinstance(input_, six.string_types):
        # If the current input to format/print is a normal string, that string
        # can act as the template.
        cur_template = input_
      else:
        # We pack the placeholders into a data structure that matches the
        # input data structure format, then format that data structure
        # into a string template.
        #
        # NOTE: We must use pprint.pformat here for building the template for
        # unordered data structures such as `dict`, because `str` doesn't
        # guarantee orderings, while pprint prints in sorted order. pprint
        # will match the ordering of `nest.flatten`.
        # This even works when nest.flatten reorders OrderedDicts, because
        # pprint is printing *after* the OrderedDicts have been reordered.
        cur_template = pprint.pformat(
            nest.pack_sequence_as(input_, placeholders))
      templates.append(cur_template)

    # We join the templates for the various inputs into a single larger
    # template. We also remove all quotes surrounding the placeholders, so that
    # the formatted/printed output will not contain quotes around tensors.
    # (example of where these quotes might appear: if we have added a
    # placeholder string into a list, then pretty-formatted that list)
    template = sep.join(templates)
    template = template.replace("'" + placeholder + "'", placeholder)
    formatted_string = string_ops.string_format(
        inputs=tensors,
        template=template,
        placeholder=placeholder,
        summarize=summarize,
        name=format_name)

  if compat.forward_compatible(2019, 5, 27):
    return gen_logging_ops.print_v2(
        formatted_string, output_stream=output_stream_string, name=name,
        end=end)
  else:
    if end == os.linesep:
      end = ""
    return gen_logging_ops.print_v2(
        formatted_string + end, output_stream=output_stream_string, name=name)
Code example #29
def split_compile_and_replicate(computation,
                                inputs=None,
                                infeed_queue=None,
                                device_assignment=None,
                                name=None,
                                use_tpu=True):
    """Builds graph operators that runs compilation and replicated computation.

  This is a lower level interface than replicate that returns a separate compile
  and execute output tensor. In the generated graph the compile op feeds into
  the execute op and no additional compilation is incurred when running the
  compile op before the execute op. The compile op returns additional
  information about the compilation but does not return the compiled program.

  Args:
    computation: A Python function that builds the computation to replicate.
    inputs: A list of lists of input tensors or `None` (equivalent to
      `[[]]`), indexed by `[replica_num][input_num]`. All replicas must
      have the same number of inputs.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to computation.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each replica of the computation uses
      only one core, and there is either only one replica, or the number of
      replicas is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.
    use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU
      backends. Currently, only supports a default placement (computation is
      placed on GPU if one is available, and on CPU if not).
  Returns:
    A list of lists with the first list corresponding to the compile op and the
    second a list of output tensors, indexed by `[replica_num][output_num]`.
  Raises:
    ValueError: If all replicas do not have equal numbers of input tensors.
    ValueError: If the number of inputs per replica does not match
      the number of formal parameters to `computation`.
  """
    del name
    inputs = [[]] if inputs is None else inputs

    metadata_kwargs = {}
    if device_assignment is not None:
        # Turn the Numpy array into a flattened list so we can pass it as an
        # operator attribute.
        metadata_kwargs = {
            "topology":
            device_assignment.topology.serialized(),
            "device_assignment":
            device_assignment.core_assignment.flatten().tolist()
        }
        # TODO(phawkins): remove this case after the forward compatibility window
        # expires on 2018-10-5.
        if api_compat.forward_compatible(2018, 10, 5):
            metadata_kwargs["num_cores_per_replica"] = (
                device_assignment.num_cores_per_replica)
        else:
            metadata_kwargs["computation_shape"] = [
                device_assignment.num_cores_per_replica
            ]

    if ((not isinstance(inputs, list))
            or any(not isinstance(inp, (list, tuple)) for inp in inputs)):
        raise TypeError(
            "tpu.replicate() inputs must be a list of lists/tuples")

    num_replicas = len(inputs)

    # No replicas? Nothing to do.
    if num_replicas == 0:
        return []

    # Converts inputs to Tensors.
    inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in inputs]

    # Verifies that all replicas have matching numbers and types of inputs
    input_types = [x.dtype for x in inputs[0]]
    input_arity = len(input_types)
    for i in range(num_replicas):
        if len(inputs[i]) != input_arity:
            raise ValueError("Replicas must have the same number of inputs. "
                             "Replica 0 had {} inputs, replica {} had {} "
                             "inputs.".format(input_arity, i, len(inputs[i])))

        types = [x.dtype for x in inputs[i]]
        if types != input_types:
            raise ValueError(
                "Replicas must have matching input types. Replica 0 had "
                "input types {}, replica {} had input types {}".format(
                    input_types, i, types))

    arg_error = xla.check_function_argument_count(computation, input_arity,
                                                  infeed_queue)
    if arg_error is not None:
        if infeed_queue is None:
            raise TypeError(
                "Supplied computation cannot be called with the specified inputs. "
                "You specified %d inputs: %s, but the computation needs %s" %
                (input_arity, str([i.name for i in inputs[0]]), arg_error))
        else:
            raise TypeError(
                "Supplied computation cannot be called with the specified inputs. "
                "You specified %d inputs: %s and %d additional inputs from infeed,"
                " but the computation needs %s" %
                (input_arity, str([i.name for i in inputs[0]]),
                 infeed_queue.number_of_tuple_elements, arg_error))

    graph = ops.get_default_graph()

    # Fan-in: Builds a TPUReplicatedInput node for each input.
    computation_inputs = []
    for i in range(0, input_arity):
        replicas = [inputs[replica][i] for replica in xrange(num_replicas)]
        computation_inputs.append(
            tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))

    cluster_name = graph.unique_name("cluster")
    pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
    context = TPUReplicateContext(name=cluster_name,
                                  num_replicas=num_replicas,
                                  pivot=pivot)
    try:
        context.Enter()

        metadata = tpu_ops.tpu_replicate_metadata(num_replicas=num_replicas,
                                                  use_tpu=use_tpu,
                                                  **metadata_kwargs)

        with tpu_function.tpu_shard_context(
                num_replicas), ops.control_dependencies([metadata]):

            # Add identity ops so even unused inputs are "consumed" by the
            # computation. This is to avoid orphaned TPUReplicatedInput nodes.
            # TODO(phawkins): consider instead pruning unused TPUReplicatedInput
            # and eliding trivial TPUReplicatedInput/TPUReplicatedOutput pairs.
            computation_inputs = [
                array_ops.identity(x, name="replicated_input_{}".format(i))
                for i, x in enumerate(computation_inputs)
            ]

            # If there is an infeed queue, adds the dequeued values to the
            # computation's inputs.
            if infeed_queue is not None:
                infeed_queue.set_number_of_shards(num_replicas)
                for t in infeed_queue.generate_dequeue_op():
                    computation_inputs.append(t)

            # Only resource variables work inside a TPU computation, so turn on
            # resource variables for the computation.
            # TODO(phawkins): consider removing this code. It will
            # be less confusing to clients if they knowingly choose to use resource
            # variables.
            # Partitioned variables is not supported (b/112311320).
            vscope = variable_scope.get_variable_scope()
            saved_use_resource = vscope.use_resource
            saved_custom_getter = vscope.custom_getter

            def custom_getter(getter, name, *args, **kwargs):
                """Variables on TPU have a few restrictions."""
                partitioner = kwargs["partitioner"]
                if partitioner is not None:
                    kwargs["partitioner"] = None
                    logging.warning(
                        "Partitioned variables are not supported on TPU. Got "
                        "`partitioner` that is {} for variable {}. "
                        "Setting `partitioner` to `None`.".format(
                            partitioner, name))
                if saved_custom_getter is None:
                    return getter(name, *args, **kwargs)
                else:
                    return saved_custom_getter(getter, name, *args, **kwargs)

            vscope.set_use_resource(True)
            vscope.set_custom_getter(custom_getter)

            outputs = computation(*computation_inputs)

            vscope.set_use_resource(saved_use_resource)
            vscope.set_custom_getter(saved_custom_getter)

        # If the computation returns `None`, make it an empty tuple.
        if outputs is None:
            outputs = tuple()
        # If the computation only returned one value, makes it a tuple.
        if not isinstance(outputs, (list, tuple)):
            outputs = (outputs, )

        # Append `no_op` here so that fetching any return value of this function
        # will trigger TPUExecute node.
        outputs += (control_flow_ops.no_op(), )
        try:
            with ops.device(core(0)):
                outputs = [
                    o if isinstance(o, ops.Operation) else
                    ops.convert_to_tensor(o) for o in outputs
                ]
        except Exception as e:
            raise ValueError(
                "TPU function return values must all either be Operations or "
                "convertible to Tensors. Got '%s'" % str(e))

        # Separates the returned Operations and Tensors.
        output_operations = [
            o for o in outputs if isinstance(o, ops.Operation)
        ]
        output_tensors = [
            o for o in outputs if not isinstance(o, ops.Operation)
        ]

        if outputs != output_tensors + output_operations:
            raise ValueError(
                "TPU functions must return zero-or more Tensor values followed by "
                "zero or more Operations.")
        output_arity = len(output_tensors)

        # Wraps outputs in Identity ops. Otherwise a replicated input copied
        # straight to an output would bypass the replicate(). This would be bad
        # because the TPUReplicatedInput/TPUReplicatedOutput operator would not
        # be rewritten away, leading to a runtime error.
        # TODO(phawkins): extend the rewrite to elide these nodes instead.
        new_output_tensors = []
        for t in output_tensors:
            with ops.device(t.device if t.device else core(0)):
                new_output_tensors.append(array_ops.identity(t))
        output_tensors = new_output_tensors
        context.ExitResult(output_tensors)
    finally:
        context.report_unsupported_operations()
        context.Exit()
        host_compute_core = context.HostComputeCore()

    if host_compute_core:
        attr_value = attr_value_pb2.AttrValue()
        attr_value.list.s.extend(
            [compat.as_bytes(x) for x in host_compute_core])
        metadata._set_attr("host_compute_core", attr_value)  # pylint: disable=protected-access

    # Fan-out: Builds a TPUReplicatedOutput node for each output.
    outputs = [
        tpu_ops.tpu_replicated_output(output_tensors[i],
                                      num_replicas,
                                      name="output{}".format(i))
        for i in xrange(output_arity)
    ]

    with ops.control_dependencies([metadata]):
        if use_tpu:
            compile_status = tpu_ops.tpu_compilation_result()
            op = compile_status.op
            attr_value = attr_value_pb2.AttrValue(
                s=compat.as_bytes(cluster_name))
            op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value)  # pylint: disable=protected-access
        else:
            compile_status = control_flow_ops.no_op(name="compilation_status")

    with ops.control_dependencies(output_operations):
        if output_arity == 0:
            # Returns a list of NoOps dependent on the replication Op, indexed by
            # [replica_num].
            return [
                compile_status,
                [
                    control_flow_ops.no_op(name="shard_%d" % i)
                    for i in range(num_replicas)
                ]
            ]
        else:
            # Wraps the outputs in identity operators so the names of any possible
            # `fetch` nodes are preserved by the replication rewrite.
            return [
                compile_status,
                [[
                    array_ops.identity(outputs[out][replica],
                                       name="output_%d_shard_%d" %
                                       (out, replica))
                    for out in xrange(output_arity)
                ] for replica in xrange(num_replicas)]
            ]
Code example #30
File: metrics_utils.py Project: santakd/tensorflow
def update_confusion_matrix_variables(variables_to_update,
                                      y_true,
                                      y_pred,
                                      thresholds,
                                      top_k=None,
                                      class_id=None,
                                      sample_weight=None,
                                      multi_label=False,
                                      label_weights=None,
                                      thresholds_distributed_evenly=False):
  """Returns op to update the given confusion matrix variables.

  For every pair of values in y_true and y_pred:

  true_positives: y_true == True and y_pred > thresholds
  false_negatives: y_true == True and y_pred <= thresholds
  true_negatives: y_true == False and y_pred <= thresholds
  false_positives: y_true == False and y_pred > thresholds

  The results will be weighted and added together. When multiple thresholds are
  provided, we will repeat the same for every threshold.

  For estimation of these metrics over a stream of data, the function creates an
  `update_op` operation that updates the given variables.

  If `sample_weight` is `None`, weights default to 1.
  Use weights of 0 to mask values.

  Args:
    variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
      and corresponding variables to update as values.
    y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`.
    y_pred: A floating point `Tensor` of arbitrary shape and whose values are in
      the range `[0, 1]`.
    thresholds: A float value, float tensor, python list, or tuple of float
      thresholds in `[0, 1]`, or NEG_INF (used when top_k is set).
    top_k: Optional int, indicates that the positive labels should be limited to
      the top k predictions.
    class_id: Optional int, limits the prediction and labels to the class
      specified by this argument.
    sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
      `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `y_true` dimension).
    multi_label: Optional boolean indicating whether multidimensional
      prediction/labels should be treated as multilabel responses, or flattened
      into a single label. When True, the values of `variables_to_update` must
      have a second dimension equal to the number of labels in y_true and
      y_pred, and those tensors must not be RaggedTensors.
    label_weights: (optional) tensor of non-negative weights for multilabel
      data. The weights are applied when calculating TP, FP, FN, and TN without
      explicit multilabel handling (i.e. when the data is to be flattened).
    thresholds_distributed_evenly: Boolean, whether the thresholds are evenly
      distributed within the list. An optimized method will be used if this is
      the case. See _update_confusion_matrix_variables_optimized() for more
      details.

  Returns:
    Update op.

  Raises:
    ValueError: If `y_pred` and `y_true` have mismatched shapes, or if
      `sample_weight` is not `None` and its shape doesn't match `y_pred`, or if
      `variables_to_update` contains invalid keys.
  """
  if multi_label and label_weights is not None:
    raise ValueError('`label_weights` for multilabel data should be handled '
                     'outside of `update_confusion_matrix_variables` when '
                     '`multi_label` is True.')
  if variables_to_update is None:
    return
  if not any(
      key for key in variables_to_update if key in list(ConfusionMatrix)):
    raise ValueError(
        'Please provide at least one valid confusion matrix '
        'variable to update. Valid variable key options are: "{}". '
        'Received: "{}"'.format(
            list(ConfusionMatrix), variables_to_update.keys()))

  variable_dtype = list(variables_to_update.values())[0].dtype

  y_true = math_ops.cast(y_true, dtype=variable_dtype)
  y_pred = math_ops.cast(y_pred, dtype=variable_dtype)

  if thresholds_distributed_evenly:
    # Check whether the thresholds have any leading or trailing epsilon added
    # for floating point imprecision. The leading and trailing thresholds will
    # be handled a bit differently as corner cases.
    # At this point, thresholds should be a list/array with more than 2 items,
    # and ranged between [0, 1]. See is_evenly_distributed_thresholds() for more
    # details.
    thresholds_with_epsilon = thresholds[0] < 0.0 or thresholds[-1] > 1.0

  thresholds = ops.convert_to_tensor_v2_with_dispatch(
      thresholds, dtype=variable_dtype)
  num_thresholds = thresholds.shape.as_list()[0]

  if multi_label:
    one_thresh = math_ops.equal(
        math_ops.cast(1, dtype=dtypes.int32),
        array_ops.rank(thresholds),
        name='one_set_of_thresholds_cond')
  else:
    [y_pred,
     y_true], _ = ragged_assert_compatible_and_get_flat_values([y_pred, y_true],
                                                               sample_weight)
    one_thresh = math_ops.cast(True, dtype=dtypes.bool)

  invalid_keys = [
      key for key in variables_to_update if key not in list(ConfusionMatrix)
  ]
  if invalid_keys:
    raise ValueError(
        'Invalid keys: {}. Valid variable key options are: "{}"'.format(
            invalid_keys, list(ConfusionMatrix)))

  with ops.control_dependencies([
      check_ops.assert_greater_equal(
          y_pred,
          math_ops.cast(0.0, dtype=y_pred.dtype),
          message='predictions must be >= 0'),
      check_ops.assert_less_equal(
          y_pred,
          math_ops.cast(1.0, dtype=y_pred.dtype),
          message='predictions must be <= 1')
  ]):
    if sample_weight is None:
      y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
          y_pred, y_true)
    else:
      sample_weight = math_ops.cast(sample_weight, dtype=variable_dtype)
      y_pred, y_true, sample_weight = (
          losses_utils.squeeze_or_expand_dimensions(
              y_pred, y_true, sample_weight=sample_weight))
  y_pred.shape.assert_is_compatible_with(y_true.shape)

  if top_k is not None:
    y_pred = _filter_top_k(y_pred, top_k)
  if class_id is not None:
    y_true = y_true[..., class_id]
    y_pred = y_pred[..., class_id]

  if thresholds_distributed_evenly and compat.forward_compatible(2021, 6, 8):
    # The new approach will take effect after 2021/6/8, to give enough time
    # for Brella release to pick up the new op tf.math.cumsum with float32.
    return _update_confusion_matrix_variables_optimized(
        variables_to_update, y_true, y_pred, thresholds,
        multi_label=multi_label, sample_weights=sample_weight,
        label_weights=label_weights,
        thresholds_with_epsilon=thresholds_with_epsilon)

  pred_shape = array_ops.shape(y_pred)
  num_predictions = pred_shape[0]
  if y_pred.shape.ndims == 1:
    num_labels = 1
  else:
    num_labels = gen_math_ops.Prod(input=pred_shape[1:], axis=0)
  thresh_label_tile = array_ops.where_v2(one_thresh, num_labels,
                                         array_ops.ones([], dtype=dtypes.int32))

  # Reshape predictions and labels, adding a dim for thresholding.
  if multi_label:
    predictions_extra_dim = array_ops.expand_dims(y_pred, 0)
    labels_extra_dim = array_ops.expand_dims(
        math_ops.cast(y_true, dtype=dtypes.bool), 0)
  else:
    # Flatten predictions and labels when not multilabel.
    predictions_extra_dim = array_ops.reshape(y_pred, [1, -1])
    labels_extra_dim = array_ops.reshape(
        math_ops.cast(y_true, dtype=dtypes.bool), [1, -1])

  # Tile the thresholds for every prediction.
  if multi_label:
    thresh_pretile_shape = [num_thresholds, 1, -1]
    thresh_tiles = [1, num_predictions, thresh_label_tile]
    data_tiles = [num_thresholds, 1, 1]
  else:
    thresh_pretile_shape = [num_thresholds, -1]
    thresh_tiles = [1, num_predictions * num_labels]
    data_tiles = [num_thresholds, 1]

  thresh_tiled = array_ops.tile(
      array_ops.reshape(thresholds, thresh_pretile_shape),
      array_ops.stack(thresh_tiles))

  # Tile the predictions for every threshold.
  preds_tiled = array_ops.tile(predictions_extra_dim, data_tiles)

  # Compare predictions and threshold.
  pred_is_pos = math_ops.greater(preds_tiled, thresh_tiled)

  # Tile labels by number of thresholds
  label_is_pos = array_ops.tile(labels_extra_dim, data_tiles)

  if sample_weight is not None:
    sample_weight = weights_broadcast_ops.broadcast_weights(
        math_ops.cast(sample_weight, dtype=variable_dtype), y_pred)
    weights_tiled = array_ops.tile(
        array_ops.reshape(sample_weight, thresh_tiles), data_tiles)
  else:
    weights_tiled = None

  if label_weights is not None and not multi_label:
    label_weights = array_ops.expand_dims(label_weights, 0)
    label_weights = weights_broadcast_ops.broadcast_weights(label_weights,
                                                            y_pred)
    label_weights_tiled = array_ops.tile(
        array_ops.reshape(label_weights, thresh_tiles), data_tiles)
    if weights_tiled is None:
      weights_tiled = label_weights_tiled
    else:
      weights_tiled = math_ops.multiply(weights_tiled, label_weights_tiled)

  update_ops = []

  def weighted_assign_add(label, pred, weights, var):
    label_and_pred = math_ops.cast(
        math_ops.logical_and(label, pred), dtype=var.dtype)
    if weights is not None:
      label_and_pred *= math_ops.cast(weights, dtype=var.dtype)
    return var.assign_add(math_ops.reduce_sum(label_and_pred, 1))

  loop_vars = {
      ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos),
  }
  update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
  update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update
  update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update

  if update_fn or update_tn:
    pred_is_neg = math_ops.logical_not(pred_is_pos)
    loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg)

  if update_fp or update_tn:
    label_is_neg = math_ops.logical_not(label_is_pos)
    loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos)
    if update_tn:
      loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (label_is_neg, pred_is_neg)

  for matrix_cond, (label, pred) in loop_vars.items():

    if matrix_cond in variables_to_update:
      update_ops.append(
          weighted_assign_add(label, pred, weights_tiled,
                              variables_to_update[matrix_cond]))

  return control_flow_ops.group(update_ops)
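The four counts the docstring defines reduce, per threshold, to broadcasting a comparison and summing the matching pairs. As a rough, self-contained sketch of those definitions with public TF ops (not the Keras-internal helper; weights, `top_k` and multi-label handling are ignored):

```python
# Illustration only: the tp/fn/tn/fp bookkeeping described above, for a
# handful of thresholds, using public TensorFlow ops.
import tensorflow as tf

y_true = tf.constant([1.0, 0.0, 1.0, 0.0])
y_pred = tf.constant([0.9, 0.6, 0.3, 0.1])
thresholds = tf.constant([0.25, 0.5, 0.75])

# Broadcast to shape [num_thresholds, num_examples].
pred_is_pos = y_pred[tf.newaxis, :] > thresholds[:, tf.newaxis]
label_is_pos = tf.cast(y_true, tf.bool)[tf.newaxis, :]

def count(label, pred):
  # Count examples where both conditions hold, per threshold row.
  return tf.reduce_sum(tf.cast(tf.logical_and(label, pred), tf.float32), axis=1)

tp = count(label_is_pos, pred_is_pos)
fn = count(label_is_pos, tf.logical_not(pred_is_pos))
fp = count(tf.logical_not(label_is_pos), pred_is_pos)
tn = count(tf.logical_not(label_is_pos), tf.logical_not(pred_is_pos))
print(tp.numpy(), fn.numpy(), fp.numpy(), tn.numpy())  # per-threshold counts
```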
Code example #31
File: io.py Project: TheVinhLuong102/tensorflow
def save(dataset,
         path,
         compression=None,
         shard_func=None,
         checkpoint_args=None):
    """Saves the content of the given dataset.

  Example usage:

  >>> import tempfile
  >>> path = os.path.join(tempfile.gettempdir(), "saved_data")
  >>> # Save a dataset
  >>> dataset = tf.data.Dataset.range(2)
  >>> tf.data.experimental.save(dataset, path)
  >>> new_dataset = tf.data.experimental.load(path)
  >>> for elem in new_dataset:
  ...   print(elem)
  tf.Tensor(0, shape=(), dtype=int64)
  tf.Tensor(1, shape=(), dtype=int64)

  The saved dataset is saved in multiple file "shards". By default, the dataset
  output is divided into shards in a round-robin fashion, but custom sharding
  can be specified via the `shard_func` function. For example, you can save the
  dataset using a single shard as follows:

  ```python
  dataset = make_dataset()
  def custom_shard_func(element):
    return 0
  dataset = tf.data.experimental.save(
      path="/path/to/data", ..., shard_func=custom_shard_func)
  ```

  To enable checkpointing, pass in `checkpoint_args` to the `save` method
  as follows:

  ```python
  dataset = tf.data.Dataset.range(100)
  save_dir = "..."
  checkpoint_prefix = "..."
  step_counter = tf.Variable(0, trainable=False)
  checkpoint_args = {
    "checkpoint_interval": 50,
    "step_counter": step_counter,
    "directory": checkpoint_prefix,
    "max_to_keep": 20,
  }
  tf.data.experimental.save(dataset, save_dir, checkpoint_args=checkpoint_args)
  ```

  NOTE: The directory layout and file format used for saving the dataset is
  considered an implementation detail and may change. For this reason, datasets
  saved through `tf.data.experimental.save` should only be consumed through
  `tf.data.experimental.load`, which is guaranteed to be backwards compatible.

  Args:
    dataset: The dataset to save.
    path: Required. A directory to use for saving the dataset.
    compression: Optional. The algorithm to use to compress data when writing
      it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.
    shard_func: Optional. A function to control the mapping of dataset elements
      to file shards. The function is expected to map elements of the input
      dataset to int64 shard IDs. If present, the function will be traced and
      executed as graph computation.
    checkpoint_args: Optional args for checkpointing which will be passed into
      the `tf.train.CheckpointManager`. If `checkpoint_args` are not specified,
      then checkpointing will not be performed. The `save()` implementation
      creates a `tf.train.Checkpoint` object internally, so users should not
      set the `checkpoint` argument in `checkpoint_args`.

  Raises:
    ValueError: If `checkpoint` is passed into `checkpoint_args`.
  """
    if (context.executing_eagerly() and checkpoint_args
            and compat.forward_compatible(2021, 6, 29)):
        save_dataset = _SaveDataset(dataset, path, shard_func, compression)
        save_iterator = iter(save_dataset)

        if "checkpoint" in checkpoint_args:
            raise ValueError(
                "'Invalid `checkpoint_args`. `checkpoint_args` are not allowed "
                "to include 'checkpoint'.")
        checkpoint = tracking.util.Checkpoint(iterator=save_iterator)
        checkpoint_args["checkpoint"] = checkpoint
        manager = checkpoint_management.CheckpointManager(**checkpoint_args)
        checkpoint.restore(manager.latest_checkpoint)

        for _ in save_iterator:
            if "step_counter" in checkpoint_args:
                checkpoint_args["step_counter"].assign_add(delta=1)
            manager.save(check_interval=True)
    else:
        dataset, shard_func, use_shard_func, path = _set_save_dataset_attributes(
            dataset, shard_func, path)
        gen_experimental_dataset_ops.save_dataset(
            dataset._variant_tensor,  # pylint: disable=protected-access
            path=path,
            shard_func_other_args=shard_func.captured_inputs,
            compression=compression,
            shard_func=shard_func,
            use_shard_func=use_shard_func)
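As a complement to the docstring examples, a hedged end-to-end sketch of the save/load pair with a custom `shard_func`; the path and dataset contents are illustrative, and `element_spec` is passed explicitly so the sketch also works on releases where `tf.data.experimental.load` still requires it:

```python
# Sketch: save a dataset into two shards, then load it back.
import os
import tempfile
import tensorflow as tf

path = os.path.join(tempfile.gettempdir(), "sharded_range")
dataset = tf.data.Dataset.range(10)

def custom_shard_func(element):
  # Route even and odd elements to two different shards (must return int64).
  return element % 2

tf.data.experimental.save(dataset, path, shard_func=custom_shard_func)

restored = tf.data.experimental.load(path, element_spec=dataset.element_spec)
print(sorted(int(x) for x in restored))  # [0, 1, ..., 9]
```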
Code example #32
    def from_string_handle(string_handle,
                           output_types,
                           output_shapes=None,
                           output_classes=None):
        """Creates a new, uninitialized `Iterator` based on the given handle.

    This method allows you to define a "feedable" iterator where you can choose
    between concrete iterators by feeding a value in a `tf.Session.run` call.
    In that case, `string_handle` would be a `tf.placeholder`, and you would
    feed it with the value of `tf.data.Iterator.string_handle` in each step.

    For example, if you had two iterators that marked the current position in
    a training dataset and a test dataset, you could choose which to use in
    each step as follows:

    ```python
    train_iterator = tf.data.Dataset(...).make_one_shot_iterator()
    train_iterator_handle = sess.run(train_iterator.string_handle())

    test_iterator = tf.data.Dataset(...).make_one_shot_iterator()
    test_iterator_handle = sess.run(test_iterator.string_handle())

    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(
        handle, train_iterator.output_types)

    next_element = iterator.get_next()
    loss = f(next_element)

    train_loss = sess.run(loss, feed_dict={handle: train_iterator_handle})
    test_loss = sess.run(loss, feed_dict={handle: test_iterator_handle})
    ```

    Args:
      string_handle: A scalar `tf.Tensor` of type `tf.string` that evaluates
        to a handle produced by the `Iterator.string_handle()` method.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
      output_classes: (Optional.) A nested structure of Python `type` objects
        corresponding to each component of an element of this iterator. If
        omitted, each component is assumed to be of type `tf.Tensor`.

    Returns:
      An `Iterator`.
    """
        output_types = nest.map_structure(dtypes.as_dtype, output_types)
        if output_shapes is None:
            output_shapes = nest.map_structure(
                lambda _: tensor_shape.TensorShape(None), output_types)
        else:
            output_shapes = nest.map_structure_up_to(output_types,
                                                     tensor_shape.as_shape,
                                                     output_shapes)
        if output_classes is None:
            output_classes = nest.map_structure(lambda _: ops.Tensor,
                                                output_types)
        nest.assert_same_structure(output_types, output_shapes)
        output_structure = structure_lib.convert_legacy_structure(
            output_types, output_shapes, output_classes)
        string_handle = ops.convert_to_tensor(string_handle,
                                              dtype=dtypes.string)
        # pylint: disable=protected-access
        if compat.forward_compatible(2018, 8, 3):
            if _device_stack_is_empty():
                with ops.device("/cpu:0"):
                    iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
                        string_handle,
                        output_types=output_structure._flat_types,
                        output_shapes=output_structure._flat_shapes)
            else:
                iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
                    string_handle,
                    output_types=output_structure._flat_types,
                    output_shapes=output_structure._flat_shapes)
        else:
            iterator_resource = gen_dataset_ops.iterator_from_string_handle(
                string_handle,
                output_types=output_structure._flat_types,
                output_shapes=output_structure._flat_shapes)
        # pylint: enable=protected-access
        return Iterator(iterator_resource, None, output_types, output_shapes,
                        output_classes)
Code example #33
File: tpu.py Project: becster/tensorflow
def split_compile_and_replicate(computation,
                                inputs=None,
                                infeed_queue=None,
                                device_assignment=None,
                                name=None,
                                use_tpu=True):
  """Builds graph operators that runs compilation and replicated computation.

  This is a lower-level interface than `replicate` that returns separate compile
  and execute outputs. In the generated graph the compile op feeds into
  the execute op and no additional compilation is incurred when running the
  compile op before the execute op. The compile op returns additional
  information about the compilation but does not return the compiled program.

  Args:
    computation: A Python function that builds the computation to replicate.
    inputs: A list of lists of input tensors or `None` (equivalent to
      `[[]]`), indexed by `[replica_num][input_num]`. All replicas must
      have the same number of inputs.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to computation.
    device_assignment: If not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. Uses a default device assignment if `None`. The
      `DeviceAssignment` may be omitted if each replica of the computation uses
      only one core, and there is either only one replica, or the number of
      replicas is equal to the number of cores in the TPU system.
    name: (Deprecated) Does nothing.
    use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU
      backends. Currently, only supports a default placement (computation is
      placed on GPU if one is available, and on CPU if not).
  Returns:
    A list of lists with the first list corresponding to the compile op and the
    second a list of output tensors, indexed by `[replica_num][output_num]`.
  Raises:
    ValueError: If all replicas do not have equal numbers of input tensors.
    ValueError: If the number of inputs per replica does not match
      the number of formal parameters to `computation`.
  """
  del name
  inputs = [[]] if inputs is None else inputs

  metadata_kwargs = {}
  if device_assignment is not None:
    # Turn the Numpy array into a flattened list so we can pass it as an
    # operator attribute.
    metadata_kwargs = {
        "topology":
            device_assignment.topology.serialized(),
        "device_assignment":
            device_assignment.core_assignment.flatten().tolist()
    }
    # TODO(phawkins): remove this case after the forward compatibility window
    # expires on 2018-10-5.
    if api_compat.forward_compatible(2018, 10, 5):
      metadata_kwargs["num_cores_per_replica"] = (
          device_assignment.num_cores_per_replica)
    else:
      metadata_kwargs["computation_shape"] = [
          device_assignment.num_cores_per_replica
      ]

  if ((not isinstance(inputs, list)) or
      any(not isinstance(inp, (list, tuple)) for inp in inputs)):
    raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")

  num_replicas = len(inputs)

  # No replicas? Nothing to do.
  if num_replicas == 0:
    return []

  # Converts inputs to Tensors.
  inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in inputs]

  # Verifies that all replicas have matching numbers and types of inputs
  input_types = [x.dtype for x in inputs[0]]
  input_arity = len(input_types)
  for i in range(num_replicas):
    if len(inputs[i]) != input_arity:
      raise ValueError("Replicas must have the same number of inputs. "
                       "Replica 0 had {} inputs, replica {} had {} "
                       "inputs.".format(input_arity, i, len(inputs[i])))

    types = [x.dtype for x in inputs[i]]
    if types != input_types:
      raise ValueError(
          "Replicas must have matching input types. Replica 0 had "
          "input types {}, replica {} had input types {}".format(
              input_types, i, types))

  arg_error = xla.check_function_argument_count(
      computation, input_arity, infeed_queue)
  if arg_error is not None:
    if infeed_queue is None:
      raise TypeError(
          "Supplied computation cannot be called with the specified inputs. "
          "You specified %d inputs: %s, but the computation needs %s" % (
              input_arity, str([i.name for i in inputs[0]]), arg_error))
    else:
      raise TypeError(
          "Supplied computation cannot be called with the specified inputs. "
          "You specified %d inputs: %s and %d additional inputs from infeed,"
          " but the computation needs %s" % (input_arity, str(
              [i.name
               for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
                                             arg_error))

  graph = ops.get_default_graph()

  # Fan-in: Builds a TPUReplicatedInput node for each input.
  computation_inputs = []
  for i in range(0, input_arity):
    replicas = [inputs[replica][i] for replica in xrange(num_replicas)]
    computation_inputs.append(
        tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))

  cluster_name = graph.unique_name("cluster")
  pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
  context = TPUReplicateContext(
      name=cluster_name, num_replicas=num_replicas, pivot=pivot)
  try:
    context.Enter()

    metadata = tpu_ops.tpu_replicate_metadata(
        num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)

    with tpu_function.tpu_shard_context(
        num_replicas), ops.control_dependencies([metadata]):

      # Add identity ops so even unused inputs are "consumed" by the
      # computation. This is to avoid orphaned TPUReplicatedInput nodes.
      # TODO(phawkins): consider instead pruning unused TPUReplicatedInput
      # and eliding trivial TPUReplicatedInput/TPUReplicatedOutput pairs.
      computation_inputs = [
          array_ops.identity(x, name="replicated_input_{}".format(i))
          for i, x in enumerate(computation_inputs)
      ]

      # If there is an infeed queue, adds the dequeued values to the
      # computation's inputs.
      if infeed_queue is not None:
        infeed_queue.set_number_of_shards(num_replicas)
        for t in infeed_queue.generate_dequeue_op():
          computation_inputs.append(t)

      # Only resource variables work inside a TPU computation, so turn on
      # resource variables for the computation.
      # TODO(phawkins): consider removing this code. It will
      # be less confusing to clients if they knowingly choose to use resource
      # variables.
      # Partitioned variables is not supported (b/112311320).
      vscope = variable_scope.get_variable_scope()
      saved_use_resource = vscope.use_resource
      saved_custom_getter = vscope.custom_getter

      def custom_getter(getter, name, *args, **kwargs):
        """Variables on TPU have a few restrictions."""
        partitioner = kwargs["partitioner"]
        if partitioner is not None:
          kwargs["partitioner"] = None
          logging.warning(
              "Partitioned variables are not supported on TPU. Got "
              "`partitioner` that is {} for variable {}. "
              "Setting `partitioner` to `None`."
              .format(partitioner, name))
        if saved_custom_getter is None:
          return getter(name, *args, **kwargs)
        else:
          return saved_custom_getter(getter, name, *args, **kwargs)

      vscope.set_use_resource(True)
      vscope.set_custom_getter(custom_getter)

      outputs = computation(*computation_inputs)

      vscope.set_use_resource(saved_use_resource)
      vscope.set_custom_getter(saved_custom_getter)

    # If the computation returns `None`, make it an empty tuple.
    if outputs is None:
      outputs = tuple()
    # If the computation only returned one value, makes it a tuple.
    if not isinstance(outputs, (list, tuple)):
      outputs = (outputs,)

    # Append `no_op` here so that fetching any return value of this function
    # will trigger TPUExecute node.
    outputs += (control_flow_ops.no_op(),)
    try:
      with ops.device(core(0)):
        outputs = [
            o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
            for o in outputs
        ]
    except Exception as e:
      raise ValueError(
          "TPU function return values must all either be Operations or "
          "convertible to Tensors. Got '%s'" % str(e))

    # Separates the returned Operations and Tensors.
    output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
    output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]

    if outputs != output_tensors + output_operations:
      raise ValueError(
          "TPU functions must return zero-or more Tensor values followed by "
          "zero or more Operations.")
    output_arity = len(output_tensors)

    # Wraps outputs in Identity ops. Otherwise a replicated input copied
    # straight to an output would bypass the replicate(). This would be bad
    # because the TPUReplicatedInput/TPUReplicatedOutput operator would not
    # be rewritten away, leading to a runtime error.
    # TODO(phawkins): extend the rewrite to elide these nodes instead.
    new_output_tensors = []
    for t in output_tensors:
      with ops.device(t.device if t.device else core(0)):
        new_output_tensors.append(array_ops.identity(t))
    output_tensors = new_output_tensors
    context.ExitResult(output_tensors)
  finally:
    context.report_unsupported_operations()
    context.Exit()
    host_compute_core = context.HostComputeCore()

  if host_compute_core:
    attr_value = attr_value_pb2.AttrValue()
    attr_value.list.s.extend([compat.as_bytes(x) for x in host_compute_core])
    metadata._set_attr("host_compute_core", attr_value)  # pylint: disable=protected-access

  # Fan-out: Builds a TPUReplicatedOutput node for each output.
  outputs = [tpu_ops.tpu_replicated_output(output_tensors[i], num_replicas,
                                           name="output{}".format(i))
             for i in xrange(output_arity)]

  with ops.control_dependencies([metadata]):
    if use_tpu:
      compile_status = tpu_ops.tpu_compilation_result()
      op = compile_status.op
      attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
      op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value)  # pylint: disable=protected-access
    else:
      compile_status = control_flow_ops.no_op(name="compilation_status")

  with ops.control_dependencies(output_operations):
    if output_arity == 0:
      # Returns a list of NoOps dependent on the replication Op, indexed by
      # [replica_num].
      return [
          compile_status, [
              control_flow_ops.no_op(name="shard_%d" % i)
              for i in range(num_replicas)
          ]
      ]
    else:
      # Wraps the outputs in identity operators so the names of any possible
      # `fetch` nodes are preserved by the replication rewrite.
      return [
          compile_status, [[
              array_ops.identity(
                  outputs[out][replica],
                  name="output_%d_shard_%d" % (out, replica))
              for out in xrange(output_arity)
          ]
                           for replica in xrange(num_replicas)]
      ]
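The `api_compat.forward_compatible(2018, 10, 5)` branch above is the general gating idiom these snippets revolve around: emit the newer attribute or op only once the compatibility window has passed, and keep the old behaviour otherwise. A generic sketch of that idiom, with a placeholder date and two trivial stand-in code paths (not real TensorFlow behaviour):

```python
# Sketch of the forward-compatibility gating idiom; date and paths are
# placeholders for illustration only.
import tensorflow as tf
from tensorflow.python.compat import compat


def doubled(x):
  if compat.forward_compatible(2030, 1, 1):
    # "New" path: taken only once the horizon has passed the given date.
    return tf.multiply(x, 2.0, name="doubled_v2")
  # "Old" path: kept so older consumers of the generated graph keep working.
  return tf.add(x, x, name="doubled_v1")


print(doubled(tf.constant(3.0)))  # tf.Tensor(6.0, ...) via the old path today
```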
Code example #34
File: random_ops.py Project: zk8085454/tensorflow
def random_uniform(shape,
                   minval=0,
                   maxval=None,
                   dtype=dtypes.float32,
                   seed=None,
                   name=None):
    """Outputs random values from a uniform distribution.

  The generated values follow a uniform distribution in the range
  `[minval, maxval)`. The lower bound `minval` is included in the range, while
  the upper bound `maxval` is excluded.

  For floats, the default range is `[0, 1)`.  For ints, at least `maxval` must
  be specified explicitly.

  In the integer case, the random integers are slightly biased unless
  `maxval - minval` is an exact power of two.  The bias is small for values of
  `maxval - minval` significantly smaller than the range of the output (either
  `2**32` or `2**64`).

  Examples:

  >>> tf.random.uniform(shape=[2])
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([..., ...], dtype=float32)>
  >>> tf.random.uniform(shape=[], minval=-1., maxval=0.)
  <tf.Tensor: shape=(), dtype=float32, numpy=-...>
  >>> tf.random.uniform(shape=[], minval=5, maxval=10, dtype=tf.int64)
  <tf.Tensor: shape=(), dtype=int64, numpy=...>

  The `seed` argument produces a deterministic sequence of tensors across
  multiple calls. To repeat that sequence, use `tf.random.set_seed`:

  >>> tf.random.set_seed(5)
  >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10)
  <tf.Tensor: shape=(), dtype=int32, numpy=2>
  >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> tf.random.set_seed(5)
  >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10)
  <tf.Tensor: shape=(), dtype=int32, numpy=2>
  >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>

  If a `seed` argument is specified without `tf.random.set_seed`, small
  changes to function graphs or previously executed operations will change the
  returned value. See `tf.random.set_seed` for details.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    minval: A Tensor or Python value of type `dtype`, broadcastable with
      `maxval`. The lower bound on the range of random values to generate
      (inclusive).  Defaults to 0.
    maxval: A Tensor or Python value of type `dtype`, broadcastable with
      `minval`. The upper bound on the range of random values to generate
      (exclusive). Defaults to 1 if `dtype` is floating point.
    dtype: The type of the output: `float16`, `float32`, `float64`, `int32`,
      or `int64`.
    seed: A Python integer. Used in combination with `tf.random.set_seed` to
      create a reproducible sequence of tensors across multiple calls.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random uniform values.

  Raises:
    ValueError: If `dtype` is integral and `maxval` is not specified.
  """
    dtype = dtypes.as_dtype(dtype)
    if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,
                     dtypes.float64, dtypes.int32, dtypes.int64):
        raise ValueError("Invalid dtype %r" % dtype)
    if maxval is None:
        if dtype.is_integer:
            raise ValueError("Must specify maxval for integer dtype %r" %
                             dtype)
        maxval = 1
    with ops.name_scope(name, "random_uniform",
                        [shape, minval, maxval]) as name:
        shape = tensor_util.shape_tensor(shape)
        # TODO(b/143079601): Remove this once the compatible window is passed.
        if compat.forward_compatible(2019, 12, 3):
            # For floating-point results in [0, 1), minval and maxval are unused.
            # We do an `is` comparison here since it is cheaper than isinstance
            # or __eq__.
            minval_is_zero = minval is 0  # pylint: disable=literal-comparison
            maxval_is_one = maxval is 1  # pylint: disable=literal-comparison
            if not minval_is_zero or not maxval_is_one or dtype.is_integer:
                minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
                maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
            seed1, seed2 = random_seed.get_seed(seed)
            if dtype.is_integer:
                result = gen_random_ops.random_uniform_int(shape,
                                                           minval,
                                                           maxval,
                                                           seed=seed1,
                                                           seed2=seed2,
                                                           name=name)
            else:
                result = gen_random_ops.random_uniform(shape,
                                                       dtype,
                                                       seed=seed1,
                                                       seed2=seed2)
                if minval_is_zero:
                    if not maxval_is_one:
                        result = result * maxval
                else:
                    result = math_ops.add(result * (maxval - minval),
                                          minval,
                                          name=name)
        else:
            minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
            maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
            seed1, seed2 = random_seed.get_seed(seed)
            if dtype.is_integer:
                result = gen_random_ops.random_uniform_int(shape,
                                                           minval,
                                                           maxval,
                                                           seed=seed1,
                                                           seed2=seed2,
                                                           name=name)
            else:
                rnd = gen_random_ops.random_uniform(shape,
                                                    dtype,
                                                    seed=seed1,
                                                    seed2=seed2)
                result = math_ops.add(rnd * (maxval - minval),
                                      minval,
                                      name=name)
        # TODO(b/132092188): C++ shape inference inside functional ops does not
        # cross FuncGraph boundaries since that information is only available in
        # python. So we manually get the static shape using
        # `constant_value_as_shape` which *does* cross function boundaries.
        tensor_util.maybe_set_static_shape(result, shape)
        return result
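The integer-bias caveat in the docstring is the usual modulo-bias argument: a power-of-two source range cannot be mapped perfectly uniformly onto a target range that does not divide it. A tiny, generic illustration of that effect (not the exact scheme `gen_random_ops` uses):

```python
# Map an 8-value source onto 3 outcomes: 8 is not divisible by 3, so at
# least one outcome must be under-represented.
from collections import Counter

source_values = range(8)             # stand-in for uniform random bits
mapped = [v % 3 for v in source_values]
print(Counter(mapped))               # Counter({0: 3, 1: 3, 2: 2}) -> slight bias
```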
Code example #35
  def test_environment_override(self):
    var_name = 'TF_FORWARD_COMPATIBILITY_DELTA_DAYS'

    def remove_os_environment_var():
      try:
        del os.environ[var_name]
      except KeyError:
        pass

    self.addCleanup(remove_os_environment_var)

    compatibility_date = self._compatibility_date()
    one_day_before = self._n_days_after(-1)
    one_day_after = self._n_days_after(1)
    ten_days_after = self._n_days_after(10)
    nine_days_after = self._n_days_after(9)

    self.assertTrue(compat.forward_compatible(*one_day_before))
    self.assertFalse(compat.forward_compatible(*compatibility_date))
    self.assertFalse(compat.forward_compatible(*one_day_after))
    self.assertFalse(compat.forward_compatible(*nine_days_after))
    self.assertFalse(compat.forward_compatible(*ten_days_after))

    os.environ[var_name] = '10'
    compat._update_forward_compatibility_date_number()
    self.assertTrue(compat.forward_compatible(*one_day_before))
    self.assertTrue(compat.forward_compatible(*compatibility_date))
    self.assertTrue(compat.forward_compatible(*one_day_after))
    self.assertTrue(compat.forward_compatible(*nine_days_after))
    self.assertFalse(compat.forward_compatible(*ten_days_after))

    del os.environ[var_name]
    compat._update_forward_compatibility_date_number()
    self.assertTrue(compat.forward_compatible(*one_day_before))
    self.assertFalse(compat.forward_compatible(*compatibility_date))
    self.assertFalse(compat.forward_compatible(*one_day_after))
    self.assertFalse(compat.forward_compatible(*nine_days_after))
    self.assertFalse(compat.forward_compatible(*ten_days_after))

    # Now test interaction between environment variable and context func.
    os.environ[var_name] = '10'
    compat._update_forward_compatibility_date_number()
    self.assertTrue(compat.forward_compatible(*one_day_after))
    with compat.forward_compatibility_horizon(*one_day_after):
      self.assertTrue(compat.forward_compatible(*one_day_before))
      self.assertTrue(compat.forward_compatible(*compatibility_date))
      self.assertFalse(compat.forward_compatible(*one_day_after))
      self.assertFalse(compat.forward_compatible(*nine_days_after))
      self.assertFalse(compat.forward_compatible(*ten_days_after))
    self.assertTrue(compat.forward_compatible(*one_day_after))
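Outside of tests, the same two knobs exercised here can be used to try a not-yet-active code path: the `TF_FORWARD_COMPATIBILITY_DELTA_DAYS` environment variable (read when the compat module is imported) or the `forward_compatibility_horizon` context manager. A small sketch with arbitrary dates:

```python
# Sketch: opting into a future forward-compatibility horizon.
import os

# Option 1: environment variable, set before TensorFlow is imported.
os.environ["TF_FORWARD_COMPATIBILITY_DELTA_DAYS"] = "30"

from tensorflow.python.compat import compat

print(compat.forward_compatible(2030, 1, 1))   # False unless run near that date

# Option 2: context manager, scoped to a block (e.g. inside a test).
with compat.forward_compatibility_horizon(2031, 1, 1):
  print(compat.forward_compatible(2030, 12, 31))  # True inside the block
```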
Code example #36
  def test_past(self):
    with compat.forward_compatibility_horizon(2020, 4, 4):
      self.assertTrue(compat.forward_compatible(2018, 9, 18))
Code example #37
  def test_basic(self):
    compatibility_date = self._compatibility_date()
    one_day_before = self._n_days_after(-1)
    self.assertTrue(compat.forward_compatible(*one_day_before))
    self.assertFalse(compat.forward_compatible(*compatibility_date))
Code example #38
File: readers.py Project: tftuner/tf_fast_recovery
    def __init__(self,
                 filenames,
                 record_defaults,
                 compression_type=None,
                 buffer_size=None,
                 header=False,
                 field_delim=",",
                 use_quote_delim=True,
                 na_value="",
                 select_cols=None):
        """Creates a `CsvDataset` by reading and decoding CSV files.

    The elements of this dataset correspond to records from the file(s).
    RFC 4180 format is expected for CSV files
    (https://tools.ietf.org/html/rfc4180).
    Note that we allow leading and trailing spaces in int or float fields.

    For example, suppose we have a file 'my_file0.csv' with four CSV columns of
    different data types:
    ```
    abcdefg,4.28E10,5.55E6,12
    hijklmn,-5.3E14,,2
    ```

    We can construct a CsvDataset from it as follows:

    ```python
    tf.compat.v1.enable_eager_execution()

    dataset = tf.data.experimental.CsvDataset(
        "my_file*.csv",
        [tf.float32,  # Required field, use dtype or empty tensor
         tf.constant([0.0], dtype=tf.float32),  # Optional field, default to 0.0
         tf.int32,  # Required field, use dtype or empty tensor
         ],
        select_cols=[1,2,3]  # Only parse last three columns
    )
    ```

    The expected output of its iterations is:

    ```python
    for element in dataset:
      print(element)

    >> (4.28e10, 5.55e6, 12)
    >> (-5.3e14, 0.0, 2)
    ```

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      record_defaults: A list of default values for the CSV fields. Each item in
        the list is either a valid CSV `DType` (float32, float64, int32, int64,
        string), or a `Tensor` object with one of the above types. One per
        column of CSV data, with either a scalar `Tensor` default value for the
        column if it is optional, or `DType` or empty `Tensor` if required. If
        both this and `select_cols` are specified, they must have the same
        lengths, and `record_defaults` is assumed to be sorted in order of
        increasing column index.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no
        compression.
      buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
        to buffer while reading files. Defaults to 4MB.
      header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
        have header line(s) that should be skipped when parsing. Defaults to
        `False`.
      field_delim: (Optional.) A `tf.string` scalar containing the delimiter
        character that separates fields in a record. Defaults to `","`.
      use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats
        double quotation marks as regular characters inside of string fields
        (ignoring RFC 4180, Section 2, Bullet 5). Defaults to `True`.
      na_value: (Optional.) A `tf.string` scalar indicating a value that will
        be treated as NA/NaN.
      select_cols: (Optional.) A sorted list of column indices to select from
        the input data. If specified, only this subset of columns will be
        parsed. Defaults to parsing all columns.
    """
        self._filenames = ops.convert_to_tensor(filenames,
                                                dtype=dtypes.string,
                                                name="filenames")
        self._compression_type = convert.optional_param_to_tensor(
            "compression_type",
            compression_type,
            argument_default="",
            argument_dtype=dtypes.string)
        record_defaults = [
            constant_op.constant([], dtype=x)
            if x in _ACCEPTABLE_CSV_TYPES else x for x in record_defaults
        ]
        self._record_defaults = ops.convert_n_to_tensor(record_defaults,
                                                        name="record_defaults")
        self._buffer_size = convert.optional_param_to_tensor(
            "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
        self._header = ops.convert_to_tensor(header,
                                             dtype=dtypes.bool,
                                             name="header")
        self._field_delim = ops.convert_to_tensor(field_delim,
                                                  dtype=dtypes.string,
                                                  name="field_delim")
        self._use_quote_delim = ops.convert_to_tensor(use_quote_delim,
                                                      dtype=dtypes.bool,
                                                      name="use_quote_delim")
        self._na_value = ops.convert_to_tensor(na_value,
                                               dtype=dtypes.string,
                                               name="na_value")
        self._select_cols = convert.optional_param_to_tensor(
            "select_cols",
            select_cols,
            argument_default=[],
            argument_dtype=dtypes.int64,
        )
        self._structure = structure.NestedStructure(
            tuple(
                structure.TensorStructure(d.dtype, [])
                for d in self._record_defaults))
        if compat.forward_compatible(2019, 8, 3):
            variant_tensor = gen_experimental_dataset_ops.csv_dataset(
                filenames=self._filenames,
                record_defaults=self._record_defaults,
                buffer_size=self._buffer_size,
                header=self._header,
                output_shapes=self._flat_shapes,
                field_delim=self._field_delim,
                use_quote_delim=self._use_quote_delim,
                na_value=self._na_value,
                select_cols=self._select_cols,
                compression_type=self._compression_type)
        else:
            variant_tensor = gen_experimental_dataset_ops.experimental_csv_dataset(
                filenames=self._filenames,
                record_defaults=self._record_defaults,
                buffer_size=self._buffer_size,
                header=self._header,
                output_shapes=self._flat_shapes,
                field_delim=self._field_delim,
                use_quote_delim=self._use_quote_delim,
                na_value=self._na_value,
                select_cols=self._select_cols,
                compression_type=self._compression_type)
        super(CsvDatasetV2, self).__init__(variant_tensor)
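A self-contained variant of the docstring example that first writes a small file and then parses two of its columns; the file name, contents and column choices are made up for illustration:

```python
# End-to-end sketch for CsvDataset: write a tiny CSV, then parse the
# "score" and "count" columns (columns 1 and 2), skipping the header.
import os
import tempfile
import tensorflow as tf

path = os.path.join(tempfile.gettempdir(), "example.csv")
with open(path, "w") as f:
  f.write("name,score,count\n")
  f.write("abc,4.5,12\n")
  f.write("def,-1.25,2\n")

dataset = tf.data.experimental.CsvDataset(
    path,
    record_defaults=[tf.constant([0.0], tf.float32),  # score (optional)
                     tf.int32],                       # count (required)
    header=True,
    select_cols=[1, 2])

for score, count in dataset:
  print(float(score), int(count))   # 4.5 12 and -1.25 2
```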
Code example #40
  def testRectangularBatch(self):
    # LINT.IfChange
    if compat.forward_compatible(2019, 11, 30):
    # LINT.ThenChange(//tensorflow/python/ops/array_ops.py)

      with self.cached_session(use_gpu=True):
        # Stores expected num_rows and num_cols (when the other is given).
        # expected[(d_lower, d_upper)] = (expected_num_rows, expected_num_cols)
        test_list = list()

        # Square cases:
        expected = {
            (-1, -1): (5, 4),
            (-4, -3): (5, 2),
            (-2, 1): (5, 5),
            (2, 4): (3, 5),
        }
        test_list.append((expected, square_cases()))

        # More cases:
        expected = {(-3, -1): (5, 4), (-1, 1): (4, 4), (2, 4): (4, 6)}
        test_list.append((expected, self._moreCases()))

        # Tall cases
        expected = {
            (0, 0): (3, 3),
            (-4, -3): (5, 2),
            (-2, -1): (4, 3),
            (-2, 1): (3, 3),
            (1, 2): (2, 3)
        }
        test_list.append((expected, tall_cases()))

        # Fat cases
        expected = {
            (2, 2): (2, 4),
            (-2, 0): (3, 3),
            (-1, 1): (3, 3),
            (0, 3): (3, 3)
        }
        test_list.append((expected, fat_cases()))

        for padding_value in [0, 555, -11]:
          # Giving both num_rows and num_cols
          for _, tests in [tall_cases(), fat_cases()]:
            for diags, (vecs, solution) in tests.items():
              v_diags = array_ops.matrix_diag(
                  vecs,
                  k=diags,
                  num_rows=solution.shape[-2],
                  num_cols=solution.shape[-1],
                  padding_value=padding_value)
              mask = solution == 0
              solution = solution + padding_value * mask
              self.assertEqual(v_diags.get_shape(), solution.shape)
              self.assertAllEqual(v_diags.eval(), solution)

          # Giving just num_rows.
          for expected, (_, tests) in test_list:
            for diags, (_, new_num_cols) in expected.items():
              vecs, solution = tests[diags]
              solution = solution.take(indices=range(new_num_cols), axis=-1)
              v_diags = array_ops.matrix_diag(
                  vecs,
                  k=diags,
                  num_rows=solution.shape[-2],
                  padding_value=padding_value)
              mask = solution == 0
              solution = solution + padding_value * mask
              self.assertEqual(v_diags.get_shape(), solution.shape)
              self.assertAllEqual(v_diags.eval(), solution)

          # Giving just num_cols.
          for expected, (_, tests) in test_list:
            for diags, (new_num_rows, _) in expected.items():
              vecs, solution = tests[diags]
              solution = solution.take(indices=range(new_num_rows), axis=-2)
              v_diags = array_ops.matrix_diag(
                  vecs,
                  k=diags,
                  num_cols=solution.shape[-1],
                  padding_value=padding_value)
              mask = solution == 0
              solution = solution + padding_value * mask
              self.assertEqual(v_diags.get_shape(), solution.shape)
              self.assertAllEqual(v_diags.eval(), solution)
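The num_rows/num_cols/padding_value behaviour the test exercises is easiest to see on one small call through the public `tf.linalg.diag` wrapper around `matrix_diag`; a sketch with arbitrarily chosen values:

```python
# Place a band of two diagonals (k=0 and k=1) into an explicit 2x3 matrix,
# filling everything off the band with a padding value.
import tensorflow as tf

diagonals = tf.constant([[8, 9],    # superdiagonal (k=1)
                         [1, 2]])   # main diagonal (k=0)

m = tf.linalg.diag(diagonals, k=(0, 1), num_rows=2, num_cols=3,
                   padding_value=-1)
print(m.numpy())
# [[ 1  8 -1]
#  [-1  2  9]]
```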
Code example #41
    def from_structure(output_types,
                       output_shapes=None,
                       shared_name=None,
                       output_classes=None):
        """Creates a new, uninitialized `Iterator` with the given structure.

    This iterator-constructing method can be used to create an iterator that
    is reusable with many different datasets.

    The returned iterator is not bound to a particular dataset, and it has
    no `initializer`. To initialize the iterator, run the operation returned by
    `Iterator.make_initializer(dataset)`.

    The following is an example:

    ```python
    iterator = Iterator.from_structure(tf.int64, tf.TensorShape([]))

    dataset_range = Dataset.range(10)
    range_initializer = iterator.make_initializer(dataset_range)

    dataset_evens = dataset_range.filter(lambda x: x % 2 == 0)
    evens_initializer = iterator.make_initializer(dataset_evens)

    # Define a model based on the iterator; in this example, the model_fn
    # is expected to take scalar tf.int64 Tensors as input (see
    # the definition of 'iterator' above).
    prediction, loss = model_fn(iterator.get_next())

    # Train for `num_epochs`, where for each epoch, we first iterate over
    # dataset_range, and then iterate over dataset_evens.
    for _ in range(num_epochs):
      # Initialize the iterator to `dataset_range`
      sess.run(range_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break

      # Initialize the iterator to `dataset_evens`
      sess.run(evens_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break
    ```

    Args:
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
      shared_name: (Optional.) If non-empty, this iterator will be shared under
        the given name across multiple sessions that share the same devices
        (e.g. when using a remote server).
      output_classes: (Optional.) A nested structure of Python `type` objects
        corresponding to each component of an element of this iterator. If
        omitted, each component is assumed to be of type `tf.Tensor`.

    Returns:
      An `Iterator`.

    Raises:
      TypeError: If the structures of `output_shapes` and `output_types` are
        not the same.
    """
        output_types = nest.map_structure(dtypes.as_dtype, output_types)
        if output_shapes is None:
            output_shapes = nest.map_structure(
                lambda _: tensor_shape.TensorShape(None), output_types)
        else:
            output_shapes = nest.map_structure_up_to(output_types,
                                                     tensor_shape.as_shape,
                                                     output_shapes)
        if output_classes is None:
            output_classes = nest.map_structure(lambda _: ops.Tensor,
                                                output_types)
        nest.assert_same_structure(output_types, output_shapes)
        output_structure = structure_lib.convert_legacy_structure(
            output_types, output_shapes, output_classes)
        if shared_name is None:
            shared_name = ""
        # pylint: disable=protected-access
        if compat.forward_compatible(2018, 8, 3):
            if _device_stack_is_empty():
                with ops.device("/cpu:0"):
                    iterator_resource = gen_dataset_ops.iterator_v2(
                        container="",
                        shared_name=shared_name,
                        output_types=output_structure._flat_types,
                        output_shapes=output_structure._flat_shapes)
            else:
                iterator_resource = gen_dataset_ops.iterator_v2(
                    container="",
                    shared_name=shared_name,
                    output_types=output_structure._flat_types,
                    output_shapes=output_structure._flat_shapes)
        else:
            iterator_resource = gen_dataset_ops.iterator(
                container="",
                shared_name=shared_name,
                output_types=output_structure._flat_types,
                output_shapes=output_structure._flat_shapes)
        # pylint: enable=protected-access
        return Iterator(iterator_resource, None, output_types, output_shapes,
                        output_classes)
Code example #42
File: iterator_ops.py Project: perfmjs/tensorflow
  def from_structure(output_types,
                     output_shapes=None,
                     shared_name=None,
                     output_classes=None):
    """Creates a new, uninitialized `Iterator` with the given structure.

    This iterator-constructing method can be used to create an iterator that
    is reusable with many different datasets.

    The returned iterator is not bound to a particular dataset, and it has
    no `initializer`. To initialize the iterator, run the operation returned by
    `Iterator.make_initializer(dataset)`.

    The following is an example:

    ```python
    iterator = Iterator.from_structure(tf.int64, tf.TensorShape([]))

    dataset_range = Dataset.range(10)
    range_initializer = iterator.make_initializer(dataset_range)

    dataset_evens = dataset_range.filter(lambda x: x % 2 == 0)
    evens_initializer = iterator.make_initializer(dataset_evens)

    # Define a model based on the iterator; in this example, the model_fn
    # is expected to take scalar tf.int64 Tensors as input (see
    # the definition of 'iterator' above).
    prediction, loss = model_fn(iterator.get_next())

    # Train for `num_epochs`, where for each epoch, we first iterate over
    # dataset_range, and then iterate over dataset_evens.
    for _ in range(num_epochs):
      # Initialize the iterator to `dataset_range`
      sess.run(range_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break

      # Initialize the iterator to `dataset_evens`
      sess.run(evens_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break
    ```

    Args:
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
      shared_name: (Optional.) If non-empty, this iterator will be shared under
        the given name across multiple sessions that share the same devices
        (e.g. when using a remote server).
      output_classes: (Optional.) A nested structure of Python `type` objects
        corresponding to each component of an element of this iterator. If
        omitted, each component is assumed to be of type `tf.Tensor`.

    Returns:
      An `Iterator`.

    Raises:
      TypeError: If the structures of `output_shapes` and `output_types` are
        not the same.
    """
    output_types = nest.map_structure(dtypes.as_dtype, output_types)
    if output_shapes is None:
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(
          output_types, tensor_shape.as_shape, output_shapes)
    if output_classes is None:
      output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
    nest.assert_same_structure(output_types, output_shapes)
    output_structure = structure_lib.convert_legacy_structure(
        output_types, output_shapes, output_classes)
    if shared_name is None:
      shared_name = ""
    # pylint: disable=protected-access
    if compat.forward_compatible(2018, 8, 3):
      if _device_stack_is_empty():
        with ops.device("/cpu:0"):
          iterator_resource = gen_dataset_ops.iterator_v2(
              container="",
              shared_name=shared_name,
              output_types=output_structure._flat_types,
              output_shapes=output_structure._flat_shapes)
      else:
        iterator_resource = gen_dataset_ops.iterator_v2(
            container="",
            shared_name=shared_name,
            output_types=output_structure._flat_types,
            output_shapes=output_structure._flat_shapes)
    else:
      iterator_resource = gen_dataset_ops.iterator(
          container="",
          shared_name=shared_name,
          output_types=output_structure._flat_types,
          output_shapes=output_structure._flat_shapes)
    # pylint: enable=protected-access
    return Iterator(iterator_resource, None, output_types, output_shapes,
                    output_classes)
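
For quick reference, here is a minimal, self-contained version of the reinitializable-iterator pattern that the docstring above describes. It is a sketch assuming TF 1.x-style graph execution via `tf.compat.v1`; the two datasets and the simple print loop are stand-ins chosen for illustration, not part of the original example.

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Two datasets that share the same element structure (scalar int64).
dataset_range = tf.data.Dataset.range(10)
dataset_evens = dataset_range.filter(lambda x: x % 2 == 0)

# One uninitialized iterator that can be bound to either dataset.
iterator = tf.data.Iterator.from_structure(
    dataset_range.output_types, dataset_range.output_shapes)
next_element = iterator.get_next()

range_init = iterator.make_initializer(dataset_range)
evens_init = iterator.make_initializer(dataset_evens)

with tf.Session() as sess:
    for initializer in (range_init, evens_init):
        sess.run(initializer)
        while True:
            try:
                print(sess.run(next_element))
            except tf.errors.OutOfRangeError:
                break
```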
Code example #43
0
File: sdca_ops.py  Project: Ajaycs99/tensorflow
  def minimize(self, global_step=None, name=None):
    """Add operations to train a linear model by minimizing the loss function.

    Args:
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.

    Returns:
      An Operation that updates the variables passed in the constructor.
    """
    # Technically, the op depends on a lot more than the variables,
    # but we'll keep the list short.
    with name_scope(name, 'sdca/minimize'):
      sparse_example_indices = []
      sparse_feature_indices = []
      sparse_features_values = []
      for sf in self._examples['sparse_features']:
        sparse_example_indices.append(sf.example_indices)
        sparse_feature_indices.append(sf.feature_indices)
        # If feature values are missing, sdca assumes a value of 1.0f.
        if sf.feature_values is not None:
          sparse_features_values.append(sf.feature_values)

      # pylint: disable=protected-access
      example_ids_hashed = gen_sdca_ops.sdca_fprint(
          internal_convert_to_tensor(self._examples['example_ids']))
      # pylint: enable=protected-access
      example_state_data = self._hashtable.lookup(example_ids_hashed)
      # Solver returns example_state_update, new delta sparse_feature_weights
      # and delta dense_feature_weights.

      sparse_weights = []
      sparse_indices = []
      # If we have partitioned variables, keep a few dictionaries of Tensors
      # around that we need for the assign_add after the op call to
      # gen_sdca_ops.sdca_optimizer().  These are keyed because we may have a
      # mix of partitioned and un-partitioned variables.
      num_partitions_by_var = {}
      p_assignments_by_var = {}
      gather_ids_by_var = {}
      for v_num, (w, i) in enumerate(
          zip(self._slots['unshrinked_sparse_features_weights'],
              sparse_feature_indices)):
        # Append the sparse_indices (in full-variable space).
        sparse_idx = math_ops.cast(
            array_ops.unique(math_ops.cast(i, dtypes.int32))[0],
            dtypes.int64)
        sparse_indices.append(sparse_idx)
        if isinstance(w, list) or isinstance(w, var_ops.PartitionedVariable):
          num_partitions = len(w)
          flat_ids = array_ops.reshape(sparse_idx, [-1])
          # We use div partitioning, which is easiest to support downstream.
          # Compute num_total_ids as the sum of dim-0 of w, then assign
          # to partitions based on a constant number of ids per partition.
          # Optimize if we already know the full shape statically.
          dim_0_size = self._get_first_dimension_size_statically(
              w, num_partitions)

          if tensor_shape.dimension_value(dim_0_size):
            num_total_ids = constant_op.constant(
                tensor_shape.dimension_value(dim_0_size),
                flat_ids.dtype)
          else:
            dim_0_sizes = []
            for p in range(num_partitions):
              if tensor_shape.dimension_value(w[p].shape[0]) is not None:
                dim_0_sizes.append(tensor_shape.dimension_value(w[p].shape[0]))
              else:
                with ops.colocate_with(w[p]):
                  dim_0_sizes.append(array_ops.shape(w[p])[0])
            num_total_ids = math_ops.reduce_sum(
                math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
          ids_per_partition = num_total_ids // num_partitions
          extras = num_total_ids % num_partitions

          p_assignments = math_ops.maximum(
              flat_ids // (ids_per_partition + 1),
              (flat_ids - extras) // ids_per_partition)

          # Emulate a conditional using a boolean indicator tensor
          new_ids = array_ops.where(p_assignments < extras,
                                    flat_ids % (ids_per_partition + 1),
                                    (flat_ids - extras) % ids_per_partition)

          # Cast partition assignments to int32 for use in dynamic_partition.
          # There really should not be more than 2^32 partitions.
          p_assignments = math_ops.cast(p_assignments, dtypes.int32)
          # Partition list of ids based on assignments into num_partitions
          # separate lists.
          gather_ids = data_flow_ops.dynamic_partition(new_ids,
                                                       p_assignments,
                                                       num_partitions)
          # Add these into the dictionaries for use in the later update.
          num_partitions_by_var[v_num] = num_partitions
          p_assignments_by_var[v_num] = p_assignments
          gather_ids_by_var[v_num] = gather_ids

          # Gather the weights from each partition.
          partition_gathered_weights = []
          for p in range(num_partitions):
            with ops.colocate_with(w[p]):
              partition_gathered_weights.append(
                  array_ops.gather(w[p], gather_ids[p]))

          # Stitch the weights back together in the same order they were before
          # we dynamic_partitioned them.
          condition_indices = data_flow_ops.dynamic_partition(
              math_ops.range(array_ops.shape(new_ids)[0]),
              p_assignments, num_partitions)
          batch_gathered_weights = data_flow_ops.dynamic_stitch(
              condition_indices, partition_gathered_weights)
        else:
          w_as_tensor = internal_convert_to_tensor(w)
          with ops.device(w_as_tensor.device):
            batch_gathered_weights = array_ops.gather(
                w_as_tensor, sparse_idx)
        sparse_weights.append(batch_gathered_weights)

      # pylint: disable=protected-access
      if compat.forward_compatible(year=2018, month=10, day=30):
        esu, sfw, dfw = gen_sdca_ops.sdca_optimizer_v2(
            sparse_example_indices,
            sparse_feature_indices,
            sparse_features_values,
            self._convert_n_to_tensor(self._examples['dense_features']),
            internal_convert_to_tensor(self._examples['example_weights']),
            internal_convert_to_tensor(self._examples['example_labels']),
            sparse_indices,
            sparse_weights,
            self._convert_n_to_tensor(self._slots[
                'unshrinked_dense_features_weights']),
            example_state_data,
            loss_type=self._options['loss_type'],
            l1=self._options['symmetric_l1_regularization'],
            l2=self._symmetric_l2_regularization(),
            num_loss_partitions=self._num_loss_partitions(),
            num_inner_iterations=1,
            adaptive=self._adaptive())
      else:
        esu, sfw, dfw = gen_sdca_ops.sdca_optimizer(
            sparse_example_indices,
            sparse_feature_indices,
            sparse_features_values,
            self._convert_n_to_tensor(self._examples['dense_features']),
            internal_convert_to_tensor(self._examples['example_weights']),
            internal_convert_to_tensor(self._examples['example_labels']),
            sparse_indices,
            sparse_weights,
            self._convert_n_to_tensor(self._slots[
                'unshrinked_dense_features_weights']),
            example_state_data,
            loss_type=self._options['loss_type'],
            l1=self._options['symmetric_l1_regularization'],
            l2=self._symmetric_l2_regularization(),
            num_loss_partitions=self._num_loss_partitions(),
            num_inner_iterations=1,
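            # Note: the legacy SdcaOptimizer op defines this attribute as
            # "adaptative" (sic); SdcaOptimizerV2 above renames it to
            # "adaptive". The spelling below is not a typo in this snippet.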
            adaptative=self._adaptive())
      # pylint: enable=protected-access

      with ops.control_dependencies([esu]):
        update_ops = [self._hashtable.insert(example_ids_hashed, esu)]
        # Update the weights before the proximal step.
        for v_num, (w, i, u) in enumerate(
            zip(self._slots['unshrinked_sparse_features_weights'],
                sparse_indices, sfw)):
          if (isinstance(w, var_ops.PartitionedVariable) or
              isinstance(w, list)):
            update_ops += self._get_partitioned_update_ops(
                v_num, num_partitions_by_var, p_assignments_by_var,
                gather_ids_by_var, w, u, p_assignments, num_partitions)
          else:
            update_ops.append(state_ops.scatter_add(w, i, u))
        for w, u in zip(self._slots['unshrinked_dense_features_weights'], dfw):
          if (isinstance(w, var_ops.PartitionedVariable) or
              isinstance(w, list)):
            split_updates = array_ops.split(
                u, num_or_size_splits=[v.shape.as_list()[0] for v in w])
            for v, split_update in zip(w, split_updates):
              update_ops.append(state_ops.assign_add(v, split_update))
          else:
            update_ops.append(state_ops.assign_add(w, u))
      if not global_step:
        return control_flow_ops.group(*update_ops)
      with ops.control_dependencies(update_ops):
        return state_ops.assign_add(global_step, 1, name=name).op
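
The div-partitioning arithmetic above (`ids_per_partition`, `extras`, `p_assignments`, `new_ids`) is easy to check in isolation. Below is a small sketch with made-up numbers, using NumPy only so it runs outside TensorFlow; it mirrors the formulas in `minimize()` but is not taken from the original file.

```python
import numpy as np

num_total_ids = 10   # sum of dim-0 sizes across partitions (illustrative)
num_partitions = 3
flat_ids = np.array([0, 3, 4, 7, 9])

ids_per_partition = num_total_ids // num_partitions   # 3
extras = num_total_ids % num_partitions               # 1 partition gets one extra row

# First `extras` partitions own (ids_per_partition + 1) ids each,
# the remaining partitions own ids_per_partition ids each.
p_assignments = np.maximum(flat_ids // (ids_per_partition + 1),
                           (flat_ids - extras) // ids_per_partition)

# Offset of each id within its assigned partition.
new_ids = np.where(p_assignments < extras,
                   flat_ids % (ids_per_partition + 1),
                   (flat_ids - extras) % ids_per_partition)

print(p_assignments)  # [0 0 1 2 2]
print(new_ids)        # [0 3 0 0 2]
```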
Code example #44
0
    def minimize(self, global_step=None, name=None):
        """Add operations to train a linear model by minimizing the loss function.

        Args:
          global_step: Optional `Variable` to increment by one after the
            variables have been updated.
          name: Optional name for the returned operation.

        Returns:
          An Operation that updates the variables passed in the constructor.
        """
        # Technically, the op depends on a lot more than the variables,
        # but we'll keep the list short.
        with name_scope(name, 'sdca/minimize'):
            sparse_example_indices = []
            sparse_feature_indices = []
            sparse_features_values = []
            for sf in self._examples['sparse_features']:
                sparse_example_indices.append(sf.example_indices)
                sparse_feature_indices.append(sf.feature_indices)
                # If feature values are missing, sdca assumes a value of 1.0f.
                if sf.feature_values is not None:
                    sparse_features_values.append(sf.feature_values)

            # pylint: disable=protected-access
            example_ids_hashed = gen_sdca_ops.sdca_fprint(
                internal_convert_to_tensor(self._examples['example_ids']))
            # pylint: enable=protected-access
            example_state_data = self._hashtable.lookup(example_ids_hashed)
            # Solver returns example_state_update, new delta sparse_feature_weights
            # and delta dense_feature_weights.

            sparse_weights = []
            sparse_indices = []
            # If we have partitioned variables, keep a few dictionaries of Tensors
            # around that we need for the assign_add after the op call to
            # gen_sdca_ops.sdca_optimizer().  These are keyed because we may have a
            # mix of partitioned and un-partitioned variables.
            num_partitions_by_var = {}
            p_assignments_by_var = {}
            gather_ids_by_var = {}
            for v_num, (w, i) in enumerate(
                    zip(self._slots['unshrinked_sparse_features_weights'],
                        sparse_feature_indices)):
                # Append the sparse_indices (in full-variable space).
                sparse_idx = math_ops.cast(
                    array_ops.unique(math_ops.cast(i, dtypes.int32))[0],
                    dtypes.int64)
                sparse_indices.append(sparse_idx)
                if isinstance(w, list) or isinstance(
                        w, var_ops.PartitionedVariable):
                    num_partitions = len(w)
                    flat_ids = array_ops.reshape(sparse_idx, [-1])
                    # We use div partitioning, which is easiest to support downstream.
                    # Compute num_total_ids as the sum of dim-0 of w, then assign
                    # to partitions based on a constant number of ids per partition.
                    # Optimize if we already know the full shape statically.
                    dim_0_size = self._get_first_dimension_size_statically(
                        w, num_partitions)

                    if tensor_shape.dimension_value(dim_0_size):
                        num_total_ids = constant_op.constant(
                            tensor_shape.dimension_value(dim_0_size),
                            flat_ids.dtype)
                    else:
                        dim_0_sizes = []
                        for p in range(num_partitions):
                            if tensor_shape.dimension_value(
                                    w[p].shape[0]) is not None:
                                dim_0_sizes.append(
                                    tensor_shape.dimension_value(
                                        w[p].shape[0]))
                            else:
                                with ops.colocate_with(w[p]):
                                    dim_0_sizes.append(
                                        array_ops.shape(w[p])[0])
                        num_total_ids = math_ops.reduce_sum(
                            math_ops.cast(array_ops.stack(dim_0_sizes),
                                          flat_ids.dtype))
                    ids_per_partition = num_total_ids // num_partitions
                    extras = num_total_ids % num_partitions

                    p_assignments = math_ops.maximum(
                        flat_ids // (ids_per_partition + 1),
                        (flat_ids - extras) // ids_per_partition)

                    # Emulate a conditional using a boolean indicator tensor
                    new_ids = array_ops.where(
                        p_assignments < extras,
                        flat_ids % (ids_per_partition + 1),
                        (flat_ids - extras) % ids_per_partition)

                    # Cast partition assignments to int32 for use in dynamic_partition.
                    # There really should not be more than 2^32 partitions.
                    p_assignments = math_ops.cast(p_assignments, dtypes.int32)
                    # Partition list of ids based on assignments into num_partitions
                    # separate lists.
                    gather_ids = data_flow_ops.dynamic_partition(
                        new_ids, p_assignments, num_partitions)
                    # Add these into the dictionaries for use in the later update.
                    num_partitions_by_var[v_num] = num_partitions
                    p_assignments_by_var[v_num] = p_assignments
                    gather_ids_by_var[v_num] = gather_ids

                    # Gather the weights from each partition.
                    partition_gathered_weights = []
                    for p in range(num_partitions):
                        with ops.colocate_with(w[p]):
                            partition_gathered_weights.append(
                                array_ops.gather(w[p], gather_ids[p]))

                    # Stitch the weights back together in the same order they were before
                    # we dynamic_partitioned them.
                    condition_indices = data_flow_ops.dynamic_partition(
                        math_ops.range(array_ops.shape(new_ids)[0]),
                        p_assignments, num_partitions)
                    batch_gathered_weights = data_flow_ops.dynamic_stitch(
                        condition_indices, partition_gathered_weights)
                else:
                    w_as_tensor = internal_convert_to_tensor(w)
                    with ops.device(w_as_tensor.device):
                        batch_gathered_weights = array_ops.gather(
                            w_as_tensor, sparse_idx)
                sparse_weights.append(batch_gathered_weights)

            # pylint: disable=protected-access
            if compat.forward_compatible(year=2018, month=10, day=30):
                esu, sfw, dfw = gen_sdca_ops.sdca_optimizer_v2(
                    sparse_example_indices,
                    sparse_feature_indices,
                    sparse_features_values,
                    self._convert_n_to_tensor(
                        self._examples['dense_features']),
                    internal_convert_to_tensor(
                        self._examples['example_weights']),
                    internal_convert_to_tensor(
                        self._examples['example_labels']),
                    sparse_indices,
                    sparse_weights,
                    self._convert_n_to_tensor(
                        self._slots['unshrinked_dense_features_weights']),
                    example_state_data,
                    loss_type=self._options['loss_type'],
                    l1=self._options['symmetric_l1_regularization'],
                    l2=self._symmetric_l2_regularization(),
                    num_loss_partitions=self._num_loss_partitions(),
                    num_inner_iterations=1,
                    adaptive=self._adaptive())
            else:
                esu, sfw, dfw = gen_sdca_ops.sdca_optimizer(
                    sparse_example_indices,
                    sparse_feature_indices,
                    sparse_features_values,
                    self._convert_n_to_tensor(
                        self._examples['dense_features']),
                    internal_convert_to_tensor(
                        self._examples['example_weights']),
                    internal_convert_to_tensor(
                        self._examples['example_labels']),
                    sparse_indices,
                    sparse_weights,
                    self._convert_n_to_tensor(
                        self._slots['unshrinked_dense_features_weights']),
                    example_state_data,
                    loss_type=self._options['loss_type'],
                    l1=self._options['symmetric_l1_regularization'],
                    l2=self._symmetric_l2_regularization(),
                    num_loss_partitions=self._num_loss_partitions(),
                    num_inner_iterations=1,
                    adaptative=self._adaptive())
            # pylint: enable=protected-access

            with ops.control_dependencies([esu]):
                update_ops = [self._hashtable.insert(example_ids_hashed, esu)]
                # Update the weights before the proximal step.
                for v_num, (w, i, u) in enumerate(
                        zip(self._slots['unshrinked_sparse_features_weights'],
                            sparse_indices, sfw)):
                    if (isinstance(w, var_ops.PartitionedVariable)
                            or isinstance(w, list)):
                        update_ops += self._get_partitioned_update_ops(
                            v_num, num_partitions_by_var, p_assignments_by_var,
                            gather_ids_by_var, w, u, p_assignments,
                            num_partitions)
                    else:
                        update_ops.append(state_ops.scatter_add(w, i, u))
                for w, u in zip(
                        self._slots['unshrinked_dense_features_weights'], dfw):
                    if (isinstance(w, var_ops.PartitionedVariable)
                            or isinstance(w, list)):
                        split_updates = array_ops.split(
                            u,
                            num_or_size_splits=[
                                v.shape.as_list()[0] for v in w
                            ])
                        for v, split_update in zip(w, split_updates):
                            update_ops.append(
                                state_ops.assign_add(v, split_update))
                    else:
                        update_ops.append(state_ops.assign_add(w, u))
            if not global_step:
                return control_flow_ops.group(*update_ops)
            with ops.control_dependencies(update_ops):
                return state_ops.assign_add(global_step, 1, name=name).op
Code example #45
0
File: iterator_ops.py  Project: perfmjs/tensorflow
  def from_string_handle(string_handle,
                         output_types,
                         output_shapes=None,
                         output_classes=None):
    """Creates a new, uninitialized `Iterator` based on the given handle.

    This method allows you to define a "feedable" iterator where you can choose
    between concrete iterators by feeding a value in a `tf.Session.run` call.
    In that case, `string_handle` would be a `tf.placeholder`, and you would
    feed it with the value of `tf.data.Iterator.string_handle` in each step.

    For example, if you had two iterators that marked the current position in
    a training dataset and a test dataset, you could choose which to use in
    each step as follows:

    ```python
    train_iterator = tf.data.Dataset(...).make_one_shot_iterator()
    train_iterator_handle = sess.run(train_iterator.string_handle())

    test_iterator = tf.data.Dataset(...).make_one_shot_iterator()
    test_iterator_handle = sess.run(test_iterator.string_handle())

    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(
        handle, train_iterator.output_types)

    next_element = iterator.get_next()
    loss = f(next_element)

    train_loss = sess.run(loss, feed_dict={handle: train_iterator_handle})
    test_loss = sess.run(loss, feed_dict={handle: test_iterator_handle})
    ```

    Args:
      string_handle: A scalar `tf.Tensor` of type `tf.string` that evaluates
        to a handle produced by the `Iterator.string_handle()` method.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
      output_classes: (Optional.) A nested structure of Python `type` objects
        corresponding to each component of an element of this iterator. If
        omitted, each component is assumed to be of type `tf.Tensor`.

    Returns:
      An `Iterator`.
    """
    output_types = nest.map_structure(dtypes.as_dtype, output_types)
    if output_shapes is None:
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(
          output_types, tensor_shape.as_shape, output_shapes)
    if output_classes is None:
      output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
    nest.assert_same_structure(output_types, output_shapes)
    output_structure = structure_lib.convert_legacy_structure(
        output_types, output_shapes, output_classes)
    string_handle = ops.convert_to_tensor(string_handle, dtype=dtypes.string)
    # pylint: disable=protected-access
    if compat.forward_compatible(2018, 8, 3):
      if _device_stack_is_empty():
        with ops.device("/cpu:0"):
          iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
              string_handle,
              output_types=output_structure._flat_types,
              output_shapes=output_structure._flat_shapes)
      else:
        iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
            string_handle,
            output_types=output_structure._flat_types,
            output_shapes=output_structure._flat_shapes)
    else:
      iterator_resource = gen_dataset_ops.iterator_from_string_handle(
          string_handle,
          output_types=output_structure._flat_types,
          output_shapes=output_structure._flat_shapes)
    # pylint: enable=protected-access
    return Iterator(iterator_resource, None, output_types, output_shapes,
                    output_classes)
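
As with `from_structure` above, here is a compact, runnable version of the feedable-iterator pattern from the docstring. It is a sketch assuming TF 1.x-style graph execution via `tf.compat.v1`; the two range datasets stand in for real train/test input pipelines.

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

train_dataset = tf.data.Dataset.range(100)
test_dataset = tf.data.Dataset.range(5)

train_iterator = train_dataset.make_one_shot_iterator()
test_iterator = test_dataset.make_one_shot_iterator()

# A string placeholder lets us pick the concrete iterator at run time.
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
    handle, train_dataset.output_types, train_dataset.output_shapes)
next_element = iterator.get_next()

with tf.Session() as sess:
    train_handle = sess.run(train_iterator.string_handle())
    test_handle = sess.run(test_iterator.string_handle())
    print(sess.run(next_element, feed_dict={handle: train_handle}))  # 0
    print(sess.run(next_element, feed_dict={handle: test_handle}))   # 0
```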