Example #1
  def testNestedFunctions(self):
    """Executes two nested TensorFlow functions."""

    def TimesTwo(x):
      return x * 2

    def APlus2B(a, b):
      return a + TimesTwo(b)

    aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
    bval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
    expected = APlus2B(aval, bval)

    with self.cached_session():

      @function.Defun(dtypes.float32, dtypes.float32)
      def Foo(a, b):
        return APlus2B(a, b)

      a = constant_op.constant(aval, name="a")
      b = constant_op.constant(bval, name="b")
      with self.test_scope():
        call_g = Foo(a, b)
      result = self.evaluate(call_g)
    self.assertAllClose(result, expected, rtol=1e-3)
Example #2
  def testAnonymousVarsInInit(self):

    class Model(training.Model):

      def __init__(self):
        super(Model, self).__init__()
        self.w = resource_variable_ops.ResourceVariable(0.0)
        self.b = resource_variable_ops.ResourceVariable(0.0)
        self.vars = [self.w, self.b]

      def call(self, x):
        return x * self.w + self.b

    with context.eager_mode():
      model = Model()
      optimizer = adam.AdamOptimizer(learning_rate=0.05)
      checkpoint_directory = self.get_temp_dir()
      checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
      checkpoint = util.Checkpoint(
          model=model, optimizer=optimizer)
      for _ in range(2):
        checkpoint.save(checkpoint_prefix)
        with backprop.GradientTape() as tape:
          loss = (constant_op.constant(1.)
                  - model(constant_op.constant(1.))) ** 2
        grad = tape.gradient(loss, model.vars)
        optimizer.apply_gradients(
            [(g, v) for g, v in zip(grad, model.vars)])
Example #3
  def testOneShotIteratorInitializerFails(self):
    # Define a dataset whose initialization will always fail.
    dataset = dataset_ops.Dataset.from_tensors(
        array_ops.check_numerics(
            constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.test_session() as sess:
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
        sess.run(next_element)

      # Test that subsequent attempts to use the iterator also fail.
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
        sess.run(next_element)

    with self.test_session() as sess:
      def consumer_thread():
        with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
          sess.run(next_element)

      num_threads = 8
      threads = [
          self.checkedThread(consumer_thread) for _ in range(num_threads)]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
Example #4
  def testFixedNonUniform(self):
    """Sets up the quantile summary op test as follows.

    Creates an array that divides the range [0, 1] into 1<<16 equally spaced
    elements, where each element's weight equals its value.
    """
    dense_float_tensor_0 = constant_op.constant(
        [(1.0 * i) / math.pow(2.0, 16)
         for i in range(0, int(math.pow(2, 16)) + 1)])
    example_weights = constant_op.constant(
        [(1.0 * i) / math.pow(2.0, 16)
         for i in range(0, int(math.pow(2, 16)) + 1)])

    config = self._gen_config(0.1, 10)

    with self.test_session():
      dense_buckets, _ = quantile_ops.quantile_buckets(
          [dense_float_tensor_0], [], [], [],
          example_weights=example_weights,
          dense_config=[config],
          sparse_config=[])
      self.assertAllClose(
          [0] + [math.sqrt((i + 1.0) / 10) for i in range(0, 10)],
          dense_buckets[0].eval(),
          atol=0.1)
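The expected boundaries follow from the weighting scheme: with weight(x) = x on [0, 1], the cumulative weight up to x is proportional to x**2, so the boundary that captures a fraction q of the total weight sits at sqrt(q). A quick standalone check (illustration only, not part of the test file):

import math

# Equal-weight bucket boundaries for weight(x) = x on [0, 1]: the q-quantile
# of the weight distribution is sqrt(q), giving sqrt(0.1), sqrt(0.2), ..., 1.0.
expected_boundaries = [0.0] + [math.sqrt((i + 1.0) / 10) for i in range(10)]
print(expected_boundaries)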
Example #5
 def testShapeWrong(self):
   with ops.Graph().as_default():
     with self.assertRaisesWithPredicateMatch(
         ValueError,
         lambda e: ("Too many elements provided. Needed at most 5, "
                    "but received 7" == str(e))):
       constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
Example #6
  def testCaptureByValue(self):
    g = ops.Graph()
    with g.as_default():
      w = constant_op.constant([[1.0]])
      b = constant_op.constant([2.0])

      # Foo() captures w and b.
      @function.Defun(dtypes.float32, capture_by_value=True)
      def Foo(x):

        # Plus() captures b.
        @function.Defun(dtypes.float32, capture_by_value=True)
        def Plus(y):
          return y + b

        self.assertEqual(0, len(Plus.captured_inputs))

        return Plus(math_ops.matmul(w, x))

      y = Foo(constant_op.constant([[10.]]))

    self.assertEqual(0, len(Foo.captured_inputs))

    with self.test_session(graph=g):
      self.assertAllEqual(y.eval(), [[12.0]])
Example #7
  def testCopyToGPU(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")

    with ops.device("/cpu:0"):
      optional_with_value = optional_ops.Optional.from_value(
          (constant_op.constant(37.0), constant_op.constant("Foo"),
           constant_op.constant(42)))
      optional_none = optional_ops.Optional.none_from_structure(
          structure.TensorStructure(dtypes.float32, []))

    with ops.device("/gpu:0"):
      gpu_optional_with_value = optional_ops._OptionalImpl(
          array_ops.identity(optional_with_value._variant_tensor),
          optional_with_value.value_structure)
      gpu_optional_none = optional_ops._OptionalImpl(
          array_ops.identity(optional_none._variant_tensor),
          optional_none.value_structure)

      gpu_optional_with_value_has_value = gpu_optional_with_value.has_value()
      gpu_optional_with_value_values = gpu_optional_with_value.get_value()

      gpu_optional_none_has_value = gpu_optional_none.has_value()

    self.assertTrue(self.evaluate(gpu_optional_with_value_has_value))
    self.assertEqual((37.0, b"Foo", 42),
                     self.evaluate(gpu_optional_with_value_values))
    self.assertFalse(self.evaluate(gpu_optional_none_has_value))
Example #8
  def testStudentPDFAndLogPDF(self):
    with self.test_session():
      batch_size = 6
      df = constant_op.constant([3.] * batch_size)
      mu = constant_op.constant([7.] * batch_size)
      sigma = constant_op.constant([8.] * batch_size)
      df_v = 3.
      mu_v = 7.
      sigma_v = 8.
      t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
      student = student_t.StudentT(df, loc=mu, scale=-sigma)

      log_pdf = student.log_prob(t)
      self.assertEqual(log_pdf.get_shape(), (6,))
      log_pdf_values = self.evaluate(log_pdf)
      pdf = student.prob(t)
      self.assertEqual(pdf.get_shape(), (6,))
      pdf_values = self.evaluate(pdf)

      if not stats:
        return

      expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
      expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
      self.assertAllClose(expected_log_pdf, log_pdf_values)
      self.assertAllClose(np.log(expected_pdf), log_pdf_values)
      self.assertAllClose(expected_pdf, pdf_values)
      self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
Example #9
  def testStudentLogPDFMultidimensional(self):
    with self.test_session():
      batch_size = 6
      df = constant_op.constant([[1.5, 7.2]] * batch_size)
      mu = constant_op.constant([[3., -3.]] * batch_size)
      sigma = constant_op.constant([[-math.sqrt(10.), math.sqrt(15.)]] *
                                   batch_size)
      df_v = np.array([1.5, 7.2])
      mu_v = np.array([3., -3.])
      sigma_v = np.array([np.sqrt(10.), np.sqrt(15.)])
      t = np.array([[-2.5, 2.5, 4., 0., -1., 2.]], dtype=np.float32).T
      student = student_t.StudentT(df, loc=mu, scale=sigma)
      log_pdf = student.log_prob(t)
      log_pdf_values = self.evaluate(log_pdf)
      self.assertEqual(log_pdf.get_shape(), (6, 2))
      pdf = student.prob(t)
      pdf_values = self.evaluate(pdf)
      self.assertEqual(pdf.get_shape(), (6, 2))

      if not stats:
        return
      expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
      expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
      self.assertAllClose(expected_log_pdf, log_pdf_values)
      self.assertAllClose(np.log(expected_pdf), log_pdf_values)
      self.assertAllClose(expected_pdf, pdf_values)
      self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
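Examples #8 and #9 compare against scipy.stats.t and silently skip the comparison when SciPy is unavailable (the `if not stats: return` guard). For reference, here is a standalone NumPy sketch of the location-scale Student-t log-density that stats.t.logpdf(t, df, loc=mu, scale=sigma) evaluates; it is an illustration, not taken from the test file:

import numpy as np
from scipy.special import gammaln

def student_t_logpdf(t, df, mu, sigma):
  # Log-density of the location-scale Student-t distribution.
  z = (t - mu) / sigma
  return (gammaln((df + 1.) / 2.) - gammaln(df / 2.)
          - 0.5 * np.log(df * np.pi) - np.log(np.abs(sigma))
          - (df + 1.) / 2. * np.log1p(z * z / df))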
Example #10
  def testCaptureHashTable(self):
    # NOTE(mrry): We must use the V2 variants of `HashTable`
    # etc. because these produce a `tf.resource`-typed output that is
    # compatible with the in-graph function implementation.
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = lookup_ops.HashTable(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)

    input_sentences = dataset_ops.Dataset.from_tensor_slices(
        ["brain brain tank salad surgery", "surgery brain"])

    iterator = (input_sentences
                .map(lambda x: string_ops.string_split([x]).values)
                .map(table.lookup)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(table.initializer)
      sess.run(init_op)
      sess.run(get_next)
      sess.run(get_next)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Example #11
 def testStudentSampleMultiDimensional(self):
   with self.test_session():
     batch_size = 7
     df = constant_op.constant([[3., 7.]] * batch_size)
     mu = constant_op.constant([[3., -3.]] * batch_size)
     sigma = constant_op.constant([[math.sqrt(10.), math.sqrt(15.)]] *
                                  batch_size)
     df_v = [3., 7.]
     mu_v = [3., -3.]
     sigma_v = [np.sqrt(10.), np.sqrt(15.)]
     n = constant_op.constant(200000)
     student = student_t.StudentT(df=df, loc=mu, scale=sigma)
     samples = student.sample(n, seed=123456)
     sample_values = self.evaluate(samples)
     self.assertEqual(samples.get_shape(), (200000, batch_size, 2))
     self.assertAllClose(
         sample_values[:, 0, 0].mean(), mu_v[0], rtol=1e-2, atol=0)
     self.assertAllClose(
         sample_values[:, 0, 0].var(),
         sigma_v[0]**2 * df_v[0] / (df_v[0] - 2),
         rtol=1e-1,
         atol=0)
     self._checkKLApprox(df_v[0], mu_v[0], sigma_v[0], sample_values[:, 0, 0])
     self.assertAllClose(
         sample_values[:, 0, 1].mean(), mu_v[1], rtol=1e-2, atol=0)
     self.assertAllClose(
         sample_values[:, 0, 1].var(),
         sigma_v[1]**2 * df_v[1] / (df_v[1] - 2),
         rtol=1e-1,
         atol=0)
     self._checkKLApprox(df_v[1], mu_v[1], sigma_v[1], sample_values[:, 0, 1])
Example #12
 def testArray(self):
   with self.test_session():
     x = constant_op.constant([1.0, 2.0], dtypes.float64)
     y = constant_op.constant([2.0, 3.0], dtypes.float64)
     z = self.evaluate(script_ops.py_func(np_func, [x, y], [dtypes.float64]))
     self.assertAllEqual(z[0],
                         np_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
Example #13
 def testScalar(self):
   with self.test_session():
     x = constant_op.constant(1.0, dtypes.float32)
     y = constant_op.constant(2.0, dtypes.float32)
     z = self.evaluate(
         script_ops.eager_py_func(np_func, [x, y], [dtypes.float32]))
     self.assertEqual(z[0], np_func(1.0, 2.0).astype(np.float32))
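Examples #12 and #13 call a module-level helper np_func that is not included in the snippets. Any elementwise NumPy function of two arguments satisfies the assertions above; a plausible definition (an assumption, not copied from the source file) is:

import numpy as np

def np_func(x, y):
  # Elementwise reference function evaluated via py_func / eager_py_func.
  return np.sinh(x) + np.cosh(y)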
Example #14
  def test_build_all_signature_defs_without_receiver_alternatives(self):
    receiver_tensor = array_ops.placeholder(dtypes.string)
    output_1 = constant_op.constant([1.])
    output_2 = constant_op.constant(["2"])
    output_3 = constant_op.constant(["3"])
    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.RegressionOutput(value=output_1),
        "head-2": export_output.ClassificationOutput(classes=output_2),
        "head-3": export_output.PredictOutput(outputs={
            "some_output_3": output_3
        }),
    }

    signature_defs = export.build_all_signature_defs(
        receiver_tensor, export_outputs)

    expected_signature_defs = {
        "serving_default":
            signature_def_utils.regression_signature_def(receiver_tensor,
                                                         output_1),
        "head-2":
            signature_def_utils.classification_signature_def(receiver_tensor,
                                                             output_2, None),
        "head-3":
            signature_def_utils.predict_signature_def({
                "input": receiver_tensor
            }, {"some_output_3": output_3})
    }

    self.assertDictEqual(expected_signature_defs, signature_defs)
Example #15
  def test_serving_input_receiver_receiver_tensors_invalid(self):
    features = {
        "feature0": constant_op.constant([0]),
        u"feature1": constant_op.constant([1]),
        "feature2": sparse_tensor.SparseTensor(
            indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
    }

    with self.assertRaisesRegexp(
        ValueError, "receiver_tensors must be defined"):
      export.ServingInputReceiver(
          features=features,
          receiver_tensors=None)

    with self.assertRaisesRegexp(
        ValueError, "receiver_tensors keys must be strings"):
      export.ServingInputReceiver(
          features=features,
          receiver_tensors={
              1: array_ops.placeholder(dtypes.string, name="example0")})

    with self.assertRaisesRegexp(
        ValueError, "receiver_tensor example1 must be a Tensor"):
      export.ServingInputReceiver(
          features=features,
          receiver_tensors={"example1": [1]})
Example #16
  def testIndexedSliceAsArgumentWithDefun(self):

    @function.defun
    def f(indexed_slice):
      return indexed_slice

    def validate(arg):
      output = f(arg)
      self.assertTrue(isinstance(output, ops.IndexedSlices))
      self.assertAllEqual(arg.values, output.values)
      self.assertAllEqual(arg.indices, output.indices)
      self.assertAllEqual(arg.dense_shape, output.dense_shape)

    indexed_slice = ops.IndexedSlices(
        values=constant_op.constant([1]),
        indices=constant_op.constant([0]),
        dense_shape=constant_op.constant([1]))
    validate(indexed_slice)

    # Test that `f` works even when `dense_shape` is None.
    indexed_slice = ops.IndexedSlices(
        values=constant_op.constant([1]),
        indices=constant_op.constant([0]),
        dense_shape=None)
    validate(indexed_slice)
Example #17
  def testDifferentiableFunctionNoneOutputs(self):

    @function.defun
    def my_function(x):
      return x, None

    def wrapper(x):
      return my_function(x)[0]

    g = backprop.gradients_function(wrapper, [0])(constant_op.constant(0.0))
    self.assertAllEqual(g[0], 1.)

    @function.defun
    def foo(a):
      return None, a * a

    x = constant_op.constant(5.0)
    with backprop.GradientTape() as tp:
      tp.watch(x)
      none, r = foo(x)
    g = tp.gradient(r, x)

    self.assertIs(none, None)
    self.assertAllEqual(r, 25.0)
    self.assertAllEqual(g, 2 * 5.0)
Example #18
 def split(self, value, lengths, name=None):
   """See TensorArray."""
   # error checking to match graph-mode errors
   value = constant_op.constant(value)
   lengths = constant_op.constant(lengths)
   sum_lengths = math_ops.reduce_sum(lengths)
   if lengths.shape.ndims != 1:
     raise errors_impl.InvalidArgumentError(
         None, None, "Expected lengths to be a vector, received shape: %s" %
         lengths.shape.as_list())
   elif value.shape.ndims == 0:
     raise errors_impl.InvalidArgumentError(
         None, None, "Expected value to be at least a vector, "
         "but received shape: %s" % value.shape.as_list())
   elif sum_lengths.numpy() != value.shape.as_list()[0]:
     raise errors_impl.InvalidArgumentError(
         None, None, "Expected sum of lengths to be equal to "
         "values.shape[0], but sum of lengths is %d and "
         "value's shape is: %s " % (sum_lengths.numpy(),
                                    value.shape.as_list()))
   elif not self._dynamic_size and lengths.shape[0] != len(self._tensor_array):
     raise errors_impl.InvalidArgumentError(
         None, None, "TensorArray's size is not equal to the size of "
         "lengths (%d vs. %d), and the TensorArray is not marked as "
         "dynamically resizeable" % (len(self._tensor_array),
                                     lengths.shape[0]))
   else:
     ta = self._identity_without_array()
     tensor_array = array_ops.split(value, lengths, name=name)
     ta._implementation._tensor_array = tensor_array  # pylint: disable=protected-access
     return ta
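For context, here is a minimal eager-mode usage of the public TensorArray.split API that this implementation backs. This is an illustrative sketch (assuming eager execution and the internal module aliases used throughout these snippets), not part of the source file:

# Split a length-4 vector into two rows of length 2, stored as two elements.
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.split([1.0, 2.0, 3.0, 4.0], lengths=[2, 2])
print(ta.read(0).numpy())  # [1. 2.]
print(ta.read(1).numpy())  # [3. 4.]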
Example #19
  def testReturningIndexedSlicesWithDefun(self):

    def validate(indexed_slice):
      def f():
        return indexed_slice

      output = function.defun(f)()
      self.assertTrue(isinstance(output, ops.IndexedSlices))
      self.assertAllEqual(indexed_slice.values, output.values)
      self.assertAllEqual(indexed_slice.indices, output.indices)
      self.assertAllEqual(indexed_slice.dense_shape, output.dense_shape)

      self.assertEqual(
          function.make_defun_op(f).output_shapes, indexed_slice.values.shape)

    arg = ops.IndexedSlices(
        values=constant_op.constant([1, 2]),
        indices=constant_op.constant([0, 1]),
        dense_shape=constant_op.constant([2]))
    validate(arg)

    arg = ops.IndexedSlices(
        values=constant_op.constant([1, 2]),
        indices=constant_op.constant([0, 1]),
        dense_shape=None)
    validate(arg)
Example #20
 def testWrongDimensions(self):
   # The matrix and right-hand sides should have the same number of rows.
   with self.test_session():
     matrix = constant_op.constant([[1., 0.], [0., 1.]])
     rhs = constant_op.constant([[1., 0.]])
     with self.assertRaises(ValueError):
       linalg_ops.matrix_solve(matrix, rhs)
Example #21
  def testTimelineGpu(self):
    if not test.is_gpu_available(cuda_only=True):
      return

    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()

    with self.session(force_gpu=True) as sess:
      const1 = constant_op.constant(1.0, name='const1')
      const2 = constant_op.constant(2.0, name='const2')
      result = math_ops.add(const1, const2) + const1 * const2
      sess.run(result, options=run_options, run_metadata=run_metadata)
    self.assertTrue(run_metadata.HasField('step_stats'))
    step_stats = run_metadata.step_stats
    devices = [d.device for d in step_stats.dev_stats]
    self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:0' in devices)
    self.assertTrue('/device:GPU:0/stream:all' in devices)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format()
    self._validateTrace(ctf)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format(show_dataflow=False)
    self._validateTrace(ctf)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format(show_memory=False)
    self._validateTrace(ctf)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format(
        show_memory=False, show_dataflow=False)
    self._validateTrace(ctf)
Example #22
 def testSparseStability(self):
   for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
     with self.test_session():
       shape = [1, 6]
       var0 = variables.Variable(
           [[
               0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
               -0.0105945
           ]],
           dtype=dtype)
       grads0 = ops.IndexedSlices(
           constant_op.constant(
               [[
                   -5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
                   -8.4877e-05, -9.48906e-05
               ]],
               shape=shape,
               dtype=dtype),
           constant_op.constant([0]),
           constant_op.constant(shape))
       ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
       ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
       self.assertEqual(["accumulator"], ada_opt.get_slot_names())
       slot0 = ada_opt.get_slot(var0, "accumulator")
       init = variables.global_variables_initializer()
       for _ in range(100):
         init.run()
         ada_update.run()
         self.assertAllCloseAccordingToType(
             np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())
         self.assertAllCloseAccordingToType(
             np.array([[
                 0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
                 -0.01029443
             ]]), var0.eval())
Example #23
 def testAcceptsIndexedSlices(self):
   values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2])
   indices = constant_op.constant([0, 2, 5])
   x = math_ops.scalar_mul(-3, ops.IndexedSlices(values, indices))
   with self.test_session(use_gpu=True):
     self.assertAllEqual(x.values.eval(), [[-6, -9], [-15, -21], [0, 3]])
     self.assertAllEqual(x.indices.eval(), [0, 2, 5])
Example #24
 def testInvalidSlice(self):
   with self.test_session() as sess:
     foo = constant_op.constant([1, 2, 3])
     with self.assertRaisesRegexp(ValueError, "Sliced assignment"
                                  " is only supported for variables"):
       bar = foo[:2].assign(constant_op.constant([1, 2]))
       sess.run(bar)
Example #25
  def testTensorLearningRate(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adamax.Adamax(constant_op.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power = get_beta_accumulators(opt, dtype)

        # Run 3 steps of Adamax
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
          update.run()

          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
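Example #25 relies on two helpers defined elsewhere in the test file and not shown here: get_beta_accumulators (which fetches the optimizer's beta_1 power accumulator) and adamax_update_numpy (a NumPy reference implementation). A minimal sketch of the latter, assuming the standard AdaMax update rule from Kingma & Ba with a zero-based step index t:

import numpy as np

def adamax_update_numpy(param, g_t, t, m, v,
                        alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
  m_t = beta1 * m + (1 - beta1) * g_t        # first-moment estimate
  v_t = np.maximum(beta2 * v, np.abs(g_t))   # exponentially weighted inf-norm
  param_t = param - (alpha / (1 - beta1**(t + 1))) * (m_t / (v_t + epsilon))
  return param_t, m_t, v_t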
Example #26
 def testContainsIndexedSlices_PerReplica(self):
   t0 = math_ops._as_indexed_slices(
       constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
   t1 = math_ops._as_indexed_slices(
       constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
   per_replica = value_lib.PerReplica({"/gpu:0": t0, "/cpu:0": t1})
   self.assertTrue(cross_device_utils.contains_indexed_slices(per_replica))
Example #27
def _make_indexed_slices(values, indices, dense_shape, device):
  with ops.device(device):
    tensor = ops.IndexedSlices(
        values=constant_op.constant(values),
        indices=constant_op.constant(indices),
        dense_shape=constant_op.constant(dense_shape))
  return tensor
Example #28
  def testGradientsRank7SliceUpdate(self):
    for dtype in GRADIENT_TESTS_DTYPES:
      indices = constant_op.constant(
          [[[[[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],
             [[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]]]],
          dtype=dtypes.int32)
      updates = constant_op.constant(
          [[[[[[[5, 6], [2, 4]]]], [[[[1, 3], [6, 8]]]]]]], dtype=dtype)
      shape = constant_op.constant([1, 1, 2, 1, 1, 2, 2], dtype=dtypes.int32)
      input_ = array_ops.zeros(shape, dtype=dtype)
      outputs = self.scatter_nd(indices, updates, shape, input_)

      grad_vals = constant_op.constant(
          [[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]], dtype=dtype)
      updates_grad, input_grad = gradients_impl.gradients(
          [outputs], [updates, input_], [grad_vals])
      expected_updates_grad = np.array(
          [[[[[[[3, 4], [5, 6]]]], [[[[1, 2], [7, 8]]]]]]],
          dtype=dtype.as_numpy_dtype())
      expected_input_grad = np.array(
          [[[[[[[1, 2], [3, 4]]]], [[[[5, 6], [7, 8]]]]]]],
          dtype=dtype.as_numpy_dtype())
      with self.cached_session():
        self.assertAllEqual(expected_updates_grad, updates_grad.eval())
        if self.non_aliasing_add_test:
          self.assertAllEqual(expected_input_grad, input_grad.eval())
Example #29
 def _testDropoutWrapper(self, batch_size=None, time_steps=None,
                         parallel_iterations=None, **kwargs):
   with self.test_session() as sess:
     with variable_scope.variable_scope(
         "root", initializer=init_ops.constant_initializer(0.5)):
       if batch_size is None and time_steps is None:
         # 2 time steps, batch size 1, depth 3
         batch_size = 1
         time_steps = 2
         x = constant_op.constant(
             [[[2., 2., 2.]], [[1., 1., 1.]]], dtype=dtypes.float32)
         m = rnn_cell_impl.LSTMStateTuple(
             *[constant_op.constant([[0.1, 0.1, 0.1]], dtype=dtypes.float32)
              ] * 2)
       else:
         x = constant_op.constant(
             np.random.randn(time_steps, batch_size, 3).astype(np.float32))
         m = rnn_cell_impl.LSTMStateTuple(*[
             constant_op.constant(
                 [[0.1, 0.1, 0.1]] * batch_size, dtype=dtypes.float32)
         ] * 2)
       outputs, final_state = rnn.dynamic_rnn(
           cell=rnn_cell_impl.DropoutWrapper(
               rnn_cell_impl.LSTMCell(3), dtype=x.dtype, **kwargs),
           time_major=True,
           parallel_iterations=parallel_iterations,
           inputs=x,
           initial_state=m)
       sess.run([variables_lib.global_variables_initializer()])
       res = sess.run([outputs, final_state])
       self.assertEqual(res[0].shape, (time_steps, batch_size, 3))
       self.assertEqual(res[1].c.shape, (batch_size, 3))
       self.assertEqual(res[1].h.shape, (batch_size, 3))
       return res
Example #30
  def testSharing(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        ada_opt = adagrad.AdagradOptimizer(3.0)
        # Apply the optimizer twice.  Both applications will use
        # the same accums.
        ada_update1 = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        ada_update2 = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.assertEqual(["accumulator"], ada_opt.get_slot_names())
        slot0 = ada_opt.get_slot(var0, "accumulator")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = ada_opt.get_slot(var1, "accumulator")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        variables.global_variables_initializer().run()

        # Fetch params to validate initial values.
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Mix the first and the second adagrad for 3 steps.
        ada_update1.run()
        ada_update2.run()
        ada_update1.run()
        # Validate updated params (the same as with only 1 Adagrad).
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval())
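The expected values in testSharing can be reproduced with plain NumPy: Adagrad adds the squared gradient to the accumulator and then applies var -= lr * grad / sqrt(accumulator). A standalone check (illustration only, not part of the test file):

import numpy as np

def adagrad_update(var, accum, grad, lr=3.0):
  accum = accum + grad * grad
  var = var - lr * grad / np.sqrt(accum)
  return var, accum

var0, accum0 = np.array([1.0, 2.0]), np.array([0.1, 0.1])
var1, accum1 = np.array([3.0, 4.0]), np.array([0.1, 0.1])
for _ in range(3):  # three applications, as in the test
  var0, accum0 = adagrad_update(var0, accum0, np.array([0.1, 0.1]))
  var1, accum1 = adagrad_update(var1, accum1, np.array([0.01, 0.01]))
print(var0)  # ~[-1.60261, -0.60261]
print(var1)  # ~[ 2.71568,  3.71568]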
Example #31
def experimental_tpu_test_loop(model,
                               dataset,
                               verbose=0,
                               steps=None,
                               callbacks=None):
  """Test loop for evaluating with TPU DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      dataset: Dataset for input data.
      verbose: Integer, Verbosity mode 0 or 1.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.
      callbacks: List of callbacks to be called during evaluation.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the outputs.
  """
  mode = ModeKeys.TEST
  current_strategy = model._distribution_strategy
  iterator = distributed_training_utils.get_iterator(dataset,
                                                     current_strategy)
  steps = training_utils.infer_steps_for_dataset(dataset, steps,
                                                 steps_name='steps')

  scope = distributed_training_utils.distributed_scope(
      strategy=current_strategy, learning_phase=0)
  scope.__enter__()

  def _per_device_eval_function(model):
    model._make_eval_function()
    return (model._eval_function.inputs, model._eval_function.outputs,
            model._eval_function.updates_op,
            model._eval_function.session_kwargs)

  def step_fn(ctx, inputs):
    """Clones the model and calls make_eval_function."""
    inputs, targets = inputs
    if model._compile_distribution:
      distributed_training_utils.clone_model_on_replicas(
          model, current_strategy, mode=mode, inputs=inputs, targets=targets)
    else:
      distributed_training_utils._build_distributed_network(
          model, current_strategy, mode, inputs, targets)

    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.extended.call_for_each_replica(
         _per_device_eval_function,
         args=(distributed_training_utils.get_distributed_model(
             model, ModeKeys.TEST),))

    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args)

    combined_fn = K.function(
        all_inputs, all_outputs,
        updates=all_updates,
        name='distributed_test_function',
        **all_session_args)

    for label, output in zip(model.metrics_names, combined_fn.outputs):
      if label == 'loss':
        reduce_op = ds_reduce_util.ReduceOp.SUM
      else:
        # We reduce all other metrics using mean for now. This is a temporary
        # workaround until new metrics are in place.
        reduce_op = ds_reduce_util.ReduceOp.MEAN
      ctx.set_last_step_output(label, output, reduce_op)

    return combined_fn.updates_op

  # Add initial dummy values for loss and other metric tensors.
  initial_loop_values = {}
  initial_loop_values['loss'] = constant_op.constant(1e7)
  for name in model.metrics_names[1:]:
    tensor = model._all_stateful_metrics_tensors[name]
    initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

  # TODO(priyag): Use steps_per_run when we use new metrics as they will
  # allow handling metric computation at each step using variables.
  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=1,
      initial_loop_values=initial_loop_values)

  test_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  if verbose == 1:
    progbar = Progbar(target=steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  distributed_training_utils._reset_metrics(model)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=False,
      epochs=1,
      steps_per_epoch=steps,
      verbose=verbose,
      count_mode='steps',
      mode=ModeKeys.TEST)
  callbacks._call_begin_hook(mode)

  outs = [0.] * len(model.metrics_names)
  if steps is not None:
    target_steps = steps
  else:
    target_steps = np.inf

  current_step = 0
  while current_step < target_steps:
    batch_logs = {'batch': current_step, 'size': 1}
    callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
    try:
      _, batch_outs = K.get_session().run([test_op, output_tensors])
    except errors.OutOfRangeError:
      if steps is not None:
        warning_msg = ('Make sure that your dataset can generate at least '
                       '`steps` batches (in this case, {} batches).'
                       .format(steps))
      else:
        warning_msg = 'Number of steps ran: {} steps'.format(current_step)

      logging.warning('Your dataset iterator ran out of data; '
                      'interrupting evaluation. ' + warning_msg)
      target_steps = current_step
      break
    for i, label in enumerate(model.metrics_names):
      if i == 0:
        # Loss is a stateless metric, so accumulate it across batches.
        outs[i] += batch_outs[label]
      else:
        # For all stateful metrics, the aggregation is handled by mirrored vars.
        outs[i] = batch_outs[label]

    batch_logs = cbks.make_logs(model, batch_logs, outs, mode)
    callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
    if verbose == 1:
      progbar.update(current_step + 1)
    current_step += 1

  callbacks._call_end_hook(mode)

  scope.__exit__(None, None, None)
  if outs:
    outs[0] /= target_steps

  if len(outs) == 1:
    return outs[0]
  return outs
Example #32
def _ImagGrad(_, grad):
    """Returns 'grad' as the imaginary part and set the real part 0."""
    zero = constant_op.constant(0, dtype=grad.dtype)
    return math_ops.complex(zero, grad)
Example #33
 def _test_fn():
   with backprop.GradientTape() as tape:
     x = array_ops.ones([5, 5])
     tape.watch(x)
     y = math_ops.reduce_sum(x, axis=constant_op.constant(1))
   return y, tape.gradient(y, x)
Example #34
class StructureTest(test.TestCase, parameterized.TestCase):
    # pylint: disable=protected-access

    @parameterized.parameters(
        (constant_op.constant(37.0), structure.TensorStructure,
         [dtypes.float32], [[]]),
        (sparse_tensor.SparseTensor(
            indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
         structure.SparseTensorStructure, [dtypes.variant], [[3]]),
        ((constant_op.constant(37.0), constant_op.constant([1, 2, 3])),
         structure.NestedStructure, [dtypes.float32, dtypes.int32], [[], [3]]),
        ({
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }, structure.NestedStructure, [dtypes.float32, dtypes.int32], [[], [3]
                                                                       ]),
        ({
            "a":
            constant_op.constant(37.0),
            "b": (sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
                  sparse_tensor.SparseTensor(
                      indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
        }, structure.NestedStructure,
         [dtypes.float32, dtypes.variant, dtypes.variant], [[], [3], [3]]))
    def testFlatStructure(self, value, expected_structure, expected_types,
                          expected_shapes):
        s = structure.Structure.from_value(value)
        self.assertIsInstance(s, expected_structure)
        self.assertEqual(expected_types, s._flat_types)
        self.assertEqual(expected_shapes, s._flat_shapes)

    @parameterized.parameters(
        (constant_op.constant(37.0), [
            constant_op.constant(38.0),
            array_ops.placeholder(dtypes.float32),
            variables.Variable(100.0), 42.0,
            np.array(42.0, dtype=np.float32)
        ], [constant_op.constant([1.0, 2.0]),
            constant_op.constant(37)]),
        (sparse_tensor.SparseTensor(
            indices=[[3, 4]], values=[-1], dense_shape=[4, 5]), [
                sparse_tensor.SparseTensor(indices=[[1, 1], [3, 4]],
                                           values=[10, -1],
                                           dense_shape=[4, 5]),
                sparse_tensor.SparseTensorValue(indices=[[1, 1], [3, 4]],
                                                values=[10, -1],
                                                dense_shape=[4, 5]),
                array_ops.sparse_placeholder(dtype=dtypes.int32),
                array_ops.sparse_placeholder(dtype=dtypes.int32,
                                             shape=[None, None])
            ], [
                constant_op.constant(37, shape=[4, 5]),
                sparse_tensor.SparseTensor(
                    indices=[[3, 4]], values=[-1], dense_shape=[5, 6]),
                array_ops.sparse_placeholder(dtype=dtypes.int32,
                                             shape=[None, None, None]),
                sparse_tensor.SparseTensor(
                    indices=[[3, 4]], values=[-1.0], dense_shape=[4, 5])
            ]),
        ({
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }, [{
            "a": constant_op.constant(15.0),
            "b": constant_op.constant([4, 5, 6])
        }], [{
            "a": constant_op.constant(15.0),
            "b": constant_op.constant([4, 5, 6, 7])
        }, {
            "a": constant_op.constant(15),
            "b": constant_op.constant([4, 5, 6])
        }, {
            "a":
            constant_op.constant(15),
            "b":
            sparse_tensor.SparseTensor(
                indices=[[0], [1], [2]], values=[4, 5, 6], dense_shape=[3])
        }, (constant_op.constant(15.0), constant_op.constant([4, 5, 6]))]),
    )
    def testIsCompatibleWithStructure(self, original_value, compatible_values,
                                      incompatible_values):
        s = structure.Structure.from_value(original_value)
        for compatible_value in compatible_values:
            self.assertTrue(
                s.is_compatible_with(
                    structure.Structure.from_value(compatible_value)))
        for incompatible_value in incompatible_values:
            self.assertFalse(
                s.is_compatible_with(
                    structure.Structure.from_value(incompatible_value)))

    # NOTE(mrry): The arguments must be lifted into lambdas because otherwise they
    # will be executed before the (eager- or graph-mode) test environment has been
    # set up.
    # pylint: disable=g-long-lambda
    @parameterized.parameters(
        (lambda: constant_op.constant(37.0), ),
        (lambda: sparse_tensor.SparseTensor(
            indices=[[3, 4]], values=[-1], dense_shape=[4, 5]), ),
        (lambda: {
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }, ),
        (lambda: {
            "a":
            constant_op.constant(37.0),
            "b":
            (sparse_tensor.
             SparseTensor(indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
             sparse_tensor.SparseTensor(
                 indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
        }, ),
    )
    def testRoundTripConversion(self, value_fn):
        value = value_fn()
        s = structure.Structure.from_value(value)
        before = self.evaluate(value)
        after = self.evaluate(s._from_tensor_list(s._to_tensor_list(value)))

        flat_before = nest.flatten(before)
        flat_after = nest.flatten(after)
        for b, a in zip(flat_before, flat_after):
            if isinstance(b, sparse_tensor.SparseTensorValue):
                self.assertAllEqual(b.indices, a.indices)
                self.assertAllEqual(b.values, a.values)
                self.assertAllEqual(b.dense_shape, a.dense_shape)
            else:
                self.assertAllEqual(b, a)

    # pylint: enable=g-long-lambda

    def testIncompatibleStructure(self):
        # Define three mutually incompatible values/structures, and assert that:
        # 1. Using one structure to flatten a value with an incompatible structure
        #    fails.
        # 2. Using one structure to restructure a flattened value with an
        #    incompatible structure fails.
        value_tensor = constant_op.constant(42.0)
        s_tensor = structure.Structure.from_value(value_tensor)
        flat_tensor = s_tensor._to_tensor_list(value_tensor)

        value_sparse_tensor = sparse_tensor.SparseTensor(indices=[[0, 0]],
                                                         values=[1],
                                                         dense_shape=[1, 1])
        s_sparse_tensor = structure.Structure.from_value(value_sparse_tensor)
        flat_sparse_tensor = s_sparse_tensor._to_tensor_list(
            value_sparse_tensor)

        value_nest = {
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }
        s_nest = structure.Structure.from_value(value_nest)
        flat_nest = s_nest._to_tensor_list(value_nest)

        with self.assertRaisesRegexp(
                ValueError,
                r"SparseTensor.* is not convertible to a tensor with "
                r"dtype.*float32.* and shape \(\)"):
            s_tensor._to_tensor_list(value_sparse_tensor)
        with self.assertRaisesRegexp(
                ValueError,
                r"Value \{.*\} is not convertible to a tensor with "
                r"dtype.*float32.* and shape \(\)"):
            s_tensor._to_tensor_list(value_nest)

        with self.assertRaisesRegexp(TypeError,
                                     "Input must be a SparseTensor"):
            s_sparse_tensor._to_tensor_list(value_tensor)

        with self.assertRaisesRegexp(TypeError,
                                     "Input must be a SparseTensor"):
            s_sparse_tensor._to_tensor_list(value_nest)

        with self.assertRaisesRegexp(
                ValueError,
                "Tensor.* not compatible with the nested structure "
                ".*TensorStructure.*TensorStructure"):
            s_nest._to_tensor_list(value_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.* not compatible with the nested structure "
                ".*TensorStructure.*TensorStructure"):
            s_nest._to_tensor_list(value_sparse_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                r"Cannot convert.*with dtype.*float32.* and shape \(\)"):
            s_tensor._from_tensor_list(flat_sparse_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "TensorStructure corresponds to a single tf.Tensor."):
            s_tensor._from_tensor_list(flat_nest)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensorStructure corresponds to a single tf.variant "
                "vector of length 3."):
            s_sparse_tensor._from_tensor_list(flat_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensorStructure corresponds to a single tf.variant "
                "vector of length 3."):
            s_sparse_tensor._from_tensor_list(flat_nest)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 1."):
            s_nest._from_tensor_list(flat_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 1."):
            s_nest._from_tensor_list(flat_sparse_tensor)

    def testIncompatibleNestedStructure(self):
        # Define three mutually incompatible nested values/structures, and assert
        # that:
        # 1. Using one structure to flatten a value with an incompatible structure
        #    fails.
        # 2. Using one structure to restructure a flattened value with an
        #    incompatible structure fails.

        value_0 = {
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }
        s_0 = structure.Structure.from_value(value_0)
        flat_s_0 = s_0._to_tensor_list(value_0)

        # `value_1` has compatible nested structure with `value_0`, but different
        # classes.
        value_1 = {
            "a":
            constant_op.constant(37.0),
            "b":
            sparse_tensor.SparseTensor(indices=[[0, 0]],
                                       values=[1],
                                       dense_shape=[1, 1])
        }
        s_1 = structure.Structure.from_value(value_1)
        flat_s_1 = s_1._to_tensor_list(value_1)

        # `value_2` has incompatible nested structure with `value_0` and `value_1`.
        value_2 = {
            "a":
            constant_op.constant(37.0),
            "b": (sparse_tensor.SparseTensor(indices=[[0, 0]],
                                             values=[1],
                                             dense_shape=[1, 1]),
                  sparse_tensor.SparseTensor(indices=[[3, 4]],
                                             values=[-1],
                                             dense_shape=[4, 5]))
        }
        s_2 = structure.Structure.from_value(value_2)
        flat_s_2 = s_2._to_tensor_list(value_2)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.* not compatible with the nested structure "
                ".*TensorStructure"):
            s_0._to_tensor_list(value_1)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.*SparseTensor.* not compatible with the "
                "nested structure .*TensorStructure"):
            s_0._to_tensor_list(value_2)

        with self.assertRaisesRegexp(
                ValueError,
                "Tensor.* not compatible with the nested structure "
                ".*SparseTensorStructure"):
            s_1._to_tensor_list(value_0)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.*SparseTensor.* not compatible with the "
                "nested structure .*TensorStructure"):
            s_0._to_tensor_list(value_2)

        # NOTE(mrry): The repr of the dictionaries is not sorted, so the regexp
        # needs to account for "a" coming before or after "b". It might be worth
        # adding a deterministic repr for these error messages (among other
        # improvements).
        with self.assertRaisesRegexp(
                ValueError,
                "Tensor.*Tensor.* not compatible with the nested structure "
                ".*(TensorStructure.*SparseTensorStructure.*SparseTensorStructure|"
                "SparseTensorStructure.*SparseTensorStructure.*TensorStructure)"
        ):
            s_2._to_tensor_list(value_0)

        with self.assertRaisesRegexp(
                ValueError, "(Tensor.*SparseTensor|SparseTensor.*Tensor).* "
                "not compatible with the nested structure .*"
                "(TensorStructure.*SparseTensorStructure.*SparseTensorStructure|"
                "SparseTensorStructure.*SparseTensorStructure.*TensorStructure)"
        ):
            s_2._to_tensor_list(value_1)

        with self.assertRaisesRegexp(
                ValueError,
                r"Cannot convert.*with dtype.*int32.* and shape \(3,\)"):
            s_0._from_tensor_list(flat_s_1)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 3."):
            s_0._from_tensor_list(flat_s_2)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensorStructure corresponds to a single tf.variant "
                "vector of length 3."):
            s_1._from_tensor_list(flat_s_0)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 3."):
            s_1._from_tensor_list(flat_s_2)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 3 flat values in NestedStructure but got 2."):
            s_2._from_tensor_list(flat_s_0)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 3 flat values in NestedStructure but got 2."):
            s_2._from_tensor_list(flat_s_1)

    @parameterized.named_parameters(
        ("Tensor", dtypes.float32, tensor_shape.scalar(), ops.Tensor,
         structure.TensorStructure(dtypes.float32, [])),
        ("SparseTensor", dtypes.int32, tensor_shape.matrix(
            2, 2), sparse_tensor.SparseTensor,
         structure.SparseTensorStructure(dtypes.int32, [2, 2])),
        ("Nest", {
            "a": dtypes.float32,
            "b": (dtypes.int32, dtypes.string)
        }, {
            "a": tensor_shape.scalar(),
            "b": (tensor_shape.matrix(2, 2), tensor_shape.scalar())
        }, {
            "a": ops.Tensor,
            "b": (sparse_tensor.SparseTensor, ops.Tensor)
        },
         structure.NestedStructure({
             "a":
             structure.TensorStructure(dtypes.float32, []),
             "b": (structure.SparseTensorStructure(dtypes.int32, [2, 2]),
                   structure.TensorStructure(dtypes.string, []))
         })),
    )
    def testFromLegacyStructure(self, output_types, output_shapes,
                                output_classes, expected_structure):
        actual_structure = structure.Structure._from_legacy_structure(
            output_types, output_shapes, output_classes)
        self.assertTrue(
            expected_structure.is_compatible_with(actual_structure))
        self.assertTrue(
            actual_structure.is_compatible_with(expected_structure))
示例#35
0
    def testIncompatibleNestedStructure(self):
        # Define three mutually incompatible nested values/structures, and assert
        # that:
        # 1. Using one structure to flatten a value with an incompatible structure
        #    fails.
        # 2. Using one structure to restructre a flattened value with an
        #    incompatible structure fails.

        value_0 = {
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }
        s_0 = structure.Structure.from_value(value_0)
        flat_s_0 = s_0._to_tensor_list(value_0)

        # `value_1` has compatible nested structure with `value_0`, but different
        # classes.
        value_1 = {
            "a":
            constant_op.constant(37.0),
            "b":
            sparse_tensor.SparseTensor(indices=[[0, 0]],
                                       values=[1],
                                       dense_shape=[1, 1])
        }
        s_1 = structure.Structure.from_value(value_1)
        flat_s_1 = s_1._to_tensor_list(value_1)

        # `value_2` has incompatible nested structure with `value_0` and `value_1`.
        value_2 = {
            "a":
            constant_op.constant(37.0),
            "b": (sparse_tensor.SparseTensor(indices=[[0, 0]],
                                             values=[1],
                                             dense_shape=[1, 1]),
                  sparse_tensor.SparseTensor(indices=[[3, 4]],
                                             values=[-1],
                                             dense_shape=[4, 5]))
        }
        s_2 = structure.Structure.from_value(value_2)
        flat_s_2 = s_2._to_tensor_list(value_2)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.* not compatible with the nested structure "
                ".*TensorStructure"):
            s_0._to_tensor_list(value_1)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.*SparseTensor.* not compatible with the "
                "nested structure .*TensorStructure"):
            s_0._to_tensor_list(value_2)

        with self.assertRaisesRegexp(
                ValueError,
                "Tensor.* not compatible with the nested structure "
                ".*SparseTensorStructure"):
            s_1._to_tensor_list(value_0)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.*SparseTensor.* not compatible with the "
                "nested structure .*TensorStructure"):
            s_0._to_tensor_list(value_2)

        # NOTE(mrry): The repr of the dictionaries is not sorted, so the regexp
        # needs to account for "a" coming before or after "b". It might be worth
        # adding a deterministic repr for these error messages (among other
        # improvements).
        with self.assertRaisesRegexp(
                ValueError,
                "Tensor.*Tensor.* not compatible with the nested structure "
                ".*(TensorStructure.*SparseTensorStructure.*SparseTensorStructure|"
                "SparseTensorStructure.*SparseTensorStructure.*TensorStructure)"
        ):
            s_2._to_tensor_list(value_0)

        with self.assertRaisesRegexp(
                ValueError, "(Tensor.*SparseTensor|SparseTensor.*Tensor).* "
                "not compatible with the nested structure .*"
                "(TensorStructure.*SparseTensorStructure.*SparseTensorStructure|"
                "SparseTensorStructure.*SparseTensorStructure.*TensorStructure)"
        ):
            s_2._to_tensor_list(value_1)

        with self.assertRaisesRegexp(
                ValueError,
                r"Cannot convert.*with dtype.*int32.* and shape \(3,\)"):
            s_0._from_tensor_list(flat_s_1)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 3."):
            s_0._from_tensor_list(flat_s_2)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensorStructure corresponds to a single tf.variant "
                "vector of length 3."):
            s_1._from_tensor_list(flat_s_0)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 3."):
            s_1._from_tensor_list(flat_s_2)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 3 flat values in NestedStructure but got 2."):
            s_2._from_tensor_list(flat_s_0)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 3 flat values in NestedStructure but got 2."):
            s_2._from_tensor_list(flat_s_1)
示例#36
0
    def testIncompatibleStructure(self):
        # Define three mutually incompatible values/structures, and assert that:
        # 1. Using one structure to flatten a value with an incompatible structure
        #    fails.
        # 2. Using one structure to restructre a flattened value with an
        #    incompatible structure fails.
        value_tensor = constant_op.constant(42.0)
        s_tensor = structure.Structure.from_value(value_tensor)
        flat_tensor = s_tensor._to_tensor_list(value_tensor)

        value_sparse_tensor = sparse_tensor.SparseTensor(indices=[[0, 0]],
                                                         values=[1],
                                                         dense_shape=[1, 1])
        s_sparse_tensor = structure.Structure.from_value(value_sparse_tensor)
        flat_sparse_tensor = s_sparse_tensor._to_tensor_list(
            value_sparse_tensor)

        value_nest = {
            "a": constant_op.constant(37.0),
            "b": constant_op.constant([1, 2, 3])
        }
        s_nest = structure.Structure.from_value(value_nest)
        flat_nest = s_nest._to_tensor_list(value_nest)

        with self.assertRaisesRegexp(
                ValueError,
                r"SparseTensor.* is not convertible to a tensor with "
                r"dtype.*float32.* and shape \(\)"):
            s_tensor._to_tensor_list(value_sparse_tensor)
        with self.assertRaisesRegexp(
                ValueError,
                r"Value \{.*\} is not convertible to a tensor with "
                r"dtype.*float32.* and shape \(\)"):
            s_tensor._to_tensor_list(value_nest)

        with self.assertRaisesRegexp(TypeError,
                                     "Input must be a SparseTensor"):
            s_sparse_tensor._to_tensor_list(value_tensor)

        with self.assertRaisesRegexp(TypeError,
                                     "Input must be a SparseTensor"):
            s_sparse_tensor._to_tensor_list(value_nest)

        with self.assertRaisesRegexp(
                ValueError,
                "Tensor.* not compatible with the nested structure "
                ".*TensorStructure.*TensorStructure"):
            s_nest._to_tensor_list(value_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensor.* not compatible with the nested structure "
                ".*TensorStructure.*TensorStructure"):
            s_nest._to_tensor_list(value_sparse_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                r"Cannot convert.*with dtype.*float32.* and shape \(\)"):
            s_tensor._from_tensor_list(flat_sparse_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "TensorStructure corresponds to a single tf.Tensor."):
            s_tensor._from_tensor_list(flat_nest)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensorStructure corresponds to a single tf.variant "
                "vector of length 3."):
            s_sparse_tensor._from_tensor_list(flat_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "SparseTensorStructure corresponds to a single tf.variant "
                "vector of length 3."):
            s_sparse_tensor._from_tensor_list(flat_nest)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 1."):
            s_nest._from_tensor_list(flat_tensor)

        with self.assertRaisesRegexp(
                ValueError,
                "Expected 2 flat values in NestedStructure but got 1."):
            s_nest._from_tensor_list(flat_sparse_tensor)
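For contrast with the failure cases above, a compatible round trip through the
same private API would look roughly like this (a sketch using the helpers the
test already exercises; the values are illustrative):

 value = constant_op.constant(42.0)
 s = structure.Structure.from_value(value)
 flat = s._to_tensor_list(value)        # a list containing a single tf.Tensor
 restored = s._from_tensor_list(flat)   # scalar float32 tensor equal to 42.0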
Example #37
0
 def f(x1, x2):
   return array_ops.where_v2(
       x1 < 0, constant_op.constant(0, dtype=x2.dtype),
       array_ops.where_v2(x1 > 0, constant_op.constant(1, dtype=x2.dtype), x2))
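Read inside-out, the nested `where_v2` maps entries where x1 < 0 to 0, entries
where x1 > 0 to 1, and passes x2 through where x1 == 0. A hypothetical
eager-mode call (input values are illustrative and assume `constant_op` is
imported as in the surrounding snippets):

 x1 = constant_op.constant([-2.0, 0.0, 3.0])
 x2 = constant_op.constant([7.0, 7.0, 7.0])
 print(f(x1, x2))  # expected: [0., 7., 1.]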
Example #38
0
File: laplace.py Project: Harryi0/tinyML
 def _event_shape_tensor(self):
   return constant_op.constant([], dtype=dtypes.int32)
Example #39
0
 def dummy_get_state():
   return (constant_op.constant(0),)
Example #40
0
def experimental_tpu_fit_loop(model,
                              dataset,
                              epochs=100,
                              verbose=1,
                              callbacks=None,
                              initial_epoch=0,
                              steps_per_epoch=None,
                              val_dataset=None,
                              validation_steps=None,
                              validation_freq=1):
  """Fit loop for training with TPU DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      dataset: Dataset that returns inputs and targets
      epochs: Number of times to iterate over the data
      verbose: Integer, Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run)
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      val_dataset: Dataset for validation data.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.
      validation_freq: Only relevant if validation data is provided. Integer or
          `collections.Container` instance (e.g. list, tuple, etc.). If an
          integer, specifies how many training epochs to run before a new
          validation run is performed, e.g. `validation_freq=2` runs
          validation every 2 epochs. If a Container, specifies the epochs on
          which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
          validation at the end of the 1st, 2nd, and 10th epochs.

  Returns:
      Returns `None`.

  Raises:
      ValueError: in case of invalid arguments.
  """
  mode = ModeKeys.TRAIN
  # TODO(fchollet): add support for `steps_per_epoch=None` in TPU loops.
  current_strategy = model._distribution_strategy
  iterator = distributed_training_utils.get_iterator(dataset, current_strategy)
  steps_per_epoch = training_utils.infer_steps_for_dataset(
      dataset, steps_per_epoch, epochs, steps_name='steps_per_epoch')
  if (current_strategy.extended.steps_per_run != 1 and
      steps_per_epoch is None):
    raise ValueError('`steps_per_epoch` should be specified when calling '
                     '`fit` on the model with TPUStrategy when '
                     '`steps_per_run` != 1 .')

  scope = distributed_training_utils.distributed_scope(
      strategy=current_strategy, learning_phase=1)
  scope.__enter__()

  def _per_device_fit_function(model):
    model._make_fit_function()
    return (model._fit_function.inputs, model._fit_function.outputs,
            model._fit_function.updates_op, model._fit_function.session_kwargs)

  out_labels = model.metrics_names or []

  def step_fn(ctx, inputs):
    """Clones the model and calls make_fit_function."""
    inputs, targets = inputs
    if model._compile_distribution:
      distributed_training_utils.clone_model_on_replicas(
          model, current_strategy, mode, inputs=inputs, targets=targets)
    else:
      distributed_training_utils._build_distributed_network(
          model, current_strategy, mode, inputs, targets)

    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.extended.call_for_each_replica(
         _per_device_fit_function,
         args=(distributed_training_utils.get_distributed_model(
             model, ModeKeys.TRAIN),))
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs,
         grouped_updates, grouped_session_args)
    combined_fn = K.function(
        all_inputs,
        all_outputs,
        updates=all_updates,
        name='distributed_fit_function',
        **all_session_args)

    for label, output in zip(out_labels, combined_fn.outputs):
      if label == 'loss':
        reduce_op = ds_reduce_util.ReduceOp.SUM
      else:
        # We reduce all other metrics using mean for now. This is temporary
        # workaround until new metrics are in place.
        reduce_op = ds_reduce_util.ReduceOp.MEAN
      ctx.set_last_step_output(label, output, reduce_op)

    # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
    # feed_dict, session kwargs, run options, run_metadata for now. These should
    # be handled appropriately
    return combined_fn.updates_op

  # Add initial dummy values for loss and other metric tensors.
  initial_loop_values = {}
  initial_loop_values['loss'] = constant_op.constant(1e7)
  for name in model.metrics_names[1:]:
    tensor = model._all_stateful_metrics_tensors[name]
    initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

  use_steps = steps_per_epoch is not None
  if use_steps:
    iteration_value = min(steps_per_epoch,
                          current_strategy.extended.steps_per_run)
  else:
    iteration_value = current_strategy.extended.steps_per_run

  steps_per_run = K.variable(
      value=iteration_value,
      dtype='int32',
      name='steps_per_run')
  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=steps_per_run,
      initial_loop_values=initial_loop_values)
  train_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  do_validation = bool(validation_steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=do_validation,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      verbose=verbose,
      count_mode='steps',
      mode=mode)

  # Calculate the steps each time on the device.
  if use_steps:
    steps_to_run = ([current_strategy.extended.steps_per_run] *
                    (steps_per_epoch //
                     current_strategy.extended.steps_per_run))
    if steps_per_epoch % current_strategy.extended.steps_per_run:
      steps_to_run.append(
          steps_per_epoch % current_strategy.extended.steps_per_run)
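    # For example, steps_per_epoch=10 with steps_per_run=4 yields
    # steps_to_run == [4, 4, 2]; the final chunk covers the remainder.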
    target_steps = len(steps_to_run)
  else:
    target_steps = np.inf

  callbacks._call_begin_hook(mode)
  for epoch in range(initial_epoch, epochs):
    distributed_training_utils._reset_metrics(model)
    callbacks.on_epoch_begin(epoch)
    epoch_logs = {}
    step_index = 0
    prev_step_count = None
    current_step = 0
    while current_step < target_steps:
      step_count = steps_to_run[current_step] if use_steps else 1
      batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count}
      callbacks._call_batch_hook(mode, 'begin', step_index, batch_logs)
      if prev_step_count is None or step_count != prev_step_count:
        steps_per_run.load(step_count, K.get_session())
        prev_step_count = step_count
      try:
        _, outputs = K.get_session().run([train_op, output_tensors])
      except errors.OutOfRangeError:
        if use_steps:
          logging.warning('Your dataset iterator ran out of data; '
                          'interrupting training. Make sure that your dataset '
                          'can generate at least `steps_per_epoch * epochs` '
                          'batches (in this case, %d batches).' %
                          (steps_per_epoch * epochs))
        else:
          target_steps = current_step
          logging.info('Dataset iterator ran out of data. Inferring the '
                       'value of `steps_per_epoch` as %s.' % target_steps)
          distributed_training_utils.initialize_iterator(iterator,
                                                         current_strategy)
        break

      batch_logs.update(outputs)
      callbacks._call_batch_hook(mode, 'end', step_index, batch_logs)
      step_index = step_index + step_count
      current_step += 1

      if callbacks.model.stop_training:
        break

    if (do_validation and
        training_utils.should_run_validation(validation_freq, epoch)):
      logging.info('Running validation at fit epoch: %s', epoch)

      if model._compile_distribution:
        # Since we create a new clone from the original model we need to copy
        # the weights back to the original model before we can run validation.
        distributed_training_utils._copy_weights_to_original_model(
            model, ModeKeys.TRAIN)

      val_outs = experimental_tpu_test_loop(  # pylint: disable=undefined-variable
          model,
          val_dataset,
          steps=validation_steps,
          verbose=verbose,
          callbacks=callbacks)
      if not isinstance(val_outs, list):
        val_outs = [val_outs]
      # Same labels assumed.
      for label, val_out in zip(out_labels, val_outs):
        epoch_logs['val_' + label] = val_out

    callbacks.on_epoch_end(epoch, epoch_logs)
    if callbacks.model.stop_training:
      break
  callbacks._call_end_hook(mode)

  if model._compile_distribution:
    # Copy the weights back from the replicated model to the original model.
    distributed_training_utils._copy_weights_to_original_model(
        model, ModeKeys.TRAIN)
  scope.__exit__(None, None, None)
  return model.history
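The `validation_freq` behaviour described in the docstring reduces to a modulo
check for integers and a membership check for containers. A hypothetical
sketch of that decision (not the actual `training_utils.should_run_validation`
implementation; epochs are treated as 1-indexed, matching the docstring
examples):

 def should_run_validation(validation_freq, epoch):
   one_indexed_epoch = epoch + 1  # `epoch` is the 0-based loop index above
   if isinstance(validation_freq, int):
     return one_indexed_epoch % validation_freq == 0
   return one_indexed_epoch in validation_freq  # e.g. validation_freq=[1, 2, 10]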
Example #41
0
 def testPreventGradient(self):
   with ops.Graph().as_default():
     inp = constant(1.0, shape=[100, 32], name="in")
     out = array_ops.prevent_gradient(inp)
     with self.assertRaisesRegexp(LookupError, "No gradient defined"):
       _ = gradients.gradients(out, inp)
Example #42
0
def _streaming_sum(scalar_tensor):
  """Create a sum metric and update op."""
  sum_metric = framework.local_variable(constant_op.constant(0.0))
  sum_update = sum_metric.assign_add(scalar_tensor)
  return sum_metric, sum_update
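A hypothetical usage sketch for the helper above (assumes `import tensorflow
as tf` for the graph-mode session; `framework.local_variable` creates a local
variable, hence the local-variables initializer):

 total, update_total = _streaming_sum(constant_op.constant(2.5))
 with tf.Session() as sess:
   sess.run(tf.local_variables_initializer())
   sess.run(update_total)        # accumulates 2.5
   sess.run(update_total)        # accumulates another 2.5
   print(sess.run(total))        # 5.0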
Example #43
0
 def test_fn():
     return constant_op.constant(0)
Example #44
0
def _tf_dataset_for_stmt(
    ds, extra_test, body, get_state, set_state, symbol_names, opts):
  """Overload of _dataset_for_stmt with early stopping. See for_stmt."""
  # Note: This is easier to follow with the insight that the computations in
  # a dataset pipeline are transposed (aka fused).
  # For example, given a pipeline input -> scan -> take_while -> reduce,
  # and a dataset with input [1, 2, 3], the computations occur in the following
  # order:
  #  reduce(take_while(scan(1)))
  #  reduce(take_while(scan(2)))
  #  reduce(take_while(scan(3)))

  init_vars = get_state()
  _verify_loop_init_vars(init_vars, symbol_names)

  # Workaround for Dataset.reduce not allowing empty state tensors - create
  # a dummy state variable that remains unused.
  # TODO(mdan): reduce should allow and match empty structures.
  if not init_vars:
    init_vars = (constant_op.constant(0),)
    symbol_names = ('<internal dummy>',)

    def dummy_set_state(unused_dummy):
      pass

    def dummy_get_state():
      return (constant_op.constant(0),)

    get_state, set_state = dummy_get_state, dummy_set_state

  def scan_body(scan_state, scan_inputs):
    """Main body of the Dataset.scan."""
    loop_vars, iterate = scan_state, scan_inputs
    set_state(loop_vars)

    def main_path():
      body(iterate)
      new_loop_vars = get_state()
      _verify_tf_loop_vars(
          init_vars, loop_vars, new_loop_vars, symbol_names, opts,
          check_shapes=False)
      return new_loop_vars

    if extra_test is not None:
      extra_cond = extra_test()
      new_loop_vars = control_flow_ops.cond(
          extra_cond, main_path, lambda: loop_vars)
    else:
      # TODO(mdan): the optimizer should be able to remove an invariant cond?
      extra_cond = (constant_op.constant(True),)  # dummy value, unused
      new_loop_vars = main_path()

    scan_outputs = new_loop_vars, extra_cond
    new_scan_state = new_loop_vars
    return new_scan_state, scan_outputs

  def take_while_predicate(unused_loop_vars, extra_cond):
    return extra_cond

  def reduce_body(unused_reduce_state, scan_outputs):
    output_loop_vars, unused_extra_cond = scan_outputs
    new_reduce_state = output_loop_vars
    return new_reduce_state

  ds = _general_purpose_scan(ds, init_vars, scan_body)
  if extra_test is not None:
    ds = ds.apply(take_while_ops.take_while(take_while_predicate))
  final_loop_vars = ds.reduce(init_vars, reduce_body)
  set_state(final_loop_vars)
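The comments above describe the pipeline as transposed: every element flows
through scan, then take_while, then reduce before the next element is read. A
minimal standalone sketch of the same scan -> take_while -> reduce composition
using the public tf.data API (the example values and the
`tf.data.experimental` endpoints are assumptions for illustration, not part of
the function above):

 import tensorflow as tf

 def scan_body(total, x):
   new_total = total + x
   keep_going = new_total < 6            # plays the role of extra_test()
   return new_total, (new_total, keep_going)

 ds = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5])
 ds = ds.apply(tf.data.experimental.scan(tf.constant(0), scan_body))
 ds = ds.apply(tf.data.experimental.take_while(lambda total, keep: keep))
 final_total = ds.reduce(tf.constant(0), lambda _, outputs: outputs[0])
 # In eager mode final_total evaluates to 3: the element that would push the
 # total to 6 fails the predicate, so the remaining elements are never processed.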
Example #45
0
 def _wrap_initializer(obj):
     obj._initialize()  # pylint: disable=protected-access
     return constant_op.constant(1.)  # Dummy control output
Example #46
0
 def testStopGradient(self):
   with ops.Graph().as_default():
     inp = constant(1.0, shape=[100, 32], name="in")
     out = array_ops.stop_gradient(inp)
     igrad = gradients.gradients(out, inp)[0]
   assert igrad is None
Example #47
0
def experimental_fit_loop(model,
                          iterator,
                          epochs=100,
                          verbose=1,
                          callbacks=None,
                          initial_epoch=0,
                          steps_per_epoch=None,
                          val_iterator=None,
                          validation_steps=None):
    """Fit loop for training with TPU DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      iterator: Iterator that returns inputs and targets
      epochs: Number of times to iterate over the data
      verbose: Integer, Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run)
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      val_iterator: Iterator for validation data.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.

  Returns:
      Returns `None`.

  Raises:
      ValueError: in case of invalid arguments.
  """
    current_strategy = model._distribution_strategy

    K.get_session().run(current_strategy.initialize())

    def _per_device_fit_function(model):
        model._make_fit_function()
        return (model._fit_function.inputs, model._fit_function.outputs,
                model._fit_function.updates_op,
                model._fit_function.session_kwargs)

    # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
    K.set_learning_phase(1)
    out_labels = model.metrics_names or []

    def step_fn(ctx, inputs):
        """Clones the model and calls make_fit_function."""
        # TODO(priyag, sourabhbajaj): The model gets cloned every time
        # fit/test/predict is called. We should look into caching this keyed on
        # input shapes.
        inputs, targets = inputs
        clone_model_on_replicas(model,
                                current_strategy,
                                make_callback_model=True,
                                inputs=inputs,
                                targets=targets,
                                mode=_Mode.TRAIN)

        (grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args
         ) = current_strategy.extended.call_for_each_replica(
             _per_device_fit_function, args=(model._grouped_model_train, ))
        (all_inputs, all_outputs, all_updates,
         all_session_args) = distributed_training_utils.unwrap_values(
             current_strategy, grouped_inputs, grouped_outputs,
             grouped_updates, grouped_session_args)
        combined_fn = K.function(all_inputs,
                                 all_outputs,
                                 updates=all_updates,
                                 name='distributed_fit_function',
                                 **all_session_args)

        for label, output in zip(out_labels, combined_fn.outputs):
            if label == 'loss':
                reduce_op = distribute_lib.get_loss_reduction()
            else:
                # We reduce all other metrics using mean for now. This is temporary
                # workaround until new metrics are in place.
                reduce_op = ds_reduce_util.ReduceOp.MEAN
            ctx.set_last_step_output(label, output, reduce_op)

        # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
        # feed_dict, session kwargs, run options, run_metadata for now. These should
        # be handled appropriately
        return combined_fn.updates_op

    # Add initial dummy values for loss and other metric tensors.
    initial_loop_values = {}
    initial_loop_values['loss'] = constant_op.constant(1e7)
    for name in model.metrics_names[1:]:
        tensor = model._all_stateful_metrics_tensors[name]
        initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

    if steps_per_epoch is None:
        raise ValueError('`steps_per_epoch` should be specified when calling '
                         '`fit` on the model.')
    steps_per_run = K.variable(value=min(
        steps_per_epoch, current_strategy.extended.steps_per_run),
                               dtype='int32',
                               name='steps_per_run')

    with current_strategy.scope():
        ctx = current_strategy.extended.experimental_run_steps_on_iterator(
            step_fn,
            iterator,
            iterations=steps_per_run,
            initial_loop_values=initial_loop_values)

    train_op = ctx.run_op
    output_tensors = ctx.last_step_outputs

    do_validation = bool(validation_steps)

    # Copy the weights from the original model to each of the replicated models.
    with current_strategy.scope():
        _copy_weights_to_distributed_model(model, model._grouped_model_train)

    callbacks = cbks.configure_callbacks(callbacks,
                                         model,
                                         do_validation=do_validation,
                                         epochs=epochs,
                                         steps_per_epoch=steps_per_epoch,
                                         verbose=verbose)

    # Calculate the steps each time on the device.
    steps_to_run = [current_strategy.extended.steps_per_run] * (
        steps_per_epoch // current_strategy.extended.steps_per_run)
    if steps_per_epoch % current_strategy.extended.steps_per_run:
        steps_to_run.append(steps_per_epoch %
                            current_strategy.extended.steps_per_run)

    callbacks.on_train_begin()
    for epoch in range(initial_epoch, epochs):
        with current_strategy.scope():
            _reset_metrics(model, model._grouped_model_train)
        callbacks.on_epoch_begin(epoch)
        epoch_logs = {}
        step_index = 0
        prev_step_count = None
        for step_count in steps_to_run:
            batch_logs = {
                'batch': step_index,
                'size': 1,
                'num_steps': step_count
            }
            callbacks.on_batch_begin(step_index, batch_logs)
            if prev_step_count is None or step_count != prev_step_count:
                steps_per_run.load(step_count, K.get_session())
                prev_step_count = step_count
            try:
                _, outputs = K.get_session().run([train_op, output_tensors])
            except errors.OutOfRangeError:
                logging.warning(
                    'Your dataset iterator ran out of data; '
                    'interrupting training. Make sure that your dataset '
                    'can generate at least `steps_per_epoch * epochs` '
                    'batches (in this case, %d batches).' %
                    (steps_per_epoch * epochs))
                break

            batch_logs.update(outputs)
            callbacks.on_batch_end(step_index, batch_logs)
            step_index = step_index + step_count
            if callbacks.model.stop_training:
                break

        if do_validation:
            logging.info('Running validation at fit epoch: %s', epoch)

            # Since we create a new clone from the original model we need to copy
            # the weights back to the original model before we can run validation.
            with current_strategy.scope():
                _copy_weights_to_original_model(model,
                                                model._grouped_model_train,
                                                'train')

            val_outs = experimental_test_loop(  # pylint: disable=undefined-variable
                model,
                val_iterator,
                steps=validation_steps,
                verbose=verbose,
                initialize_finalize_strategy=False)
            if not isinstance(val_outs, list):
                val_outs = [val_outs]
            # Same labels assumed.
            for label, val_out in zip(out_labels, val_outs):
                epoch_logs['val_' + label] = val_out

        callbacks.on_epoch_end(epoch, epoch_logs)
        if callbacks.model.stop_training:
            break
    callbacks.on_train_end()

    # Copy the weights back from the replicated model to the original model.
    with current_strategy.scope():
        _copy_weights_to_original_model(model, model._grouped_model_train,
                                        'train')

    K.get_session().run(current_strategy.finalize())
    return model.history
Example #48
0
        vocab, embed = build_vocab(FLAGS.data_dir, data_queries + data_docs)

        model = LSTMDSSM(
            FLAGS.units,
            embed,
            FLAGS.neg_num)
        if FLAGS.log_parameters:
            model.print_parameters()

        if tf.train.get_checkpoint_state(FLAGS.train_dir):
            print("Reading model parameters from %s" % FLAGS.train_dir)
            model.saver.restore(sess, tf.train.latest_checkpoint(FLAGS.train_dir))
        else:
            print("Created model with fresh parameters.")
            tf.global_variables_initializer().run()
            op_in = model.word2index.insert(constant_op.constant(vocab),
                                              constant_op.constant(list(range(FLAGS.symbols)), dtype=tf.int64))
            sess.run(op_in)

        summary_writer = tf.summary.FileWriter('%s/log' % FLAGS.train_dir, sess.graph)
        total_train_time = 0.0
        while model.epoch.eval() < FLAGS.epoch:
            epoch = model.epoch.eval()
            random_idxs = list(range(len(data_queries)))
            random.shuffle(random_idxs)
            data_queries = [data_queries[i] for i in random_idxs]
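            # Reshape so each row holds one query's (neg_num + 1) docs; this keeps
            # the docs aligned with their shuffled query before flattening back out.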
            data_docs = np.reshape(data_docs, (len(data_queries), -1))
            data_docs = [data_docs[i] for i in random_idxs]
            data_docs = np.reshape(data_docs, len(data_queries) * (FLAGS.neg_num + 1))
            start_time = time.time()
            loss = train(model, sess, data_queries, data_docs)
Example #49
0
 def _train_op_fn(loss):
     return string_ops.string_join([
         constant_op.constant(expected_train_result),
         string_ops.as_string(loss, precision=3)
     ])
Example #50
0
 def _constant_state():
   return constant_op.constant(self._state_callback(), dtype=dtypes.string)
Example #51
0
def experimental_tpu_fit_loop(model,
                              dataset,
                              epochs=100,
                              verbose=1,
                              callbacks=None,
                              initial_epoch=0,
                              steps_per_epoch=None,
                              val_dataset=None,
                              validation_steps=None,
                              validation_freq=1):
    """Fit loop for training with TPU tf.distribute.Strategy.

  Arguments:
      model: Keras Model instance.
      dataset: Dataset that returns inputs and targets
      epochs: Number of times to iterate over the data
      verbose: Integer, Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run)
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      val_dataset: Dataset for validation data.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.
      validation_freq: Only relevant if validation data is provided. Integer or
          `collections.Container` instance (e.g. list, tuple, etc.). If an
          integer, specifies how many training epochs to run before a new
          validation run is performed, e.g. `validation_freq=2` runs
          validation every 2 epochs. If a Container, specifies the epochs on
          which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
          validation at the end of the 1st, 2nd, and 10th epochs.

  Returns:
      Returns `None`.

  Raises:
      ValueError: in case of invalid arguments.
  """
    mode = ModeKeys.TRAIN
    # TODO(fchollet): add support for `steps_per_epoch=None` in TPU loops.
    current_strategy = model._distribution_strategy
    iterator = distributed_training_utils.get_iterator(dataset,
                                                       current_strategy)
    steps_per_epoch = training_utils.infer_steps_for_dataset(
        dataset, steps_per_epoch, epochs, steps_name='steps_per_epoch')

    scope = distributed_training_utils.distributed_scope(
        strategy=current_strategy, learning_phase=1)
    scope.__enter__()

    out_labels = model.metrics_names or []

    step_fn = _make_train_step_fn(model, ModeKeys.TRAIN, current_strategy,
                                  out_labels)

    # Add initial dummy values for loss and other metric tensors.
    initial_loop_values = {}
    initial_loop_values['loss'] = constant_op.constant(1e7)
    for name in model.metrics_names[1:]:
        tensor = model._all_metrics_tensors[name]
        initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

    if steps_per_epoch is not None:
        iteration_value = min(steps_per_epoch,
                              current_strategy.extended.steps_per_run)
    else:
        raise ValueError('Number of steps could not be inferred from the data, '
                         'please pass the steps_per_epoch argument.')

    steps_per_run = K.variable(value=iteration_value,
                               dtype='int32',
                               name='steps_per_run')
    ctx = current_strategy.extended.experimental_run_steps_on_iterator(
        step_fn,
        iterator,
        iterations=steps_per_run,
        initial_loop_values=initial_loop_values)
    train_op = ctx.run_op
    output_tensors = ctx.last_step_outputs

    do_validation = bool(validation_steps)

    if model._compile_distribution:
        distributed_training_utils._copy_weights_to_distributed_model(
            model, mode)

    callbacks = cbks.configure_callbacks(callbacks,
                                         model,
                                         do_validation=do_validation,
                                         epochs=epochs,
                                         steps_per_epoch=steps_per_epoch,
                                         verbose=verbose,
                                         count_mode='steps',
                                         mode=mode)

    # Calculate the steps each time on the device.
    steps_to_run = (
        [current_strategy.extended.steps_per_run] *
        (steps_per_epoch // current_strategy.extended.steps_per_run))
    if steps_per_epoch % current_strategy.extended.steps_per_run:
        steps_to_run.append(steps_per_epoch %
                            current_strategy.extended.steps_per_run)
    target_steps = len(steps_to_run)

    callbacks._call_begin_hook(mode)
    for epoch in range(initial_epoch, epochs):
        distributed_training_utils._reset_metrics(model)
        callbacks.on_epoch_begin(epoch)
        epoch_logs = {}
        step_index = 0
        prev_step_count = None
        current_step = 0
        while current_step < target_steps:
            step_count = steps_to_run[current_step]
            batch_logs = {
                'batch': step_index,
                'size': 1,
                'num_steps': step_count
            }
            callbacks._call_batch_hook(mode, 'begin', step_index, batch_logs)
            if prev_step_count is None or step_count != prev_step_count:
                steps_per_run.load(step_count, K.get_session())
                prev_step_count = step_count
            try:
                _, outputs = K.batch_get_value([train_op, output_tensors])
            except errors.OutOfRangeError:
                logging.warning(
                    'Your dataset iterator ran out of data; '
                    'interrupting training. Make sure that your dataset '
                    'can generate at least `steps_per_epoch * epochs` '
                    'batches (in this case, %d batches).' %
                    (steps_per_epoch * epochs))
                break

            batch_logs.update(outputs)
            callbacks._call_batch_hook(mode, 'end', step_index, batch_logs)
            step_index = step_index + step_count
            current_step += 1

            if callbacks.model.stop_training:
                break

        if (do_validation and training_utils.should_run_validation(
                validation_freq, epoch)):
            logging.info('Running validation at fit epoch: %s', epoch)

            if model._compile_distribution:
                # Since we create a new clone from the original model we need to copy
                # the weights back to the original model before we can run validation.
                distributed_training_utils._copy_weights_to_original_model(
                    model, ModeKeys.TRAIN)

            val_outs = experimental_tpu_test_loop(  # pylint: disable=undefined-variable
                model,
                val_dataset,
                steps=validation_steps,
                verbose=verbose,
                callbacks=callbacks)
            if not isinstance(val_outs, list):
                val_outs = [val_outs]
            # Same labels assumed.
            for label, val_out in zip(out_labels, val_outs):
                epoch_logs['val_' + label] = val_out

        callbacks.on_epoch_end(epoch, epoch_logs)
        if callbacks.model.stop_training:
            break
    callbacks._call_end_hook(mode)

    if model._compile_distribution:
        # Copy the weights back from the replicated model to the original model.
        distributed_training_utils._copy_weights_to_original_model(
            model, ModeKeys.TRAIN)
    scope.__exit__(None, None, None)
    return model.history
Example #52
0
def experimental_test_loop(model,
                           iterator,
                           verbose=0,
                           steps=None,
                           initialize_finalize_strategy=True):
    """Test loop for evaluating with TPU DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      iterator: Iterator for input data.
      verbose: Integer, Verbosity mode 0 or 1.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.
      initialize_finalize_strategy: Should the strategy initialize and finalize
          functions be called.

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the outputs.
  """
    current_strategy = model._distribution_strategy
    if initialize_finalize_strategy:
        K.get_session().run(current_strategy.initialize())

    def _per_device_eval_function(model):
        model._make_eval_function()
        return (model._eval_function.inputs, model._eval_function.outputs,
                model._eval_function.updates_op,
                model._eval_function.session_kwargs)

    # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
    K.set_learning_phase(0)

    def step_fn(ctx, inputs):
        """Clones the model and calls make_eval_function."""
        # TODO(priyag, sourabhbajaj): The model gets cloned every time
        # fit/test/predict is called. We should look into caching this keyed on
        # input shapes.
        inputs, targets = inputs
        clone_model_on_replicas(model,
                                current_strategy,
                                make_callback_model=False,
                                inputs=inputs,
                                targets=targets,
                                mode=_Mode.TEST)

        (grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args
         ) = current_strategy.extended.call_for_each_replica(
             _per_device_eval_function, args=(model._grouped_model_test, ))

        (all_inputs, all_outputs, all_updates,
         all_session_args) = distributed_training_utils.unwrap_values(
             current_strategy, grouped_inputs, grouped_outputs,
             grouped_updates, grouped_session_args)

        combined_fn = K.function(all_inputs,
                                 all_outputs,
                                 updates=all_updates,
                                 name='distributed_test_function',
                                 **all_session_args)

        for label, output in zip(model.metrics_names, combined_fn.outputs):
            if label == 'loss':
                reduce_op = distribute_lib.get_loss_reduction()
            else:
                # We reduce all other metrics using mean for now. This is temporary
                # workaround until new metrics are in place.
                reduce_op = ds_reduce_util.ReduceOp.MEAN
            ctx.set_last_step_output(label, output, reduce_op)

        return combined_fn.updates_op

    # Add initial dummy values for loss and other metric tensors.
    initial_loop_values = {}
    initial_loop_values['loss'] = constant_op.constant(1e7)
    for name in model.metrics_names[1:]:
        tensor = model._all_stateful_metrics_tensors[name]
        initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

    with current_strategy.scope():
        # TODO(priyag): Use steps_per_run when we use new metrics as they will
        # allow handling metric computation at each step using variables.
        ctx = current_strategy.extended.experimental_run_steps_on_iterator(
            step_fn,
            iterator,
            iterations=1,
            initial_loop_values=initial_loop_values)

    test_op = ctx.run_op
    output_tensors = ctx.last_step_outputs

    if verbose == 1:
        progbar = Progbar(target=steps)

    # Copy the weights from the original model to each of the replicated models.
    with current_strategy.scope():
        _copy_weights_to_distributed_model(model, model._grouped_model_test)
        _reset_metrics(model, model._grouped_model_test)
    assert steps is not None
    outs = [0.] * len(model.metrics_names)
    for step in range(steps):
        _, batch_outs = K.get_session().run([test_op, output_tensors])
        for i, label in enumerate(model.metrics_names):
            if i == 0:
                # Loss is a stateless metric; accumulate it across steps.
                outs[i] += batch_outs[label]
            else:
                # For all stateful metrics, the aggregation is handled by mirrored vars.
                outs[i] = batch_outs[label]

        if verbose == 1:
            progbar.update(step + 1)

    if len(outs) > 0:
        outs[0] /= steps

    if initialize_finalize_strategy:
        K.get_session().run(current_strategy.finalize())

    if len(outs) == 1:
        return outs[0]
    return outs
Example #53
0
    def testGradientInFunction(self):
        @function.defun
        def f(x):
            return backprop.gradients_function(lambda y: y * y, [0])(x)[0]

        self.assertAllEqual(f(constant_op.constant(1.0)), 2.0)
Example #54
0
 def _loss_fn(labels, logits):
     del labels, logits  # Unused
     return constant_op.constant(loss)
Example #55
0
 def f(x):
     return math_ops.add(x, constant_op.constant(3))
Example #56
0
 def metric_fn():
   metric = metrics_module.Mean()
   metric.update_state(constant_op.constant([2.]))
   return {'auc': metric}
Example #57
0
    def testDict(self):
        @function.defun
        def f(x):
            return {'name': x + 1}

        self.assertAllEqual(f(constant_op.constant(1.0))['name'], 2.0)
Example #58
0
 def f():
     x = variable_scope.get_variable(
         'v', initializer=constant_op.constant(1.0))
     return x * constant_op.constant(2.0)
Example #59
0
 def f():
     x = constant_op.constant([[1, 2], [3, 4]])
     out = math_ops.matmul(v, x)
     self.assertEqual(out.get_shape(),
                      tensor_shape.TensorShape([2, 2]))
Example #60
0
    def testTensorConversionWithDefun(self):
        @function.defun
        def f(x):
            return math_ops.add(x, constant_op.constant(3))

        self.assertAllEqual(5, f(constant_op.constant(2)))