Example #1
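The snippets below reference several names that are never defined on this page. They appear to come from the TensorFlow 1.x tf.contrib.distribute test suite; a plausible set of imports, with module paths assumed from that layout rather than taken from the page itself, would be:

# Assumed TF 1.x contrib module paths; not part of the original snippets.
import numpy

from tensorflow.contrib.distribute.python import monitor as monitor_lib
from tensorflow.contrib.distribute.python import one_device_strategy
from tensorflow.contrib.distribute.python.single_loss_example import (
    single_loss_example)
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.training import gradient_descent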
  def testTrainNetwork(self, distribution, optimizer_fn, is_tpu):
    with distribution.scope():
      # Build a one-unit network plus the step function that trains it;
      # `layer` is returned so the test can inspect its variables.
      single_loss_step, layer = single_loss_example(
          optimizer_fn, distribution, use_bias=True, iterations_per_step=2)

      if context.executing_eagerly():
        single_loss_step.initialize()
        run_step = single_loss_step
      else:
        with self.cached_session() as sess:
          sess.run(single_loss_step.initialize())
          run_step = sess.make_callable(single_loss_step())
      self.evaluate(variables.global_variables_initializer())

      weights, biases = [], []
      for _ in range(5):
        run_step()
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      # Training drives kernel + bias toward 1, so the distance from 1
      # should never increase from one step to the next.
      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)
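Every example on this page builds its model through single_loss_example, which is not shown here. Below is a rough sketch of what such a helper could return, inferred from how the tests use it; the StandardSingleLossStep name, the loss_fn signature, and the module paths are assumptions, not taken from this page.

# Sketch only: names and signatures below are assumed, not confirmed.
from tensorflow.contrib.distribute.python import step_fn
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.layers import core


def single_loss_example(optimizer_fn, distribution, use_bias=False,
                        iterations_per_step=1):
  """Sketch: one Dense unit trained so that kernel + bias approaches 1."""
  optimizer = optimizer_fn()
  layer = core.Dense(1, use_bias=use_bias)

  def dataset_fn():
    # A constant input of 1., repeated forever.
    return dataset_ops.Dataset.from_tensors([[1.]]).repeat()

  def loss_fn(ctx, x):
    del ctx
    # Squared error against a constant target of 1.
    y = layer(x) - 1.
    return y * y

  # The tests call single_loss_step.initialize() and single_loss_step(),
  # which matches a step object along these (assumed) lines.
  single_loss_step = step_fn.StandardSingleLossStep(
      dataset_fn, loss_fn, optimizer, distribution, iterations_per_step)
  return single_loss_step, layer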
  def testNotPassingASessionInGraph(self):
    distribution = one_device_strategy.OneDeviceStrategy("/device:CPU:0")
    step_function, _ = single_loss_example(
        lambda: gradient_descent.GradientDescentOptimizer(0.2), distribution)

    # In graph mode the Monitor needs a session to run the step op, so
    # constructing it without one must fail.
    with context.graph_mode(), ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, "Should provide"):
        _ = monitor_lib.Monitor(step_function, session=None)
  def testPassingASessionInEager(self):
    distribution = one_device_strategy.OneDeviceStrategy("/device:CPU:0")
    step_function, _ = single_loss_example(
        lambda: gradient_descent.GradientDescentOptimizer(0.2), distribution)

    # In eager mode there is no session to use; passing one must be rejected.
    with session.Session() as sess, context.eager_mode():
      with self.assertRaisesRegexp(ValueError, "Should not provide"):
        _ = monitor_lib.Monitor(step_function, sess)
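Taken together, the two tests above pin down Monitor's constructor contract: a session is required in graph mode and rejected in eager mode. A minimal sketch of a class honoring that contract, assuming the step callable exposes the initialize()/__call__ interface used in the training tests (this is an illustration, not the actual monitor_lib.Monitor):

class SketchMonitor(object):
  """Illustrative stand-in for monitor_lib.Monitor; behavior assumed."""

  def __init__(self, step_callable, session=None):
    if context.executing_eagerly():
      if session is not None:
        raise ValueError("Should not provide a session in eager mode.")
      # Eager: initialize once, then invoke the step function directly.
      step_callable.initialize()
      self._run_step = step_callable
    else:
      if session is None:
        raise ValueError("Should provide a session in graph mode.")
      # Graph: run the initializer op, then wrap the step op in a
      # session callable so each run_steps() iteration is cheap.
      session.run(step_callable.initialize())
      self._run_step = session.make_callable(step_callable())

  def run_steps(self, num_steps):
    for _ in range(num_steps):
      self._run_step()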
  def testTrainNetwork(self, distribution, optimizer_fn):
    with distribution.scope():
      single_loss_step, layer = single_loss_example(optimizer_fn, distribution)

      # In eager mode the Monitor drives the step itself; in graph mode
      # it needs a session to run the step op.
      if context.executing_eagerly():
        monitor = monitor_lib.Monitor(single_loss_step, None)
      else:
        with self.cached_session() as sess:
          monitor = monitor_lib.Monitor(single_loss_step, sess)

      monitor.run_steps(1)

      self.assertEqual(1, len(layer.trainable_variables))
      mirrored_weight_variable = layer.trainable_variables[0]
      start_error = self.evaluate(mirrored_weight_variable)
      start_error = abs(numpy.array(start_error) - 1)

      # After nine more steps the weight should be no farther from the
      # target value 1 than it was after the first step.
      monitor.run_steps(9)
      end_error = self.evaluate(mirrored_weight_variable)
      end_error = abs(numpy.array(end_error) - 1)
      self.assertGreaterEqual(start_error, end_error)
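Both testTrainNetwork variants take distribution and optimizer_fn (and in one case is_tpu) as arguments, so they are clearly parameterized. In the TF 1.x contrib distribute suite this was typically done with a combinations decorator roughly like the following; the import path and named objects shown are assumptions:

# from tensorflow.contrib.distribute.python import combinations  # assumed path
@combinations.generate(
    combinations.combine(
        distribution=[combinations.one_device_strategy],  # assumed name
        optimizer_fn=[
            combinations.gradient_descent_optimizer_v1_fn],  # assumed name
        mode=["graph", "eager"]))
def testTrainNetwork(self, distribution, optimizer_fn):
  ...  # body as shown above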