Example #1
    def _mock_optimizer(self, expected_loss=None):
        expected_var_names = [
            '%s/part_0:0' % CELL_BIAS_NAME,
            '%s/part_0:0' % CELL_WEIGHTS_NAME,
            '%s/part_0:0' % LOGITS_BIAS_NAME,
            '%s/part_0:0' % LOGITS_WEIGHTS_NAME,
        ]

        def _minimize(loss, global_step):
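            """Mock of optimizer.minimize."""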
            trainable_vars = ops.get_collection(
                ops.GraphKeys.TRAINABLE_VARIABLES)
            self.assertItemsEqual(expected_var_names,
                                  [var.name for var in trainable_vars])

            # Verify loss. We can't check the value directly, so we add an assert op.
            self.assertEqual(0, loss.shape.ndims)
            if expected_loss is None:
                return state_ops.assign_add(global_step, 1).op
            assert_loss = _assert_close(math_ops.to_float(expected_loss,
                                                          name='expected'),
                                        loss,
                                        name='assert_loss')
            with ops.control_dependencies((assert_loss, )):
                return state_ops.assign_add(global_step, 1).op

        mock_optimizer = test.mock.NonCallableMock(spec=optimizer.Optimizer,
                                                   wraps=optimizer.Optimizer(
                                                       use_locking=False,
                                                       name='my_optimizer'))
        mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)

        # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
        # So, return mock_optimizer itself for deepcopy.
        mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
        return mock_optimizer
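
For reference: the snippet above calls an _assert_close helper (the later snippets call it assert_close) that is not shown here. Below is a minimal sketch of what such a helper typically looks like in these TF 1.x testing utilities; the relative tolerance and the error-message text are assumptions, not taken from the snippet.

from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops


def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  """Returns an op asserting `actual` is within `rtol` of `expected`."""
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    # Relative difference, checked inside the graph so it runs at train time.
    rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    return check_ops.assert_less(
        rdiff, rtol,
        data=('Condition expected =~ actual did not hold element-wise: '
              'expected = ', expected, ', actual = ', actual,
              ', rdiff = ', rdiff, ', rtol = ', rtol),
        name=scope)
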
Example #2
    def _mock_optimizer(self, expected_loss=None):
        expected_var_names = ['%s:0' % BIAS_NAME]

        def _minimize(loss, global_step=None, var_list=None):
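            """Mock of optimizer.minimize."""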
            trainable_vars = var_list or ops.get_collection(
                ops.GraphKeys.TRAINABLE_VARIABLES)
            self.assertItemsEqual(expected_var_names,
                                  [var.name for var in trainable_vars])

            # Verify loss. We can't check the value directly, so we add an assert op.
            self.assertEqual(0, loss.shape.ndims)
            if expected_loss is None:
                if global_step is not None:
                    return distribute_lib.increment_var(global_step)
                return control_flow_ops.no_op()
            assert_loss = assert_close(math_ops.to_float(expected_loss,
                                                         name='expected'),
                                       loss,
                                       name='assert_loss')
            with ops.control_dependencies((assert_loss, )):
                if global_step is not None:
                    return distribute_lib.increment_var(global_step)
                return control_flow_ops.no_op()

        mock_optimizer = test.mock.NonCallableMock(spec=optimizer.Optimizer,
                                                   wraps=optimizer.Optimizer(
                                                       use_locking=False,
                                                       name='my_optimizer'))
        mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)

        # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
        # So, return mock_optimizer itself for deepcopy.
        mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
        return mock_optimizer
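
A note on the mock construction used above: spec=optimizer.Optimizer makes the mock pass isinstance checks inside the estimator, wraps= routes method calls through a real Optimizer instance while still recording them, and minimize is then replaced by a MagicMock wrapping the test's _minimize, so training both runs the graph-level assertions and leaves a call record (typically checked via mock_optimizer.minimize.call_count before and after train()). Below is a self-contained toy illustration of that wrapping pattern with plain unittest.mock; FakeOptimizer and the string return values are made up for illustration.

from unittest import mock


class FakeOptimizer(object):
  """Toy stand-in for optimizer.Optimizer."""

  def __init__(self, name):
    self.name = name

  def minimize(self, loss):
    return 'real minimize of %s' % loss


def _fake_minimize(loss):
  # Stands in for the test's _minimize: run checks, then pretend to train.
  return 'checked and minimized %s' % loss


real_opt = FakeOptimizer(name='my_optimizer')
mock_opt = mock.NonCallableMagicMock(spec=FakeOptimizer, wraps=real_opt)
mock_opt.minimize = mock.MagicMock(wraps=_fake_minimize)

assert isinstance(mock_opt, FakeOptimizer)  # spec= makes isinstance pass
assert mock_opt.minimize('loss') == 'checked and minimized loss'
mock_opt.minimize.assert_called_once_with('loss')  # the call was recorded
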
Example #3
  def _mockOptimizer(self, hidden_units, expected_loss=None):
    hidden_weights_names = [
        (_HIDDEN_WEIGHTS_NAME_PATTERN + '/part_0:0') % i
        for i in range(len(hidden_units))]
    hidden_biases_names = [
        (_HIDDEN_BIASES_NAME_PATTERN + '/part_0:0') % i
        for i in range(len(hidden_units))]
    expected_var_names = (
        hidden_weights_names + hidden_biases_names +
        [_LOGITS_WEIGHTS_NAME + '/part_0:0', _LOGITS_BIASES_NAME + '/part_0:0'])

    def _minimize(loss, global_step):
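      """Mock of optimizer.minimize."""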
      trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertItemsEqual(
          expected_var_names,
          [var.name for var in trainable_vars])

      # Verify loss. We can't check the value directly, so we add an assert op.
      self.assertEqual(0, loss.shape.ndims)
      if expected_loss is None:
        return state_ops.assign_add(global_step, 1).op
      assert_loss = _assert_close(
          math_ops.to_float(expected_loss, name='expected'), loss,
          name='assert_loss')
      with ops.control_dependencies((assert_loss,)):
        return state_ops.assign_add(global_step, 1).op

    mock_optimizer = test.mock.NonCallableMagicMock(
        spec=optimizer.Optimizer,
        wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
    mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)

    return mock_optimizer


def mock_optimizer(testcase, hidden_units, expected_loss=None):
  """Creates a mock optimizer to test the train method.

  Args:
    testcase: A TestCase instance.
    hidden_units: Iterable of integer sizes for the hidden layers.
    expected_loss: If given, will assert the loss value.

  Returns:
    A mock Optimizer.
  """
  hidden_weights_names = [(HIDDEN_WEIGHTS_NAME_PATTERN + '/part_0:0') % i
                          for i in range(len(hidden_units))]
  hidden_biases_names = [(HIDDEN_BIASES_NAME_PATTERN + '/part_0:0') % i
                         for i in range(len(hidden_units))]
  expected_var_names = (
      hidden_weights_names + hidden_biases_names +
      [LOGITS_WEIGHTS_NAME + '/part_0:0', LOGITS_BIASES_NAME + '/part_0:0'])

  def _minimize(loss, global_step=None, var_list=None):
    """Mock of optimizer.minimize."""
    trainable_vars = var_list or ops.get_collection(
        ops.GraphKeys.TRAINABLE_VARIABLES)
    testcase.assertItemsEqual(expected_var_names,
                              [var.name for var in trainable_vars])

    # Verify loss. We can't check the value directly, so we add an assert op.
      testcase.assertEqual(0, loss.shape.ndims)
    if expected_loss is None:
      if global_step is not None:
        return state_ops.assign_add(global_step, 1).op
      return control_flow_ops.no_op()
    assert_loss = assert_close(
        math_ops.to_float(expected_loss, name='expected'),
        loss,
        name='assert_loss')
    with ops.control_dependencies((assert_loss,)):
      if global_step is not None:
        return state_ops.assign_add(global_step, 1).op
      return control_flow_ops.no_op()

  optimizer_mock = test.mock.NonCallableMagicMock(
      spec=optimizer.Optimizer,
      wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
  optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize)

  return optimizer_mock
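
All of the helpers above rely on the same graph-level pattern: build an assert op on the loss, then gate the global-step increment on it with control_dependencies so that the check actually executes when the estimator runs the returned train op. A self-contained TF 1.x (graph mode) illustration of that pattern, with a made-up loss value and no estimator or mock involved:

import tensorflow as tf

expected_loss = 3.0  # made-up value for the illustration

with tf.Graph().as_default():
  global_step = tf.train.get_or_create_global_step()
  loss = tf.constant(3.0)
  # Graph-level check standing in for assert_close: |loss - expected| < tol.
  assert_loss = tf.assert_less(
      tf.abs(loss - expected_loss), 1e-4, name='assert_loss')
  # The increment op only runs after the assert has passed.
  with tf.control_dependencies((assert_loss,)):
    train_op = tf.assign_add(global_step, 1).op
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)  # raises InvalidArgumentError if the check fails
    print(sess.run(global_step))  # 1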