Example #1
    def test_that_export_fn_is_called(self):
        mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
        mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
        self._set_up_mock_est_to_train_and_evaluate_once(
            mock_est, mock_train_spec)

        def export_fn(estimator, *args, **kwargs):
            del args, kwargs
            estimator.export_fn_was_called = True

        export_strategy = export_strategy_lib.ExportStrategy(
            name='see_whether_export_fn_is_called', export_fn=export_fn)

        eval_spec = training.EvalSpec(input_fn=lambda: 1,
                                      steps=2,
                                      delay_secs=0,
                                      throttle_secs=0,
                                      export_strategies=export_strategy)

        executor = training._TrainingExecutor(mock_est, mock_train_spec,
                                              eval_spec)
        executor.run_evaluator()

        # Verify that export_fn was called on the right estimator.
        self.assertTrue(mock_est.export_fn_was_called)
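
The `spec=estimator_lib.Estimator` argument is what lets this test both call Estimator methods on the mock and attach the ad-hoc `export_fn_was_called` flag. A minimal standalone sketch of that `unittest.mock` behavior, using a hypothetical `Estimator` stand-in class rather than the real one:

from unittest import mock

class Estimator(object):  # hypothetical stand-in for estimator_lib.Estimator
    def train(self): pass
    def evaluate(self): pass

mock_est = mock.Mock(spec=Estimator)
mock_est.train()                       # fine: `train` is part of the spec
mock_est.export_fn_was_called = True   # plain `spec` still allows new attrs
try:
    mock_est.predict()                 # not on the spec -> AttributeError
except AttributeError:
    print('predict is not part of the spec')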
Example #2
  def test_evaluate_multiple_times(self):
    training_max_step = 200

    mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
    mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
    mock_est.evaluate.side_effect = [
        {_GLOBAL_STEP_KEY: training_max_step // 2},
        {_GLOBAL_STEP_KEY: training_max_step}
    ]
    mock_est.latest_checkpoint.side_effect = ['path_1', 'path_2']

    mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
    mock_train_spec.max_steps = training_max_step

    mock_est.times_export_fn_was_called = 0
    def export_fn(estimator, *args, **kwargs):
      del args, kwargs
      estimator.times_export_fn_was_called += 1

    export_strategy = export_strategy_lib.ExportStrategy(
        name='see_whether_export_fn_is_called', export_fn=export_fn)

    eval_spec = training.EvalSpec(
        input_fn=lambda: 1,
        delay_secs=0,
        throttle_secs=0,
        export_strategies=export_strategy)

    executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
    executor.run_evaluator()

    self.assertEqual(2, mock_est.evaluate.call_count)
    self.assertEqual(2, mock_est.times_export_fn_was_called)
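
The two-element `side_effect` list is what makes the evaluator loop run exactly twice: each call to `evaluate` consumes the next value, and the executor stops once the returned global step reaches `max_steps`. A minimal sketch of the `side_effect` mechanics on their own (standard `unittest.mock`, no TensorFlow needed):

from unittest import mock

m = mock.Mock()
m.evaluate.side_effect = [{'global_step': 100}, {'global_step': 200}]
print(m.evaluate())           # {'global_step': 100}
print(m.evaluate())           # {'global_step': 200}
print(m.evaluate.call_count)  # 2
# A third call would raise StopIteration: the iterable is exhausted.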
Example #3
  def test_invalid_train_spec(self):
    estimator = estimator_lib.Estimator(model_fn=lambda features: features)
    invalid_train_spec = object()
    eval_spec = training.EvalSpec(input_fn=lambda: 1)

    with self.assertRaisesRegexp(TypeError, _INVALID_TRAIN_SPEC_MSG):
      training._TrainingExecutor(estimator, invalid_train_spec, eval_spec)
Example #4
  def test_invalid_estimator(self):
    invalid_estimator = object()
    train_spec = training.TrainSpec(input_fn=lambda: 1)
    eval_spec = training.EvalSpec(input_fn=lambda: 1)

    with self.assertRaisesRegexp(TypeError, _INVALID_ESTIMATOR_MSG):
      training._TrainingExecutor(invalid_estimator, train_spec, eval_spec)
Example #5
    def testRequiredArgumentsSet(self):
        estimator = estimator_lib.Estimator(model_fn=lambda features: features)
        train_spec = training.TrainSpec(input_fn=lambda: 1)
        eval_spec = training.EvalSpec(input_fn=lambda: 1)

        executor = training._TrainingExecutor(estimator, train_spec, eval_spec)
        self.assertEqual(estimator, executor.estimator)
Example #6
  def test_runs_in_a_loop_until_max_steps(self):
    mock_est = test.mock.Mock(spec=estimator_lib.Estimator, model_dir='path/')
    mock_est.latest_checkpoint = self.unique_checkpoint_every_time_fn

    mock_est.times_export_fn_was_called = 0
    def export_fn(estimator, *args, **kwargs):
      del args, kwargs
      estimator.times_export_fn_was_called += 1

    export_strategy = export_strategy_lib.ExportStrategy(
        name='see_whether_export_fn_is_called', export_fn=export_fn)

    train_spec = training.TrainSpec(
        input_fn=lambda: 1, max_steps=300, hooks=[_FakeHook()])
    eval_spec = training.EvalSpec(
        input_fn=lambda: 1,
        hooks=[_FakeHook()],
        throttle_secs=100,
        export_strategies=export_strategy)
    # evaluate() should be called 3 times.
    mock_est.evaluate.side_effect = [
        {_GLOBAL_STEP_KEY: train_spec.max_steps - 100},
        {_GLOBAL_STEP_KEY: train_spec.max_steps - 50},
        {_GLOBAL_STEP_KEY: train_spec.max_steps}
    ]

    executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
    executor.run_local()

    self.assertEqual(3, mock_est.train.call_count)
    self.assertEqual(3, mock_est.evaluate.call_count)
    self.assertEqual(3, mock_est.times_export_fn_was_called)
Example #7
    def test_train_and_evaluate_args(self):
        mock_est = test.mock.Mock(spec=estimator_lib.Estimator,
                                  model_dir='path/')
        mock_est.latest_checkpoint.return_value = 'checkpoint_path/'
        train_spec = training.TrainSpec(input_fn=lambda: 1,
                                        max_steps=300,
                                        hooks=[_FakeHook()])
        eval_spec = training.EvalSpec(input_fn=lambda: 1,
                                      steps=2,
                                      hooks=[_FakeHook()],
                                      name='local_eval')
        mock_est.evaluate.return_value = {
            _GLOBAL_STEP_KEY: train_spec.max_steps
        }

        executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
        executor.run_local()

        mock_est.evaluate.assert_called_with(
            name=eval_spec.name,
            input_fn=eval_spec.input_fn,
            steps=eval_spec.steps,
            checkpoint_path='checkpoint_path/',
            hooks=eval_spec.hooks)

        train_args = mock_est.train.call_args[1]
        self.assertEqual(list(train_spec.hooks),
                         list(train_args['hooks'][:-1]))
        self.assertEqual(train_spec.input_fn, train_args['input_fn'])
        self.assertEqual(train_spec.max_steps, train_args['max_steps'])
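
`call_args` records the most recent call as an `(args, kwargs)` pair, which is why the test reads `call_args[1]` for the keyword arguments and strips the internally appended stop hook with `[:-1]`. A standalone sketch of that inspection pattern, with made-up argument values:

from unittest import mock

m = mock.Mock()
m.train(input_fn=lambda: 1, max_steps=300, hooks=['user_hook', 'stop_hook'])

train_args = m.train.call_args[1]                 # kwargs of the last call
assert train_args['max_steps'] == 300
assert train_args['hooks'][:-1] == ['user_hook']  # drop the appended hook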
Example #8
  def testInvalidTypeOfListOfExportStrategies(self):
    with self.assertRaisesRegexp(TypeError, _INVALID_EXPORT_STRATEGY_MSG):
      training.EvalSpec(input_fn=lambda: 1,
                        export_strategies=[
                            _create_fake_export_strategy('a'),
                            _FakeHook()
                        ])
Example #9
    def test_skip_evaluation_due_to_ckpt(self, mock_latest_ckpt):
        training_max_step = 200
        mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
        mock_est.evaluate.side_effect = [
            {_GLOBAL_STEP_KEY: training_max_step // 2},
            {_GLOBAL_STEP_KEY: training_max_step}
        ]
        mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
        mock_train_spec.max_steps = training_max_step

        self._set_up_mock_est_to_train_and_evaluate_once(
            mock_est, mock_train_spec)

        # The first two items are invalid; the next two are the same path.
        mock_latest_ckpt.side_effect = [None, '', 'same', 'same', 'path_2']

        eval_spec = training.EvalSpec(input_fn=lambda: 1,
                                      delay_secs=0,
                                      throttle_secs=0)

        executor = training._TrainingExecutor(mock_est, mock_train_spec,
                                              eval_spec)
        with test.mock.patch.object(logging, 'warning') as mock_log:
            executor.run_evaluator()

        # Three of the five checkpoint paths (None, '', and the repeated
        # 'same') do not trigger an evaluation.
        self.assertEqual(5, mock_latest_ckpt.call_count)
        self.assertEqual(2, mock_est.evaluate.call_count)

        # Two warning logs are expected (the last warning time is reset after
        # a successful evaluation).
        self.assertEqual(2, mock_log.call_count)
Example #10
    def test_evaluate_with_evaluate_spec(self, mock_latest_ckpt):
        latest_ckpt_path = mock_latest_ckpt.return_value

        mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
        mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
        self._set_up_mock_est_to_train_and_evaluate_once(
            mock_est, mock_train_spec)

        eval_spec = training.EvalSpec(input_fn=lambda: 1,
                                      steps=2,
                                      hooks=[_FakeHook()],
                                      name='cont_eval',
                                      delay_secs=0,
                                      throttle_secs=0)

        executor = training._TrainingExecutor(mock_est, mock_train_spec,
                                              eval_spec)
        executor.run_evaluator()

        mock_est.evaluate.assert_called_with(name='cont_eval',
                                             input_fn=eval_spec.input_fn,
                                             steps=eval_spec.steps,
                                             checkpoint_path=latest_ckpt_path,
                                             hooks=eval_spec.hooks)
        self.assertFalse(mock_est.train.called)
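
Note that `assert_called_with` checks only the most recent call; earlier calls need `assert_any_call` or `call_args_list`. A small sketch of that distinction, with hypothetical call arguments:

from unittest import mock

m = mock.Mock()
m.evaluate(name='warmup')
m.evaluate(name='cont_eval', steps=2)

m.evaluate.assert_called_with(name='cont_eval', steps=2)  # last call only
m.evaluate.assert_any_call(name='warmup')                 # any call matches
print(m.evaluate.call_args_list)                          # full call history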
Example #11
    def test_that_export_fn_is_called_with_run_local(self):
        mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
        mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
        mock_train_spec.max_steps = 200
        mock_est.evaluate.return_value = {
            _GLOBAL_STEP_KEY: mock_train_spec.max_steps
        }
        # _validate_hooks would have made sure that train_spec.hooks is []
        # when None was passed.
        mock_train_spec.hooks = []

        def export_fn(estimator, *args, **kwargs):
            del args, kwargs
            estimator.export_fn_was_called = True

        export_strategy = export_strategy_lib.ExportStrategy(
            name='see_whether_export_fn_is_called', export_fn=export_fn)

        eval_spec = training.EvalSpec(input_fn=lambda: 1,
                                      steps=2,
                                      delay_secs=0,
                                      throttle_secs=213,
                                      export_strategies=export_strategy)

        executor = training._TrainingExecutor(mock_est, mock_train_spec,
                                              eval_spec)
        executor.run_local()

        self.assertTrue(mock_est.export_fn_was_called)
Example #12
    def test_errors_out_if_throttle_secs_is_zero(self):
        mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
        train_spec = training.TrainSpec(input_fn=lambda: 1)
        eval_spec = training.EvalSpec(input_fn=lambda: 1, throttle_secs=0)

        executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
        with self.assertRaisesRegexp(ValueError, 'throttle_secs'):
            executor.run_local()
Example #13
  def testMultipleExportStrategiesWithTheSameName(self):
    with self.assertRaisesRegexp(ValueError,
                                 _DUPLICATE_STRATEGY_NAMES_MSG):
      training.EvalSpec(input_fn=lambda: 1,
                        export_strategies=[
                            _create_fake_export_strategy('a'),
                            _create_fake_export_strategy('a')
                        ])
Example #14
  def testListOfExportStrategies(self):
    """Tests that no errors are raised with multiple export strategies."""
    export_strategies = [_create_fake_export_strategy('a'),
                         _create_fake_export_strategy('b')]

    spec = training.EvalSpec(input_fn=lambda: 1,
                             export_strategies=export_strategies)
    self.assertEqual(1, spec.input_fn())
    self.assertEqual(tuple(export_strategies), spec.export_strategies)
Example #15
  def testRequiredArgumentsSet(self):
    """Tests that no errors are raised when all required arguments are set."""
    spec = training.EvalSpec(input_fn=lambda: 1)
    self.assertEqual(1, spec.input_fn())
    self.assertEqual(_DEFAULT_EVAL_STEPS, spec.steps)
    self.assertIsNone(spec.name)
    self.assertEqual(0, len(spec.hooks))
    self.assertEqual(0, len(spec.export_strategies))
    self.assertEqual(_DEFAULT_EVAL_DELAY_SECS, spec.delay_secs)
    self.assertEqual(_DEFAULT_EVAL_THROTTLE_SECS, spec.throttle_secs)
Example #16
  def testAllArgumentsSet(self):
    """Tests that no errors are raised when all arguments are set."""
    hooks = [_FakeHook()]

    # TODO(b/65169058): Replace the export_strategies with valid instances.
    spec = training.EvalSpec(input_fn=lambda: 1, steps=2, name='name',
                             hooks=hooks, export_strategies=hooks,
                             delay_secs=3, throttle_secs=4)
    self.assertEqual(1, spec.input_fn())
    self.assertEqual(2, spec.steps)
    self.assertEqual('name', spec.name)
    self.assertEqual(tuple(hooks), spec.hooks)
    self.assertEqual(tuple(hooks), spec.export_strategies)
    self.assertEqual(3, spec.delay_secs)
    self.assertEqual(4, spec.throttle_secs)
Example #17
  def test_send_stop_at_secs_to_train(self):
    mock_est = test.mock.Mock(spec=estimator_lib.Estimator, model_dir='path/')
    mock_est.latest_checkpoint = self.unique_checkpoint_every_time_fn
    train_spec = training.TrainSpec(
        input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
    eval_spec = training.EvalSpec(
        input_fn=lambda: 1, hooks=[_FakeHook()], throttle_secs=100)
    mock_est.evaluate.return_value = {_GLOBAL_STEP_KEY: train_spec.max_steps}

    executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
    executor.run_local()

    stop_hook = mock_est.train.call_args[1]['hooks'][-1]
    self.assertIsInstance(stop_hook, training._StopAtSecsHook)
    self.assertEqual(eval_spec.throttle_secs, stop_hook._stop_after_secs)
Example #18
  def testAllArgumentsSet(self):
    """Tests that no errors are raised when all arguments are set."""
    hooks = [_FakeHook()]
    export_strategy = _create_fake_export_strategy('a')

    spec = training.EvalSpec(input_fn=lambda: 1, steps=2, name='name',
                             hooks=hooks, export_strategies=export_strategy,
                             delay_secs=3, throttle_secs=4)
    self.assertEqual(1, spec.input_fn())
    self.assertEqual(2, spec.steps)
    self.assertEqual('name', spec.name)
    self.assertEqual(tuple(hooks), spec.hooks)
    self.assertEqual((export_strategy,), spec.export_strategies)
    self.assertEqual(3, spec.delay_secs)
    self.assertEqual(4, spec.throttle_secs)
Example #19
  def test_throttle_secs(self, mock_sleep, mock_time):
    throttle_secs = 123
    operation_secs = 12

    mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
    mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
    self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)

    eval_spec = training.EvalSpec(
        input_fn=lambda: 1, delay_secs=0, throttle_secs=throttle_secs)

    mock_time.side_effect = [921, 921 + operation_secs]

    executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
    # Disable logging as it calls time.time also.
    with test.mock.patch.object(logging, 'info'):
      executor.run_evaluator()
    mock_sleep.assert_called_with(throttle_secs - operation_secs)
    self.assertTrue(mock_est.evaluate.called)
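
The two-element `side_effect` on `time.time` pins the measured evaluation duration to 12 seconds, so the expected sleep is `123 - 12 = 111` seconds. A self-contained sketch of the same clock-patching idea, with `throttled_op` as a hypothetical stand-in for the executor's throttling loop:

import time
from unittest import mock

def throttled_op(throttle_secs):
    start = time.time()
    # ... the evaluation itself would run here ...
    elapsed = time.time() - start
    if elapsed < throttle_secs:
        time.sleep(throttle_secs - elapsed)

with mock.patch.object(time, 'sleep') as mock_sleep, \
     mock.patch.object(time, 'time') as mock_time:
    mock_time.side_effect = [921, 921 + 12]  # start and end of "evaluation"
    throttled_op(throttle_secs=123)
    mock_sleep.assert_called_with(111)       # 123 - 12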
Example #20
  def test_sleep_delay_secs(self):
    training_max_step = 200
    delay_secs = 123

    mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
    mock_est.evaluate.return_value = {_GLOBAL_STEP_KEY: training_max_step}
    mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
    mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
    mock_train_spec.max_steps = training_max_step

    eval_spec = training.EvalSpec(
        input_fn=lambda: 1, steps=2, hooks=[_FakeHook()], name='cont_eval',
        delay_secs=delay_secs, throttle_secs=0)

    executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
    with test.mock.patch.object(time, 'sleep') as mock_sleep:
      executor.run_evaluator()
      mock_sleep.assert_called_with(delay_secs)
      self.assertTrue(mock_est.evaluate.called)
Example #21
    def _complete_flow(self,
                       train_distribute,
                       eval_distribute,
                       remote_cluster=None):
        estimator = self._get_estimator(train_distribute, eval_distribute,
                                        remote_cluster)

        input_dimension = LABEL_DIMENSION
        train_input_fn = self.dataset_input_fn(
            x={"x": DATA},
            y=DATA,
            batch_size=BATCH_SIZE // len(train_distribute.worker_devices),
            shuffle=True)
        if eval_distribute:
            eval_batch_size = BATCH_SIZE // len(eval_distribute.worker_devices)
        else:
            eval_batch_size = BATCH_SIZE
        eval_input_fn = self.dataset_input_fn(x={"x": DATA},
                                              y=DATA,
                                              batch_size=eval_batch_size,
                                              shuffle=False)

        linear_feature_columns = [
            feature_column.numeric_column("x", shape=(input_dimension, ))
        ]
        dnn_feature_columns = [
            feature_column.numeric_column("x", shape=(input_dimension, ))
        ]
        feature_columns = linear_feature_columns + dnn_feature_columns

        estimator_training.train_and_evaluate(
            estimator,
            estimator_training.TrainSpec(train_input_fn, max_steps=MAX_STEPS),
            estimator_training.EvalSpec(name=EVAL_NAME,
                                        input_fn=eval_input_fn,
                                        steps=None,
                                        exporters=self._get_exporter(
                                            EXPORTER_NAME, feature_columns),
                                        start_delay_secs=0,
                                        throttle_secs=1))
        return estimator
Example #22
    def test_evaluate_multiple_times(self, mock_latest_ckpt):
        training_max_step = 200

        mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
        mock_est.evaluate.side_effect = [
            {_GLOBAL_STEP_KEY: training_max_step // 2},
            {_GLOBAL_STEP_KEY: training_max_step}
        ]
        mock_latest_ckpt.side_effect = ['path_1', 'path_2']

        mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
        mock_train_spec.max_steps = training_max_step

        eval_spec = training.EvalSpec(input_fn=lambda: 1,
                                      delay_secs=0,
                                      throttle_secs=0)

        executor = training._TrainingExecutor(mock_est, mock_train_spec,
                                              eval_spec)
        executor.run_evaluator()
        self.assertEqual(2, mock_est.evaluate.call_count)
Example #23
    def test_runs_in_a_loop_until_max_steps(self):
        mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
        train_spec = training.TrainSpec(input_fn=lambda: 1,
                                        max_steps=300,
                                        hooks=[_FakeHook()])
        eval_spec = training.EvalSpec(input_fn=lambda: 1,
                                      hooks=[_FakeHook()],
                                      throttle_secs=100)
        # evaluate() should be called 3 times.
        mock_est.evaluate.side_effect = [
            {_GLOBAL_STEP_KEY: train_spec.max_steps - 100},
            {_GLOBAL_STEP_KEY: train_spec.max_steps - 50},
            {_GLOBAL_STEP_KEY: train_spec.max_steps}
        ]

        executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
        executor.run_local()

        self.assertEqual(3, mock_est.train.call_count)
        self.assertEqual(3, mock_est.evaluate.call_count)
Example #24
  def _complete_flow(self,
                     train_distribute,
                     eval_distribute,
                     remote_cluster=None,
                     use_train_and_evaluate=True):
    estimator = self._get_estimator(train_distribute, eval_distribute,
                                    remote_cluster)

    input_dimension = LABEL_DIMENSION
    train_input_fn = self.dataset_input_fn(
        x={"x": DATA},
        y=DATA,
        batch_size=BATCH_SIZE // train_distribute.num_replicas_in_sync,
        shuffle=True)
    if eval_distribute:
      eval_batch_size = BATCH_SIZE // eval_distribute.num_replicas_in_sync
    else:
      eval_batch_size = BATCH_SIZE
    eval_input_fn = self.dataset_input_fn(
        x={"x": DATA}, y=DATA, batch_size=eval_batch_size, shuffle=False)

    linear_feature_columns = [
        feature_column.numeric_column("x", shape=(input_dimension,))
    ]
    dnn_feature_columns = [
        feature_column.numeric_column("x", shape=(input_dimension,))
    ]
    feature_columns = linear_feature_columns + dnn_feature_columns

    eval_spec = estimator_training.EvalSpec(
        name=EVAL_NAME,
        input_fn=eval_input_fn,
        steps=None,
        exporters=self._get_exporter(EXPORTER_NAME, feature_columns),
        start_delay_secs=0,
        throttle_secs=1)

    if use_train_and_evaluate:
      estimator_training.train_and_evaluate(
          estimator,
          estimator_training.TrainSpec(train_input_fn, max_steps=MAX_STEPS),
          eval_spec)
    else:
      estimator.train(train_input_fn, max_steps=MAX_STEPS)

      latest_ckpt_path = estimator.latest_checkpoint()
      metrics = estimator.evaluate(eval_input_fn,
                                   checkpoint_path=latest_ckpt_path,
                                   name=EVAL_NAME)

      # Export the eval result to files.
      eval_result = estimator_training._EvalResult(
          status=estimator_training._EvalStatus.EVALUATED,
          metrics=metrics,
          checkpoint_path=latest_ckpt_path)
      evaluator = estimator_training._TrainingExecutor._Evaluator(estimator,
                                                                  eval_spec,
                                                                  None)
      evaluator._export_eval_result(eval_result, True)

    return estimator
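
The `BATCH_SIZE // num_replicas_in_sync` division splits the global batch evenly across replicas; for the split to be exact, the global batch size should be a multiple of the replica count. The arithmetic in isolation, with hypothetical values:

BATCH_SIZE = 64            # hypothetical global batch size
num_replicas_in_sync = 4   # hypothetical replica count

per_replica_batch = BATCH_SIZE // num_replicas_in_sync
print(per_replica_batch)   # 16
# The split is even only when BATCH_SIZE is a multiple of the replica count:
assert per_replica_batch * num_replicas_in_sync == BATCH_SIZE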
Example #25
    def test_complete_flow_with_mode(self, distribution,
                                     use_train_and_evaluate):
        label_dimension = 2
        input_dimension = label_dimension
        batch_size = 10
        data = np.linspace(0.,
                           2.,
                           batch_size * label_dimension,
                           dtype=np.float32)
        data = data.reshape(batch_size, label_dimension)
        train_input_fn = self.dataset_input_fn(
            x={'x': data},
            y=data,
            batch_size=batch_size // distribution.num_replicas_in_sync,
            shuffle=True)
        eval_input_fn = self.dataset_input_fn(
            x={'x': data},
            y=data,
            batch_size=batch_size // distribution.num_replicas_in_sync,
            shuffle=False)
        predict_input_fn = numpy_io.numpy_input_fn(x={'x': data},
                                                   batch_size=batch_size,
                                                   shuffle=False)

        linear_feature_columns = [
            feature_column.numeric_column('x', shape=(input_dimension, ))
        ]
        dnn_feature_columns = [
            feature_column.numeric_column('x', shape=(input_dimension, ))
        ]
        feature_columns = linear_feature_columns + dnn_feature_columns
        estimator = dnn_linear_combined.DNNLinearCombinedRegressor(
            linear_feature_columns=linear_feature_columns,
            dnn_hidden_units=(2, 2),
            dnn_feature_columns=dnn_feature_columns,
            label_dimension=label_dimension,
            model_dir=self._model_dir,
            # TODO(isaprykin): Work around the colocate_with error.
            dnn_optimizer=adagrad.AdagradOptimizer(0.001),
            linear_optimizer=adagrad.AdagradOptimizer(0.001),
            config=run_config.RunConfig(train_distribute=distribution,
                                        eval_distribute=distribution))

        num_steps = 10
        if use_train_and_evaluate:
            scores, _ = training.train_and_evaluate(
                estimator,
                training.TrainSpec(train_input_fn, max_steps=num_steps),
                training.EvalSpec(eval_input_fn))
        else:
            estimator.train(train_input_fn, steps=num_steps)
            scores = estimator.evaluate(eval_input_fn)

        self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
        self.assertIn('loss', scores)

        predictions = np.array([
            x[prediction_keys.PredictionKeys.PREDICTIONS]
            for x in estimator.predict(predict_input_fn)
        ])
        self.assertAllEqual((batch_size, label_dimension), predictions.shape)

        feature_spec = feature_column.make_parse_example_spec(feature_columns)
        serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
            feature_spec)
        export_dir = estimator.export_saved_model(tempfile.mkdtemp(),
                                                  serving_input_receiver_fn)
        self.assertTrue(gfile.Exists(export_dir))
Example #26
    def test_complete_flow_with_mode(self, distribution,
                                     use_train_and_evaluate):
        label_dimension = 2
        input_dimension = label_dimension
        batch_size = 10
        data = np.linspace(0.,
                           2.,
                           batch_size * label_dimension,
                           dtype=np.float32)
        data = data.reshape(batch_size, label_dimension)
        train_input_fn = self.dataset_input_fn(
            x={'x': data},
            y=data,
            batch_size=batch_size // len(distribution.worker_devices))
        eval_input_fn = self.dataset_input_fn(x={'x': data},
                                              y=data,
                                              batch_size=batch_size //
                                              len(distribution.worker_devices))
        predict_input_fn = numpy_io.numpy_input_fn(x={'x': data},
                                                   batch_size=batch_size,
                                                   shuffle=False)

        linear_feature_columns = [
            feature_column.numeric_column('x', shape=(input_dimension, ))
        ]
        dnn_feature_columns = [
            feature_column.numeric_column('x', shape=(input_dimension, ))
        ]
        feature_columns = linear_feature_columns + dnn_feature_columns
        session_config = config_pb2.ConfigProto(log_device_placement=True,
                                                allow_soft_placement=True)
        estimator = dnn_linear_combined.DNNLinearCombinedRegressor(
            linear_feature_columns=linear_feature_columns,
            dnn_hidden_units=(2, 2),
            dnn_feature_columns=dnn_feature_columns,
            label_dimension=label_dimension,
            model_dir=self._model_dir,
            dnn_optimizer=adam.Adam(0.001),
            linear_optimizer=adam.Adam(0.001),
            config=run_config.RunConfig(train_distribute=distribution,
                                        eval_distribute=distribution,
                                        session_config=session_config))

        num_steps = 2
        if use_train_and_evaluate:
            scores, _ = training.train_and_evaluate(
                estimator,
                training.TrainSpec(train_input_fn, max_steps=num_steps),
                training.EvalSpec(eval_input_fn))
        else:
            estimator.train(train_input_fn, steps=num_steps)
            scores = estimator.evaluate(eval_input_fn)

        self.assertIn('loss', six.iterkeys(scores))

        predictions = np.array([
            x[prediction_keys.PredictionKeys.PREDICTIONS]
            for x in estimator.predict(predict_input_fn)
        ])
        self.assertAllEqual((batch_size, label_dimension), predictions.shape)

        feature_spec = feature_column.make_parse_example_spec(feature_columns)
        serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
            feature_spec)
        export_dir = estimator.export_savedmodel(tempfile.mkdtemp(),
                                                 serving_input_receiver_fn)
        self.assertTrue(gfile.Exists(export_dir))
Example #27
  def testInvalidTypeOfIndividualExportStrategy(self):
    with self.assertRaisesRegexp(TypeError, _INVALID_EXPORT_STRATEGY_MSG):
      training.EvalSpec(input_fn=lambda: 1,
                        export_strategies=_FakeHook())
Example #28
  def testInvalidThrottleSecs(self):
    with self.assertRaisesRegexp(ValueError,
                                 _INVALID_EVAL_THROTTLE_SECS_MSG):
      training.EvalSpec(input_fn=lambda: 1, throttle_secs=-1)
Example #29
  def testInvalidDelaySecs(self):
    with self.assertRaisesRegexp(ValueError, _INVALID_EVAL_DELAY_SECS_MSG):
      training.EvalSpec(input_fn=lambda: 1, delay_secs=-1)
Example #30
  def testInvalidHook(self):
    with self.assertRaisesRegexp(TypeError, _INVALID_HOOK_MSG):
      training.EvalSpec(input_fn=lambda: 1, hooks=[_InvalidHook()])
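
These validation tests all use `assertRaisesRegexp`, which was renamed `assertRaisesRegex` in Python 3.2; the old alias is deprecated and was removed in Python 3.12. A self-contained sketch of the same pattern on modern Python, with `make_eval_spec` as a hypothetical stand-in for `training.EvalSpec`'s argument validation:

import unittest

def make_eval_spec(delay_secs=0):
    # Hypothetical stand-in for EvalSpec's argument validation.
    if delay_secs < 0:
        raise ValueError('delay_secs must be >= 0, got %d.' % delay_secs)
    return {'delay_secs': delay_secs}

class EvalSpecValidationSketch(unittest.TestCase):
    def test_invalid_delay_secs(self):
        with self.assertRaisesRegex(ValueError, 'delay_secs'):
            make_eval_spec(delay_secs=-1)

if __name__ == '__main__':
    unittest.main()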