# Example 1
    def test_load_saved_model_from_serving_only(self):
        """A model exported with only a serving signature can still predict."""

        def constant_model_fn(features, labels, mode):
            # Inputs are ignored; every output of this model is a constant.
            del features, labels
            outputs = {
                'test': export_output.ClassificationOutput(
                    constant_op.constant([[32.]]))
            }
            return model_fn_lib.EstimatorSpec(
                mode,
                predictions=constant_op.constant([502]),
                loss=constant_op.constant([103]),
                train_op=state_ops.assign_add(training.get_global_step(), 1),
                export_outputs=outputs)

        est = estimator.Estimator(constant_model_fn, self._get_tmp_dir())
        est.train(input_fn=dummy_input_fn, steps=10)

        def receiver_fn():
            features = {'test-features': constant_op.constant([[1], [1]])}
            return export.ServingInputReceiver(
                features, array_ops.placeholder(dtype=dtypes.string))

        export_dir = est.export_savedmodel(self._get_tmp_dir(), receiver_fn)

        sme = saved_model_estimator.SavedModelEstimator(
            export_dir, self._get_tmp_dir())

        def predict_input_fn():
            return {'inputs': constant_op.constant('someinputstr')}

        first_prediction = next(sme.predict(predict_input_fn))
        self.assertDictEqual({'scores': 32}, first_prediction)
# Example 2
    def test_with_local_init_op(self):
        """A custom Scaffold local_init_op is run by the loaded estimator."""

        def model_fn(features, labels, mode):
            del features, labels
            # The local init op subtracts 3 from the variable every time a
            # session is initialized, which the loss assertions below observe.
            some_var = variables.Variable(21, name='some_var')
            local_init = state_ops.assign_add(some_var, -3).op
            return model_fn_lib.EstimatorSpec(
                mode,
                loss=array_ops.identity(some_var),
                train_op=state_ops.assign_add(training.get_global_step(), 1),
                scaffold=monitored_session.Scaffold(local_init_op=local_init))

        export_dir = self._export_estimator(predict=False, model_fn=model_fn)
        sme = saved_model_estimator.SavedModelEstimator(
            export_dir, self._get_tmp_dir())

        first_eval = sme.evaluate(dummy_input_fn, steps=2)
        self.assertEqual(15, first_eval['loss'])

        sme.train(dummy_input_fn, steps=1)
        self.assertEqual(15, sme.get_variable_value('some_var'))

        second_eval = sme.evaluate(dummy_input_fn, steps=5)
        self.assertEqual(12, second_eval['loss'])
# Example 3
    def test_re_export_saved_model(self):
        """Re-exports a loaded SavedModel for all modes and verifies it.

        Loads an exported Estimator, trains it further, exports SavedModels
        for train/eval/predict, then checks that the re-exported model
        reproduces the same variables, metrics, and predictions.
        """
        sme = saved_model_estimator.SavedModelEstimator(
            self._export_estimator(), self._get_tmp_dir())
        self.assertDictEqual(
            {
                'loss': 106,
                'metrics/abs_err': 502,
                'global_step': 10
            }, sme.evaluate(dummy_input_fn, steps=1))

        sme.train(dummy_input_fn, steps=3)
        self.assertDictEqual(
            {
                'loss': 106,
                'metrics/abs_err': 502,
                'global_step': 13
            }, sme.evaluate(dummy_input_fn, steps=1))
        self.assertEqual(60, sme.get_variable_value('some_var'))

        predictions = next(sme.predict(dummy_input_fn_features_only))
        self.assertDictEqual({'output': 503}, predictions)

        # Export SavedModel for all modes
        input_receiver_fn_map = {
            model_fn_lib.ModeKeys.TRAIN: dummy_supervised_receiver_fn(),
            model_fn_lib.ModeKeys.EVAL: dummy_supervised_receiver_fn(),
            model_fn_lib.ModeKeys.PREDICT: dummy_serving_receiver_fn()
        }
        sme_export_dir = contrib_export.export_all_saved_models(
            sme, self._get_tmp_dir(), input_receiver_fn_map)

        sme2 = saved_model_estimator.SavedModelEstimator(
            sme_export_dir, self._get_tmp_dir())
        # Fix: these assertions previously ran against `sme` instead of the
        # re-exported `sme2`, so the re-exported model was never verified.
        self.assertDictEqual(
            {
                'loss': 106,
                'metrics/abs_err': 502,
                'global_step': 13
            }, sme2.evaluate(dummy_input_fn, steps=1))
        self.assertEqual(60, sme2.get_variable_value('some_var'))

        # Training the re-exported model continues from global_step 13.
        sme2.train(dummy_input_fn, steps=7)
        self.assertEqual(20, sme2.get_variable_value('global_step'))

        predictions = next(sme2.predict(dummy_input_fn_features_only))
        self.assertDictEqual({'output': 503}, predictions)
# Example 4
    def test_partial_exported_estimator(self):
        """Modes missing from the SavedModel raise RuntimeError when used."""
        # Exported with eval only: train and predict must fail.
        eval_only = saved_model_estimator.SavedModelEstimator(
            self._export_estimator(train=False, predict=False),
            self._get_tmp_dir())
        eval_only.evaluate(dummy_input_fn, steps=5)
        with self.assertRaisesRegexp(RuntimeError,
                                     'train mode is not available'):
            eval_only.train(input_fn=dummy_input_fn, steps=1)
        with self.assertRaisesRegexp(RuntimeError,
                                     'infer mode is not available'):
            next(eval_only.predict(dummy_input_fn_features_only))

        # Exported without eval: train and predict work, eval must fail.
        no_eval = saved_model_estimator.SavedModelEstimator(
            self._export_estimator(evaluate=False), self._get_tmp_dir())
        no_eval.train(input_fn=dummy_input_fn, steps=1)
        next(no_eval.predict(dummy_input_fn_features_only))
        with self.assertRaisesRegexp(RuntimeError,
                                     'eval mode is not available'):
            no_eval.evaluate(dummy_input_fn, steps=5)
# Example 5
    def test_load_all_modes_no_train(self):
        """Ensure that all functions can be used without requiring a ckpt."""
        sme = saved_model_estimator.SavedModelEstimator(
            self._export_estimator(), self._get_tmp_dir())

        metrics = sme.evaluate(dummy_input_fn, steps=5)
        self.assertEqual(106, metrics['loss'])
        self.assertEqual(502, metrics['metrics/abs_err'])
        self.assertEqual(10, metrics['global_step'])

        first_prediction = next(sme.predict(dummy_input_fn_features_only))
        self.assertDictEqual({'output': 503}, first_prediction)
# Example 6
    def test_re_export_saved_model_serving_only(self):
        """Re-exports a serving-only SavedModel and verifies its state."""
        sme = saved_model_estimator.SavedModelEstimator(
            self._export_estimator(), self._get_tmp_dir())
        sme.train(dummy_input_fn, steps=3)
        self.assertEqual(13, sme.get_variable_value('global_step'))
        self.assertEqual(60, sme.get_variable_value('some_var'))

        predictions = next(sme.predict(dummy_input_fn_features_only))
        self.assertDictEqual({'output': 503}, predictions)

        # Export SavedModel, and test that the variable and prediction values are
        # the same.
        sme_export_dir = sme.export_savedmodel(self._get_tmp_dir(),
                                               dummy_serving_receiver_fn())

        sme2 = saved_model_estimator.SavedModelEstimator(
            sme_export_dir, self._get_tmp_dir())
        # Fix: these assertions previously queried `sme` rather than the
        # re-exported `sme2`, so the re-exported variables were never checked.
        self.assertEqual(60, sme2.get_variable_value('some_var'))
        self.assertEqual(13, sme2.get_variable_value('global_step'))

        predictions = next(sme2.predict(dummy_input_fn_features_only))
        self.assertDictEqual({'output': 503}, predictions)
# Example 7
    def test_input_fn_with_global_step(self):
        """An input_fn that creates a global step tensor must be rejected."""
        sme = saved_model_estimator.SavedModelEstimator(
            self._export_estimator(), self._get_tmp_dir())

        def bad_input_fn():
            # Creating a global step in the input graph conflicts with the
            # one restored from the SavedModel.
            training.get_or_create_global_step()
            features = {
                'x': constant_op.constant([[1], [1]], dtype=dtypes.int64)
            }
            labels = constant_op.constant([[1], [1]], dtype=dtypes.float32)
            return dataset_ops.Dataset.from_tensors((features, labels))

        with self.assertRaisesRegexp(
                RuntimeError, 'Graph must not contain a global step tensor'):
            sme.train(bad_input_fn, steps=1)
# Example 8
    def test_load_all_modes(self):
        """Train, evaluate, and predict all work on a fully exported model."""
        sme = saved_model_estimator.SavedModelEstimator(
            self._export_estimator(), self._get_tmp_dir())

        # Two training calls continue from the exported step (13 - 3 = 10).
        sme.train(input_fn=dummy_input_fn, steps=1)
        sme.train(input_fn=dummy_input_fn, steps=2)
        self.assertEqual(13, sme.get_variable_value('global_step'))
        self.assertEqual(60, sme.get_variable_value('some_var'))

        metrics = sme.evaluate(dummy_input_fn, steps=5)
        self.assertEqual(106, metrics['loss'])
        self.assertEqual(502, metrics['metrics/abs_err'])
        self.assertEqual(13, metrics['global_step'])

        first_prediction = next(sme.predict(dummy_input_fn_features_only))
        self.assertDictEqual({'output': 503}, first_prediction)
# Example 9
    def test_control_dependency(self):
        # Control dependencies are saved with "^" appended to the start of the
        # input name. The input map must include control dependencies as well.
        def model_fn(features, labels, mode):
            del labels
            with ops.control_dependencies([features['x']]):
                loss = features['x'][1][0]
            return model_fn_lib.EstimatorSpec(
                mode,
                loss=loss,
                train_op=state_ops.assign_add(training.get_global_step(), 1))

        sme = saved_model_estimator.SavedModelEstimator(
            self._export_estimator(
                train=False, predict=False, model_fn=model_fn),
            self._get_tmp_dir())
        sme.evaluate(dummy_input_fn, steps=1)  # Should run without error
# Example 10
    def test_with_working_input_fn(self):
        """Features from the input_fn flow through to loss and predictions."""

        def model_fn(features, labels, mode):
            # Loss is only computable when labels are present (train/eval).
            loss = labels[0][0] + labels[1][0] if labels is not None else None
            predictions = {
                'features_0': array_ops.identity([features['x'][0][0]]),
                'features_1': array_ops.identity([features['x'][1][0]])
            }
            return model_fn_lib.EstimatorSpec(
                mode,
                loss=loss,
                train_op=state_ops.assign_add(training.get_global_step(), 1),
                predictions=predictions)

        sme = saved_model_estimator.SavedModelEstimator(
            self._export_estimator(model_fn=model_fn), self._get_tmp_dir())

        metrics = sme.evaluate(dummy_input_fn, steps=1)
        self.assertEqual(1, metrics['loss'])

        first_prediction = next(sme.predict(dummy_input_fn_features_only))
        self.assertDictEqual({'features_0': 5, 'features_1': 6},
                             first_prediction)
# Example 11
    def test_with_incorrect_input(self):
        """Inputs with the wrong shape or dtype raise descriptive errors."""
        sme = saved_model_estimator.SavedModelEstimator(
            self._export_estimator(), self._get_tmp_dir())

        def bad_shape_input_fn():
            # Rank-1 tensors instead of the expected [[.], [.]] shape.
            features = {'x': constant_op.constant([1, 2], dtype=dtypes.int64)}
            labels = constant_op.constant([1, 2], dtype=dtypes.float32)
            return dataset_ops.Dataset.from_tensors((features, labels))

        with self.assertRaisesRegexp(ValueError, 'Expected shape'):
            sme.train(bad_shape_input_fn, steps=1)

        def bad_dtype_input_fn():
            # int32/int64 instead of the expected int64/float32 dtypes.
            features = {
                'x': constant_op.constant([[1], [1]], dtype=dtypes.int32)
            }
            labels = constant_op.constant([[1], [1]], dtype=dtypes.int64)
            return dataset_ops.Dataset.from_tensors((features, labels))

        with self.assertRaisesRegexp(ValueError, 'Expected dtype'):
            sme.train(bad_dtype_input_fn, steps=1)