def test_partial_exported_estimator(self):
  """Modes omitted at export time must raise when used on the loaded model."""
  # Exported with only the eval graph: evaluate works, train/predict raise.
  eval_only = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(train=False, predict=False), self._get_tmp_dir())
  eval_only.evaluate(dummy_input_fn, steps=5)
  with self.assertRaisesRegexp(RuntimeError, 'train mode is not available'):
    eval_only.train(input_fn=dummy_input_fn, steps=1)
  with self.assertRaisesRegexp(RuntimeError, 'infer mode is not available'):
    next(eval_only.predict(dummy_input_fn_features_only))

  # Exported without the eval graph: train/predict work, evaluate raises.
  no_eval = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(evaluate=False), self._get_tmp_dir())
  no_eval.train(input_fn=dummy_input_fn, steps=1)
  next(no_eval.predict(dummy_input_fn_features_only))
  with self.assertRaisesRegexp(RuntimeError, 'eval mode is not available'):
    no_eval.evaluate(dummy_input_fn, steps=5)
def test_with_local_init_op(self):
  """The exported Scaffold's local_init_op must run when sessions start."""
  def model_fn(features, labels, mode):
    _, _ = features, labels
    some_var = variables.Variable(21, name='some_var')
    # Each session (re)initialization subtracts 3 from the variable, so the
    # loss values below track how many times the init op has fired.
    scaffold = monitored_session.Scaffold(
        local_init_op=state_ops.assign_add(some_var, -3).op)
    return model_fn_lib.EstimatorSpec(
        mode,
        scaffold=scaffold,
        train_op=state_ops.assign_add(training.get_global_step(), 1),
        loss=array_ops.identity(some_var))

  export_dir = self._export_estimator(predict=False, model_fn=model_fn)
  sme = saved_model_estimator.SavedModelEstimator(
      export_dir, self._get_tmp_dir())

  first_eval = sme.evaluate(dummy_input_fn, steps=2)
  self.assertEqual(15, first_eval['loss'])

  sme.train(dummy_input_fn, steps=1)
  self.assertEqual(15, sme.get_variable_value('some_var'))

  second_eval = sme.evaluate(dummy_input_fn, steps=5)
  self.assertEqual(12, second_eval['loss'])
def test_load_saved_model_from_serving_only(self):
  """A serving-only SavedModel (export_saved_model) is loadable for predict."""
  def model_fn(features, labels, mode):
    _, _ = features, labels
    return model_fn_lib.EstimatorSpec(
        mode,
        loss=constant_op.constant([103]),
        train_op=state_ops.assign_add(training.get_global_step(), 1),
        predictions=constant_op.constant([502]),
        export_outputs={
            'test':
                export_lib.ClassificationOutput(constant_op.constant([[32.]]))
        })

  est = estimator.Estimator(model_fn, self._get_tmp_dir())
  est.train(input_fn=dummy_input_fn, steps=10)

  def serving_input_receiver_fn():
    return export_lib.ServingInputReceiver(
        {'test-features': constant_op.constant([[1], [1]])},
        array_ops.placeholder(dtype=dtypes.string))

  export_dir = est.export_saved_model(
      self._get_tmp_dir(), serving_input_receiver_fn)
  sme = saved_model_estimator.SavedModelEstimator(
      export_dir, self._get_tmp_dir())

  def input_fn():
    return {'inputs': constant_op.constant('someinputstr')}

  # The serving signature surfaces the classification score defined above.
  prediction = next(sme.predict(input_fn))
  self.assertDictEqual({'scores': 32}, prediction)
def test_re_export_saved_model(self):
  """A loaded estimator re-exported for all modes must work when re-loaded.

  The re-exported SavedModel (`sme2`) should carry the trained variable
  values (global_step=13, some_var=60) and support train/eval/predict.
  """
  sme = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(), self._get_tmp_dir())
  self.assertDictEqual(
      {'loss': 106, 'metrics/abs_err': 502, 'global_step': 10},
      sme.evaluate(dummy_input_fn, steps=1))

  sme.train(dummy_input_fn, steps=3)
  self.assertDictEqual(
      {'loss': 106, 'metrics/abs_err': 502, 'global_step': 13},
      sme.evaluate(dummy_input_fn, steps=1))
  self.assertEqual(60, sme.get_variable_value('some_var'))

  predictions = next(sme.predict(dummy_input_fn_features_only))
  self.assertDictEqual({'output': 503}, predictions)

  # Export SavedModel for all modes
  input_receiver_fn_map = {
      ModeKeys.TRAIN: dummy_supervised_receiver_fn(),
      ModeKeys.EVAL: dummy_supervised_receiver_fn(),
      ModeKeys.PREDICT: dummy_serving_receiver_fn()
  }
  sme_export_dir = sme.experimental_export_all_saved_models(
      self._get_tmp_dir(), input_receiver_fn_map)

  sme2 = saved_model_estimator.SavedModelEstimator(
      sme_export_dir, self._get_tmp_dir())
  # Fix: these assertions previously called `sme`, so the re-exported
  # estimator's eval/train paths were never actually exercised. They must
  # run against `sme2`.
  self.assertDictEqual(
      {'loss': 106, 'metrics/abs_err': 502, 'global_step': 13},
      sme2.evaluate(dummy_input_fn, steps=1))
  self.assertEqual(60, sme2.get_variable_value('some_var'))

  # Training resumes from the exported step: 13 + 7 = 20.
  sme2.train(dummy_input_fn, steps=7)
  self.assertEqual(20, sme2.get_variable_value('global_step'))

  predictions = next(sme2.predict(dummy_input_fn_features_only))
  self.assertDictEqual({'output': 503}, predictions)
def test_load_all_modes_no_train(self):
  """Ensure that all functions can be used without requiring a ckpt."""
  sme = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(), self._get_tmp_dir())

  # Evaluation runs directly off the SavedModel's exported values.
  metrics = sme.evaluate(dummy_input_fn, steps=5)
  self.assertEqual(10, metrics['global_step'])
  self.assertEqual(106, metrics['loss'])
  self.assertEqual(502, metrics['metrics/abs_err'])

  # Prediction likewise works without any prior train() call.
  prediction = next(sme.predict(dummy_input_fn_features_only))
  self.assertDictEqual({'output': 503}, prediction)
def test_re_export_saved_model_serving_only(self):
  """Re-exports only the serving graph and checks predictions still match."""
  sme = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(), self._get_tmp_dir())
  sme.train(dummy_input_fn, steps=3)
  self.assertEqual(13, sme.get_variable_value('global_step'))
  self.assertEqual(60, sme.get_variable_value('some_var'))
  self.assertDictEqual(
      {'output': 503}, next(sme.predict(dummy_input_fn_features_only)))

  # Export SavedModel, and test that the variable and prediction values are
  # the same.
  sme_export_dir = sme.export_saved_model(
      self._get_tmp_dir(), dummy_serving_receiver_fn())

  sme2 = saved_model_estimator.SavedModelEstimator(
      sme_export_dir, self._get_tmp_dir())
  # NOTE(review): these two variable checks read from the original `sme`,
  # not the re-exported `sme2` — presumably because a serving-only export
  # carries no trainable checkpoint state; confirm intent.
  self.assertEqual(60, sme.get_variable_value('some_var'))
  self.assertEqual(13, sme.get_variable_value('global_step'))
  self.assertDictEqual(
      {'output': 503}, next(sme2.predict(dummy_input_fn_features_only)))
def test_input_fn_with_global_step(self):
  """An input_fn that creates its own global step must be rejected."""
  sme = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(), self._get_tmp_dir())

  def bad_input_fn():
    # Creating a global step tensor inside the input graph conflicts with
    # the one restored from the SavedModel.
    tf.compat.v1.train.get_or_create_global_step()
    feature_dict = {'x': tf.constant([[1], [1]], dtype=tf.dtypes.int64)}
    label_tensor = tf.constant([[1], [1]], dtype=tf.dtypes.float32)
    return tf.compat.v1.data.Dataset.from_tensors(
        (feature_dict, label_tensor))

  with self.assertRaisesRegexp(RuntimeError,
                               'Graph must not contain a global step tensor'):
    sme.train(bad_input_fn, steps=1)
def test_control_dependency(self):
  # Control dependencies are saved with "^" appended to the start of the input
  # name. The input map must include control dependencies as well.
  def model_fn(features, labels, mode):
    _ = labels
    # Force a control edge on features['x'] so that loading has to remap it.
    with ops.control_dependencies([features['x']]):
      loss = features['x'][1][0]
    return model_fn_lib.EstimatorSpec(
        mode,
        loss=loss,
        train_op=state_ops.assign_add(training.get_global_step(), 1))

  sme = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(train=False, predict=False, model_fn=model_fn),
      self._get_tmp_dir())
  sme.evaluate(dummy_input_fn, steps=1)  # Should run without error
def test_load_all_modes(self):
  """Train, evaluate, and predict all work on a fully exported SavedModel."""
  sme = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(), self._get_tmp_dir())

  # Two training calls advance the exported step (10) by 1 + 2 = 3.
  sme.train(input_fn=dummy_input_fn, steps=1)
  sme.train(input_fn=dummy_input_fn, steps=2)
  self.assertEqual(13, sme.get_variable_value('global_step'))
  self.assertEqual(60, sme.get_variable_value('some_var'))

  metrics = sme.evaluate(dummy_input_fn, steps=5)
  self.assertEqual(13, metrics['global_step'])
  self.assertEqual(106, metrics['loss'])
  self.assertEqual(502, metrics['metrics/abs_err'])

  prediction = next(sme.predict(dummy_input_fn_features_only))
  self.assertDictEqual({'output': 503}, prediction)
def test_with_incorrect_input(self):
  """Feeding tensors with a wrong shape or dtype raises a clear error."""
  sme = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(), self._get_tmp_dir())

  def bad_shape_input_fn():
    # Rank-1 tensors instead of the rank-2 [[1], [1]] shape used elsewhere.
    feature_dict = {'x': tf.constant([1, 2], dtype=tf.dtypes.int64)}
    label_tensor = tf.constant([1, 2], dtype=tf.dtypes.float32)
    return tf.compat.v1.data.Dataset.from_tensors(
        (feature_dict, label_tensor))

  with self.assertRaisesRegexp(ValueError, 'Expected shape'):
    sme.train(bad_shape_input_fn, steps=1)

  def bad_dtype_input_fn():
    # int32/int64 instead of the int64/float32 dtypes used elsewhere.
    feature_dict = {'x': tf.constant([[1], [1]], dtype=tf.dtypes.int32)}
    label_tensor = tf.constant([[1], [1]], dtype=tf.dtypes.int64)
    return tf.compat.v1.data.Dataset.from_tensors(
        (feature_dict, label_tensor))

  with self.assertRaisesRegexp(ValueError, 'Expected dtype'):
    sme.train(bad_dtype_input_fn, steps=1)
def test_with_working_input_fn(self):
  """Features and labels from the input_fn flow through the loaded graph."""
  def model_fn(features, labels, mode):
    # Labels are absent in predict mode, so the loss is only built when
    # they are provided.
    loss = None
    if labels is not None:
      loss = labels[0][0] + labels[1][0]
    return model_fn_lib.EstimatorSpec(
        mode,
        loss=loss,
        train_op=state_ops.assign_add(training.get_global_step(), 1),
        predictions={
            'features_0': array_ops.identity([features['x'][0][0]]),
            'features_1': array_ops.identity([features['x'][1][0]]),
        })

  sme = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(model_fn=model_fn), self._get_tmp_dir())

  metrics = sme.evaluate(dummy_input_fn, steps=1)
  self.assertEqual(1, metrics['loss'])

  prediction = next(sme.predict(dummy_input_fn_features_only))
  self.assertDictEqual({'features_0': 5, 'features_1': 6}, prediction)
def test_with_assets(self):
  """Asset file paths must be remapped into the SavedModel assets dir."""
  asset_name = 'test_asset'
  asset_dir = tempfile.mkdtemp()
  asset_path = os.path.join(asset_dir, asset_name)
  num_buckets = 1000
  with open(asset_path, 'w') as f:
    f.write(six.ensure_str(b'test'))

  def model_fn(features, labels, mode):
    _, _ = features, labels
    some_var = tf.Variable(0, name='some_var', dtype=tf.dtypes.int64)

    # We verify the value of filepath_tensor is replaced with a path to the
    # saved model's assets directory by assigning a hash of filepath_tensor
    # to some_var.
    filepath_tensor = ops.convert_to_tensor(asset_path)
    tf.compat.v1.add_to_collection(
        tf.compat.v1.GraphKeys.ASSET_FILEPATHS, filepath_tensor)
    scaffold = tf.compat.v1.train.Scaffold(
        local_init_op=tf.compat.v1.assign(
            some_var,
            tf.strings.to_hash_bucket_fast(filepath_tensor,
                                           num_buckets)).op)
    return model_fn_lib.EstimatorSpec(
        mode,
        scaffold=scaffold,
        train_op=tf.compat.v1.assign_add(
            tf.compat.v1.train.get_global_step(), 1),
        loss=tf.identity(0))

  export_dir = self._export_estimator(predict=False, model_fn=model_fn)
  sme = saved_model_estimator.SavedModelEstimator(
      export_dir, self._get_tmp_dir())

  # Hash of the path the asset should occupy inside the export directory.
  with self.session() as sess:
    expected_bucket = sess.run(
        tf.strings.to_hash_bucket_fast(
            os.path.join(
                six.ensure_str(export_dir),
                six.ensure_str(tf.saved_model.ASSETS_DIRECTORY),
                six.ensure_str(asset_name)), num_buckets))

  sme.train(dummy_input_fn, steps=1)
  self.assertEqual(expected_bucket, sme.get_variable_value('some_var'))
def test_saveable_resources(self):
  """MutableHashTable state survives export/load and further training."""
  def model_fn(features, labels, mode):
    table = lookup_ops.MutableHashTable(
        key_dtype=tf.dtypes.int32,
        value_dtype=tf.dtypes.int32,
        default_value=-1)
    predictions = table.lookup(features['x'])
    train_op = None
    if mode == ModeKeys.TRAIN:
      train_op = tf.group(
          table.insert(features['x'], labels),
          tf.compat.v1.assign_add(tf.compat.v1.train.get_global_step(), 1))
    return model_fn_lib.EstimatorSpec(
        mode, loss=tf.constant(0), predictions=predictions,
        train_op=train_op)

  # Trains the model so that the table maps 1 -> 4, and -2 -> -3
  # (see dummy_input_fn)
  sme = saved_model_estimator.SavedModelEstimator(
      self._export_estimator(model_fn=model_fn), self._get_tmp_dir())

  def gen_input_fn(features, labels=None):
    def fn():
      if labels:
        tensors = ({'x': tf.constant(features, name='feature_x')},
                   tf.constant(labels, name='truth'))
      else:
        tensors = {'x': tf.constant(features, name='feature_x')}
      return tf.compat.v1.data.Dataset.from_tensors(tensors).repeat()
    return fn

  # Unseen key yields the default value; a trained key is retained.
  self.assertAllEqual([-1], next(sme.predict(gen_input_fn([[5]])))['output'])
  self.assertAllEqual([4], next(sme.predict(gen_input_fn([[1]])))['output'])

  # An insert performed via train() must be visible to predict().
  sme.train(gen_input_fn([[5]], [[6]]), steps=1)
  self.assertAllEqual([6], next(sme.predict(gen_input_fn([[5]])))['output'])