def test_export_strategies_reset(self):
  est = TestEstimator()
  export_strategy_1 = saved_model_export_utils.make_export_strategy(
      est, 'export_input_1', exports_to_keep=None)
  ex = experiment.Experiment(
      est,
      train_input_fn='train_input',
      eval_input_fn='eval_input',
      eval_metrics='eval_metrics',
      train_steps=100,
      eval_steps=100,
      export_strategies=[export_strategy_1])
  ex.train_and_evaluate()
  self.assertEqual(1, est.export_count)

  # After reset with empty list (None), the count does not change and the
  # user-provided export strategy list should remain intact.
  old_es = ex.reset_export_strategies()
  ex.train_and_evaluate()
  self.assertAllEqual([export_strategy_1], old_es)
  self.assertEqual(1, est.export_count)

  # After reset with a list, the count should increase by the number of items.
  export_strategy_2 = saved_model_export_utils.make_export_strategy(
      est, 'export_input_2', exports_to_keep=None)
  export_strategy_3 = saved_model_export_utils.make_export_strategy(
      est, 'export_input_3', exports_to_keep=None)
  old_es = ex.reset_export_strategies([export_strategy_2, export_strategy_3])
  ex.train_and_evaluate()
  self.assertAllEqual([], old_es)
  self.assertEqual(3, est.export_count)
def _experiment_fn(output_dir):
  # num_epochs can control duration if train_steps isn't
  # passed to Experiment.
  train_input = model.generate_input_fn(
      train_files,
      num_epochs=num_epochs,
      batch_size=train_batch_size,
  )
  # Don't shuffle evaluation data.
  eval_input = model.generate_input_fn(
      eval_files, batch_size=eval_batch_size, shuffle=False)
  return tf.contrib.learn.Experiment(
      model.build_estimator(
          output_dir,
          # Construct layer sizes with exponential decay.
          hidden_units=[
              max(2, int(first_layer_size * scale_factor**i))
              for i in range(num_layers)
          ]),
      train_input_fn=train_input,
      eval_input_fn=eval_input,
      # Export strategies control the prediction graph structure
      # of exported binaries.
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              model.serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      **experiment_args)
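# A minimal standalone sketch of the exponential layer-size decay used above,
# with hypothetical values (first_layer_size=100, scale_factor=0.7,
# num_layers=4): each layer is scale_factor times the previous one, floored
# at 2 units.
first_layer_size, scale_factor, num_layers = 100, 0.7, 4
hidden_units = [
    max(2, int(first_layer_size * scale_factor**i)) for i in range(num_layers)
]
print(hidden_units)  # [100, 70, 48, 34]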
def test_train_and_evaluate(self):
  for est in self._estimators_for_tests():
    eval_metrics = 'eval_metrics' if not isinstance(
        est, core_estimator.Estimator) else None
    noop_hook = _NoopHook()
    export_strategy = saved_model_export_utils.make_export_strategy(
        est,
        None if isinstance(est, core_estimator.Estimator) else 'export_input',
        exports_to_keep=None)
    ex = experiment.Experiment(
        est,
        train_input_fn='train_input',
        eval_input_fn='eval_input',
        eval_metrics=eval_metrics,
        eval_hooks=[noop_hook],
        train_steps=100,
        eval_steps=100,
        export_strategies=export_strategy)
    ex.train_and_evaluate()
    self.assertEqual(1, est.fit_count)
    self.assertEqual(1, est.eval_count)
    self.assertEqual(1, est.export_count)
    self.assertEqual(1, len(est.monitors))
    self.assertEqual([noop_hook], est.eval_hooks)
    self.assertTrue(
        isinstance(est.monitors[0], session_run_hook.SessionRunHook))
def test_continuous_train_and_eval_with_predicate_fn(self):
  for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
    eval_metrics = 'eval_metrics' if not isinstance(
        est, core_estimator.Estimator) else None
    export_strategy = saved_model_export_utils.make_export_strategy(
        est,
        None if isinstance(est, core_estimator.Estimator) else 'export_input',
        exports_to_keep=None)
    ex = experiment.Experiment(
        est,
        train_input_fn='train_input',
        eval_input_fn='eval_input',
        eval_metrics=eval_metrics,
        train_steps=100000000000,  # A value so large `ex` never stops on its own.
        eval_steps=100,
        export_strategies=export_strategy)

    def predicate_fn(eval_result):
      del eval_result  # Unused; kept for the fn signature.
      return False

    ex.continuous_train_and_eval(continuous_eval_predicate_fn=predicate_fn)
    self.assertEqual(0, est.fit_count)
    self.assertEqual(0, est.eval_count)
    self.assertEqual(1, est.export_count)
def _make_experiment_fn(output_dir):
  """Creates experiment for DNNBoostedTreeCombinedRegressor."""
  (x_train, y_train), (x_test,
                       y_test) = tf.keras.datasets.boston_housing.load_data()

  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={"x": x_train},
      y=y_train,
      batch_size=FLAGS.batch_size,
      num_epochs=None,
      shuffle=True)
  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={"x": x_test}, y=y_test, num_epochs=1, shuffle=False)

  feature_columns = [
      feature_column.real_valued_column("x", dimension=_BOSTON_NUM_FEATURES)
  ]
  feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(
      feature_columns)
  serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
  export_strategies = [
      saved_model_export_utils.make_export_strategy(serving_input_fn)
  ]
  return tf.contrib.learn.Experiment(
      estimator=_get_estimator(output_dir, feature_columns),
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      train_steps=None,
      eval_steps=FLAGS.num_eval_steps,
      eval_metrics=None,
      export_strategies=export_strategies)
def main():
  args_parser = argparse.ArgumentParser()
  args = parameters.initialise_arguments(args_parser)
  parameters.HYPER_PARAMS = hparam.HParams(**args.__dict__)

  # Set python level verbosity.
  tf.logging.set_verbosity(args.verbosity)

  # Set C++ graph execution level verbosity.
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
      tf.logging.__dict__[args.verbosity] / 10)

  # Directory to store the output model and checkpoints.
  output_dir = args.job_dir

  # Run the training job.
  learn_runner.run(
      experiment.generate_experiment_fn(
          min_eval_frequency=args.min_eval_frequency,
          eval_delay_secs=args.eval_delay_secs,
          train_steps=args.train_steps,
          eval_steps=args.eval_steps,
          export_strategies=[
              saved_model_export_utils.make_export_strategy(
                  serving.SERVING_FUNCTIONS[args.export_format],
                  exports_to_keep=1,
                  default_output_alternative_key=None,
              )
          ]),
      run_config=run_config.RunConfig(model_dir=output_dir),
      hparams=parameters.HYPER_PARAMS)
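# A small standalone sketch of the verbosity arithmetic above: tf.logging
# reuses the Python logging levels (DEBUG=10, INFO=20, WARN=30, ERROR=40,
# FATAL=50), so integer-dividing by 10 yields the '1'..'5' strings the
# snippet assigns to TF_CPP_MIN_LOG_LEVEL.
import logging

for name in ('DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL'):
  print(name, getattr(logging, name) // 10)  # 1, 2, 3, 4, 5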
def create_experiment_fn(output_dir=None):
  """Experiment function."""
  distance_metric = (
      tf.contrib.factorization.COSINE_DISTANCE
      if FLAGS.use_cosine_distance
      else tf.contrib.factorization.SQUARED_EUCLIDEAN_DISTANCE)
  initial_clusters = (
      tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT
      if FLAGS.use_kmeans_plus_plus
      else tf.contrib.factorization.RANDOM_INIT)

  # Create the estimator.
  kmeans = kmeans_lib.KMeansClustering(
      FLAGS.num_clusters,
      model_dir=output_dir,
      initial_clusters=initial_clusters,
      distance_metric=distance_metric,
      use_mini_batch=True,
      relative_tolerance=FLAGS.relative_tolerance,
      config=tf.contrib.learn.RunConfig(
          save_checkpoints_secs=FLAGS.save_checkpoints_secs))

  train_monitors = []
  if FLAGS.debug:
    train_monitors.append(tf_debug.LocalCLIDebugHook())

  return tf.contrib.learn.Experiment(
      estimator=kmeans,
      train_steps=FLAGS.num_train_steps,
      eval_steps=1,
      eval_input_fn=_input_fn,
      train_input_fn=_input_fn,
      train_monitors=train_monitors,
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              _predict_input_fn, exports_to_keep=5)
      ])
def make_custom_export_strategy(name, convert_fn, feature_columns,
                                export_input_fn):
  """Makes custom exporter of GTFlow tree format.

  Args:
    name: A string, for the name of the export strategy.
    convert_fn: A function that converts the tree proto to desired format and
      saves it to the desired location. Can be None to skip conversion.
    feature_columns: A list of feature columns.
    export_input_fn: A function that takes no arguments and returns an
      `InputFnOps`.

  Returns:
    An `ExportStrategy`.
  """
  base_strategy = saved_model_export_utils.make_export_strategy(
      serving_input_fn=export_input_fn)
  input_fn = export_input_fn()
  (sorted_feature_names, dense_floats, sparse_float_indices, _, _,
   sparse_int_indices, _, _) = gbdt_batch.extract_features(
       input_fn.features, feature_columns)

  def export_fn(estimator, export_dir, checkpoint_path=None, eval_result=None):
    """A wrapper to export to SavedModel, and convert it to other formats."""
    result_dir = base_strategy.export(estimator, export_dir, checkpoint_path,
                                      eval_result)
    with ops.Graph().as_default() as graph:
      with tf_session.Session(graph=graph) as sess:
        saved_model_loader.load(sess, [tag_constants.SERVING], result_dir)
        # Note: This is GTFlow internal API and might change.
        ensemble_model = graph.get_operation_by_name(
            "ensemble_model/TreeEnsembleSerialize")
        _, dfec_str = sess.run(ensemble_model.outputs)
        dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
        dtec.ParseFromString(dfec_str)
        # Export the result in the same folder as the saved model.
        if convert_fn:
          convert_fn(dtec, sorted_feature_names, len(dense_floats),
                     len(sparse_float_indices), len(sparse_int_indices),
                     result_dir, eval_result)
        feature_importances = _get_feature_importances(
            dtec, sorted_feature_names, len(dense_floats),
            len(sparse_float_indices), len(sparse_int_indices))
        sorted_by_importance = sorted(
            feature_importances.items(), key=lambda x: -x[1])
        assets_dir = os.path.join(result_dir, "assets.extra")
        gfile.MakeDirs(assets_dir)
        with gfile.GFile(os.path.join(assets_dir, "feature_importances"),
                         "w") as f:
          f.write("\n".join("%s, %f" % (k, v) for k, v in sorted_by_importance))
    return result_dir

  return export_strategy.ExportStrategy(name, export_fn,
                                        strip_default_attrs=True)
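# A hedged usage sketch for make_custom_export_strategy above. The names
# my_feature_columns and my_serving_input_fn are hypothetical placeholders,
# not taken from the original; convert_fn may be None when no proto
# conversion is needed (only feature importances are then written).
custom_strategy = make_custom_export_strategy(
    name="gtflow_tree_export",
    convert_fn=None,
    feature_columns=my_feature_columns,
    export_input_fn=my_serving_input_fn)
# The returned ExportStrategy plugs into an Experiment like any other:
# experiment.Experiment(..., export_strategies=[custom_strategy])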
def main(unused_argv):
  # Load training and eval data.
  mnist = read_data_sets(FLAGS.data_dir, source_url=FLAGS.datasource_url)
  train_data = mnist.train.images  # Returns np.array
  train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
  eval_data = mnist.test.images  # Returns np.array
  eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

  def serving_input_receiver_fn():
    feature_tensor = tf.placeholder(tf.float32, [None, 784])
    return tf.estimator.export.ServingInputReceiver({'x': feature_tensor},
                                                    {'x': feature_tensor})

  learn_runner.run(
      generate_experiment_fn(
          min_eval_frequency=1,
          train_steps=FLAGS.num_steps,
          eval_steps=FLAGS.eval_steps,
          export_strategies=[
              saved_model_export_utils.make_export_strategy(
                  serving_input_receiver_fn, exports_to_keep=1)
          ]),
      run_config=tf.contrib.learn.RunConfig().replace(
          model_dir=FLAGS.job_dir, save_checkpoints_steps=1000),
      hparams=hparam.HParams(
          dataset=mnist.train, eval_data=eval_data, eval_labels=eval_labels),
  )
def test_continuous_train_and_eval(self):
  for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
    if isinstance(est, core_estimator.Estimator):
      eval_metrics = None
      saving_listeners = 'saving_listeners'
    else:
      eval_metrics = 'eval_metrics'
      saving_listeners = None
    noop_hook = _NoopHook()
    export_strategy = saved_model_export_utils.make_export_strategy(
        est,
        None if isinstance(est, core_estimator.Estimator) else 'export_input',
        exports_to_keep=None)
    ex = experiment.Experiment(
        est,
        train_input_fn='train_input',
        eval_input_fn='eval_input',
        eval_metrics=eval_metrics,
        eval_hooks=[noop_hook],
        train_steps=100,
        eval_steps=100,
        export_strategies=export_strategy,
        saving_listeners=saving_listeners)
    ex.continuous_train_and_eval()
    self.assertEqual(1, est.fit_count)
    self.assertEqual(1, est.eval_count)
    self.assertEqual(1, est.export_count)
    self.assertEqual([noop_hook], est.eval_hooks)
def test_checkpoint_and_export(self):
  model_dir = tempfile.mkdtemp()
  config = run_config_lib.RunConfig(save_checkpoints_steps=3)
  est = dnn.DNNClassifier(
      n_classes=3,
      feature_columns=[
          feature_column.real_valued_column('feature', dimension=4)
      ],
      hidden_units=[3, 3],
      model_dir=model_dir,
      config=config)

  exp_strategy = saved_model_export_utils.make_export_strategy(
      est, 'export_input', exports_to_keep=None)

  ex = experiment.Experiment(
      est,
      train_input_fn=test_data.iris_input_multiclass_fn,
      eval_input_fn=test_data.iris_input_multiclass_fn,
      export_strategies=(exp_strategy,),
      train_steps=8,
      checkpoint_and_export=True,
      eval_delay_secs=0)

  with test.mock.patch.object(ex, '_maybe_export'):
    with test.mock.patch.object(ex, '_call_evaluate'):
      ex.train_and_evaluate()
      # Eval and export are called after steps 1, 4, 7, and 8 (after
      # training is completed).
      self.assertEqual(ex._maybe_export.call_count, 4)
      self.assertEqual(ex._call_evaluate.call_count, 4)
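# A minimal sketch of the checkpoint schedule the comment above relies on:
# with save_checkpoints_steps=3 and train_steps=8, checkpoints land on the
# first step, then every third step, plus a final one when training ends,
# and each checkpoint triggers one eval/export.
save_every, train_steps = 3, 8
checkpoint_steps = sorted(
    set([1] + list(range(1 + save_every, train_steps, save_every)) +
        [train_steps]))
print(checkpoint_steps)  # [1, 4, 7, 8] -> 4 eval/export calls.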
def _experiment_fn(output_dir): """Experiment function used by learn_runner to run training/eval/etc. Args: output_dir: String path of directory to use for outputs. Returns: tf.learn `Experiment`. """ estimator = tf.contrib.learn.Estimator(model_fn=_build_model_fn(), model_dir=output_dir) train_input_fn = _build_input_fn(input_file_pattern=train_file_pattern, batch_size=batch_size, mode=tf.contrib.learn.ModeKeys.TRAIN) eval_input_fn = _build_input_fn(input_file_pattern=eval_file_pattern, batch_size=batch_size, mode=tf.contrib.learn.ModeKeys.EVAL) return tf.contrib.learn.Experiment( estimator=estimator, train_input_fn=train_input_fn, train_steps=FLAGS.num_train_steps, eval_input_fn=eval_input_fn, eval_steps=FLAGS.num_eval_steps, eval_metrics=_create_evaluation_metrics(), min_eval_frequency=100, export_strategies=[ saved_model_export_utils.make_export_strategy( _predict_input_fn, exports_to_keep=5, default_output_alternative_key=DEFAULT_OUTPUT_ALTERNATIVE) ])
def experiment_fn(output_dir):
  # Run the experiment.
  # train_monitors = tf.contrib.learn.monitors.ValidationMonitor(
  #     test_set.target, test_set.target, every_n_steps=5)
  # logging_hook = tf.train.LoggingTensorHook(
  #     {"accuracy": tflearn.MetricSpec(
  #         metric_fn=metrics.streaming_accuracy, prediction_key='class')},
  #     every_n_iter=10)
  return tflearn.Experiment(
      tflearn.Estimator(
          model_fn=cnn_model,
          model_dir=output_dir,
          config=tf.contrib.learn.RunConfig(
              save_checkpoints_steps=CHECKPOINT_STEPS,
              save_checkpoints_secs=None,
              save_summary_steps=SUMMARY_STEPS)),
      train_input_fn=get_train(),
      eval_input_fn=get_valid(),
      eval_metrics={
          'acc': tflearn.MetricSpec(
              metric_fn=metrics.streaming_accuracy, prediction_key='class')
      },
      checkpoint_and_export=True,
      train_monitors=None,
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      train_steps=TRAIN_STEPS,
      eval_steps=EVAL_STEPS)
def make_custom_export_strategy(name, convert_fn, feature_columns,
                                export_input_fn):
  """Makes custom exporter of GTFlow tree format.

  Args:
    name: A string, for the name of the export strategy.
    convert_fn: A function that converts the tree proto to desired format and
      saves it to the desired location. Can be None to skip conversion.
    feature_columns: A list of feature columns.
    export_input_fn: A function that takes no arguments and returns an
      `InputFnOps`.

  Returns:
    An `ExportStrategy`.
  """
  base_strategy = saved_model_export_utils.make_export_strategy(
      serving_input_fn=export_input_fn)
  input_fn = export_input_fn()
  (sorted_feature_names, dense_floats, sparse_float_indices, _, _,
   sparse_int_indices, _, _) = gbdt_batch.extract_features(
       input_fn.features, feature_columns)

  def export_fn(estimator, export_dir, checkpoint_path=None, eval_result=None):
    """A wrapper to export to SavedModel, and convert it to other formats."""
    result_dir = base_strategy.export(estimator, export_dir, checkpoint_path,
                                      eval_result)
    with ops.Graph().as_default() as graph:
      with tf_session.Session(graph=graph) as sess:
        saved_model_loader.load(sess, [tag_constants.SERVING], result_dir)
        # Note: This is GTFlow internal API and might change.
        ensemble_model = graph.get_operation_by_name(
            "ensemble_model/TreeEnsembleSerialize")
        _, dfec_str = sess.run(ensemble_model.outputs)
        dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
        dtec.ParseFromString(dfec_str)
        # Export the result in the same folder as the saved model.
        if convert_fn:
          convert_fn(dtec, sorted_feature_names, len(dense_floats),
                     len(sparse_float_indices), len(sparse_int_indices),
                     result_dir, eval_result)
        feature_importances = _get_feature_importances(
            dtec, sorted_feature_names, len(dense_floats),
            len(sparse_float_indices), len(sparse_int_indices))
        sorted_by_importance = sorted(
            feature_importances.items(), key=lambda x: -x[1])
        assets_dir = os.path.join(result_dir, "assets.extra")
        gfile.MakeDirs(assets_dir)
        with gfile.GFile(os.path.join(assets_dir, "feature_importances"),
                         "w") as f:
          f.write("\n".join("%s, %f" % (k, v) for k, v in sorted_by_importance))
    return result_dir

  return export_strategy.ExportStrategy(name, export_fn)
def test_continuous_train_and_eval_with_predicate_fn(self):
  for est in self._estimators_for_tests(eval_dict={'global_step': 100}):
    eval_metrics = 'eval_metrics' if not isinstance(
        est, core_estimator.Estimator) else None
    export_strategy = saved_model_export_utils.make_export_strategy(
        est,
        None if isinstance(est, core_estimator.Estimator) else 'export_input',
        exports_to_keep=None)
    ex = experiment.Experiment(
        est,
        train_input_fn='train_input',
        eval_input_fn='eval_input',
        eval_metrics=eval_metrics,
        train_steps=100000000000,  # A value so large `ex` never stops on its own.
        eval_steps=100,
        export_strategies=export_strategy)

    def predicate_fn(eval_result):
      del eval_result  # Unused; kept for the fn signature.
      return False

    ex.continuous_train_and_eval(continuous_eval_predicate_fn=predicate_fn)
    self.assertEqual(0, est.fit_count)
    self.assertEqual(0, est.eval_count)
    self.assertEqual(0, est.export_count)
def _export_strategy():
  if self.saves_training():
    return [
        saved_model_export_utils.make_export_strategy(
            serving_input_fn=_serving_input_fn,
            default_output_alternative_key=None,
            exports_to_keep=1)
    ]
  logger.warn("serving_input_fn not specified, model NOT saved, "
              "use checkpoints to reconstruct")
  return None
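# A minimal sketch of the kind of _serving_input_fn the guard above expects,
# assuming tf.contrib.learn on TF 1.x. The feature name 'x' and its shape are
# hypothetical placeholders, not taken from the original snippet.
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils

def _serving_input_fn():
  features = {'x': tf.placeholder(tf.float32, [None, 4], name='x')}
  # InputFnOps bundles the features the model sees, optional labels (None at
  # serving time), and the raw placeholders to be fed.
  return input_fn_utils.InputFnOps(features, None, features)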
def test_export_strategies_reset(self):
  for est in self._estimators_for_tests():
    eval_metrics = 'eval_metrics' if not isinstance(
        est, core_estimator.Estimator) else None
    export_strategy_1 = saved_model_export_utils.make_export_strategy(
        est,
        None if isinstance(est, core_estimator.Estimator) else 'export_1',
        exports_to_keep=None)
    ex = experiment.Experiment(
        est,
        train_input_fn='train_input',
        eval_input_fn='eval_input',
        eval_metrics=eval_metrics,
        train_steps=100,
        eval_steps=100,
        export_strategies=(export_strategy_1,))
    ex.train_and_evaluate()
    self.assertEqual(1, est.export_count)

    # After reset with empty list (None), the count does not change and the
    # user provided export strategy list should remain intact.
    old_es = ex.reset_export_strategies()
    ex.train_and_evaluate()
    self.assertAllEqual([export_strategy_1], old_es)
    self.assertEqual(1, est.export_count)

    # After reset with list, the count should increase with the number of
    # items.
    export_strategy_2 = saved_model_export_utils.make_export_strategy(
        est,
        None if isinstance(est, core_estimator.Estimator) else 'export_2',
        exports_to_keep=None)
    export_strategy_3 = saved_model_export_utils.make_export_strategy(
        est,
        None if isinstance(est, core_estimator.Estimator) else 'export_3',
        exports_to_keep=None)
    old_es = ex.reset_export_strategies(
        [export_strategy_2, export_strategy_3])
    ex.train_and_evaluate()
    self.assertAllEqual([], old_es)
    self.assertEqual(3, est.export_count)
def test_test(self):
  for est in self._estimators_for_tests():
    exp_strategy = saved_model_export_utils.make_export_strategy(
        est, 'export_input', exports_to_keep=None)
    ex = experiment.Experiment(
        est,
        train_input_fn='train_input',
        eval_input_fn='eval_input',
        export_strategies=[exp_strategy])
    ex.test()
    self.assertEqual(1, est.fit_count)
    self.assertEqual(1, est.eval_count)
    self.assertEqual(1, est.export_count)
def test_make_export_strategy(self):
  """Only tests that an ExportStrategy instance is created."""

  def _serving_input_fn():
    return array_ops.constant([1]), None

  export_strategy = saved_model_export_utils.make_export_strategy(
      serving_input_fn=_serving_input_fn,
      default_output_alternative_key="default",
      assets_extra={"from/path": "to/path"},
      as_text=False,
      exports_to_keep=5)
  self.assertTrue(
      isinstance(export_strategy, export_strategy_lib.ExportStrategy))
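# A brief usage sketch of the strategy object the test above constructs,
# reusing the test's _serving_input_fn. `estimator` and `export_dir_base`
# are hypothetical placeholders; the contrib utility names its strategies
# 'Servo' by default.
strategy = saved_model_export_utils.make_export_strategy(
    serving_input_fn=_serving_input_fn, exports_to_keep=5)
print(strategy.name)
# export_path = strategy.export(estimator, export_dir_base)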
def _experiment_fn(output_dir):
  my_model = build_estimator(output_dir)
  experiment = tf.contrib.learn.Experiment(
      my_model,
      train_input_fn=generate_input_fn(df_train),
      eval_input_fn=generate_input_fn(df_test),
      train_steps=FLAGS.steps,
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              serving_input_fn, default_output_alternative_key=None)
      ])
  return experiment
def experiment_fn(output_dir):
  PADWORD = '[PAD]'
  MAX_DOCUMENT_LENGTH = 3
  titles = [
      'Biodegradable Bags Cause Outrage in Italy',
      'Tom Brady denies key points of ESPN Patriots article',
      'Aldi to open first Kingwood store', PADWORD
  ]
  labels = ['International', 'Sport', 'Business']
  TARGETS = tf.constant(["International", "Sport", "Business"])

  words = tf.sparse_tensor_to_dense(
      tf.string_split(titles), default_value=PADWORD)

  vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
      MAX_DOCUMENT_LENGTH)
  vocab_processor.fit(titles)
  outfilename = "/Users/eliapalme/Newsriver/Newsriver-classifier/training/vocabfile.vcb"
  vocab_processor.save(outfilename)
  nwords = len(vocab_processor.vocabulary_)

  # Transform the documents using the vocabulary.
  XX = np.array(list(vocab_processor.fit_transform(titles)))

  # Make targets numeric.
  table = tf.contrib.lookup.index_table_from_tensor(
      mapping=TARGETS, num_oov_buckets=1, default_value=-1)
  features = tf.constant(["International", "Sport", "Business"])
  targetX = table.lookup(features)

  return tflearn.Experiment(
      tflearn.Estimator(model_fn=cnn_model, model_dir=output_dir),
      train_input_fn=XX,
      eval_input_fn=targetX,
      eval_metrics={
          'acc': tflearn.MetricSpec(
              metric_fn=metrics.streaming_accuracy, prediction_key='class')
      },
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      train_steps=TRAIN_STEPS)
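# A minimal standalone sketch of what the VocabularyProcessor above produces,
# assuming the contrib.learn preprocessing API: each document becomes a
# fixed-length row of MAX_DOCUMENT_LENGTH word ids, with id 0 reserved for
# padding/unknown. The toy docs below are hypothetical.
import numpy as np
import tensorflow as tf

docs = ['a b c d', 'b c']
vp = tf.contrib.learn.preprocessing.VocabularyProcessor(3)
ids = np.array(list(vp.fit_transform(docs)))
print(ids.shape)  # (2, 3): 'a b c d' is truncated, 'b c' is padded with 0.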
def test_test(self):
  est = TestEstimator()
  exp_strategy = saved_model_export_utils.make_export_strategy(
      est, 'export_input', exports_to_keep=None)
  ex = experiment.Experiment(
      est,
      train_input_fn='train_input',
      eval_input_fn='eval_input',
      export_strategies=[exp_strategy])
  ex.test()
  self.assertEqual(1, est.fit_count)
  self.assertEqual(1, est.eval_count)
  self.assertEqual(1, est.export_count)
def test_test(self):
  for est in self._estimators_for_tests():
    exp_strategy = saved_model_export_utils.make_export_strategy(
        est, 'export_input', exports_to_keep=None)
    ex = experiment.Experiment(
        est,
        train_input_fn='train_input',
        eval_input_fn='eval_input',
        export_strategies=(exp_strategy,))
    ex.test()
    self.assertEqual(1, est.fit_count)
    self.assertEqual(1, est.eval_count)
    self.assertEqual(1, est.export_count)
def _experiment_fn(output_dir):
  return Experiment(
      models.build_estimator(output_dir),
      train_input_fn=models.get_input_fn(
          filename=os.path.join(data_dir, 'train.tfrecords')),
      eval_input_fn=models.get_input_fn(
          filename=os.path.join(data_dir, 'test.tfrecords')),
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              models.serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      train_steps=train_steps,
      eval_steps=eval_steps,
      **experiment_args)
def test_default_output_alternative_key_core_estimator(self):
  est = TestCoreEstimator()
  export_strategy = saved_model_export_utils.make_export_strategy(
      est, default_output_alternative_key='export_key', exports_to_keep=None)
  ex = experiment.Experiment(
      est,
      train_input_fn='train_input',
      eval_input_fn='eval_input',
      train_steps=100,
      eval_steps=100,
      export_strategies=export_strategy)
  with self.assertRaisesRegexp(
      ValueError, 'default_output_alternative_key is not supported'):
    ex.train_and_evaluate()
def _experiment_fn(output_dir):
  train_input_fn = generate_input_fn(train_file)
  eval_input_fn = generate_input_fn(test_file)
  my_model = build_estimator(model_type=model_type, model_dir=output_dir)

  experiment = tf.contrib.learn.Experiment(
      my_model,
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      train_steps=1000,
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              serving_input_fn, default_output_alternative_key=None)
      ])
  return experiment
def train_and_evaluate(output_dir):
  review_column = feature_column.sparse_column_with_integerized_feature(
      const.REVIEW_COLUMN, bucket_size=vocab_size + 1, combiner='sum')
  weighted_reviews = feature_column.weighted_sparse_column(
      review_column, const.REVIEW_WEIGHT)

  estimator = learn.LinearClassifier(
      feature_columns=[weighted_reviews],
      n_classes=2,
      model_dir=output_dir,
      config=tf.contrib.learn.RunConfig(save_checkpoints_secs=30))

  transformed_metadata = metadata_io.read_metadata(transformed_metadata_dir)
  raw_metadata = metadata_io.read_metadata(raw_metadata_dir)
  train_input_fn = input_fn_maker.build_training_input_fn(
      transformed_metadata,
      transformed_train_file_pattern,
      training_batch_size=train_batch_size,
      label_keys=[const.LABEL_COLUMN])
  eval_input_fn = input_fn_maker.build_training_input_fn(
      transformed_metadata,
      transformed_test_file_pattern,
      training_batch_size=1,
      label_keys=[const.LABEL_COLUMN])
  serving_input_fn = input_fn_maker.build_default_transforming_serving_input_fn(
      raw_metadata=raw_metadata,
      transform_savedmodel_dir=output_dir + '/transform_fn',
      raw_label_keys=[],
      raw_feature_keys=[const.REVIEW_COLUMN])

  export_strategy = saved_model_export_utils.make_export_strategy(
      serving_input_fn,
      exports_to_keep=5,
      default_output_alternative_key=None)

  return tf.contrib.learn.Experiment(
      estimator=estimator,
      train_steps=train_num_epochs * num_train_instances / train_batch_size,
      eval_steps=num_test_instances,
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      export_strategies=export_strategy,
      min_eval_frequency=500)
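# A small worked example of the train_steps arithmetic above, with
# hypothetical values: 5 epochs over 20000 instances at batch size 100 gives
# 5 * 20000 / 100 = 1000 steps, since each step consumes one batch.
train_num_epochs, num_train_instances, train_batch_size = 5, 20000, 100
print(train_num_epochs * num_train_instances / train_batch_size)  # 1000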
def experiment_fn(output_dir):
  return tflearn.Experiment(
      tflearn.Estimator(model_fn=simple_rnn, model_dir=output_dir),
      train_input_fn=get_train(),
      eval_input_fn=get_valid(),
      eval_metrics={
          'rmse': tflearn.MetricSpec(
              metric_fn=metrics.streaming_root_mean_squared_error)
      },
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ])
def _experiment_fn(output_dir):
  return Experiment(
      model.build_estimator(output_dir, model_type, learning_rate),
      train_input_fn=input_pipe.get_input_fn(
          "train", data_dir, **input_pipe_settings),
      eval_input_fn=input_pipe.get_input_fn(
          "eval", data_dir, **input_pipe_settings),
      train_steps=train_steps,
      eval_steps=eval_steps,
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              model.serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      **other_experiment_args)
def experiment_fn(output_dir):
  # Run the experiment.
  return tflearn.Experiment(
      tflearn.Estimator(model_fn=cnn_model, model_dir=output_dir),
      train_input_fn=get_train(),
      eval_input_fn=get_valid(),
      eval_metrics={
          'acc': tflearn.MetricSpec(
              metric_fn=metrics.streaming_accuracy, prediction_key='class')
      },
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ])
def _experiment_fn(output_dir):
  return tflearn.Experiment(
      get_model(output_dir, nbuckets, hidden_units, learning_rate),
      train_input_fn=read_dataset(
          traindata,
          mode=tf.contrib.learn.ModeKeys.TRAIN,
          num_training_epochs=num_training_epochs,
          batch_size=batch_size),
      eval_input_fn=read_dataset(evaldata),
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      eval_metrics={
          'rmse': tflearn.MetricSpec(
              metric_fn=my_rmse, prediction_key='probabilities'),
          'training/hptuning/metric': tflearn.MetricSpec(
              metric_fn=my_rmse, prediction_key='probabilities')
      },
      min_eval_frequency=100,
      **args)
def experiment_fn(output_dir):
  wide, deep = get_wide_deep()
  return tf.contrib.learn.Experiment(
      tf.estimator.DNNLinearCombinedRegressor(
          model_dir=output_dir,
          linear_feature_columns=wide,
          dnn_feature_columns=deep,
          dnn_hidden_units=[64, 32]),
      train_input_fn=read_dataset('train', PATTERN),
      eval_input_fn=read_dataset('eval', PATTERN),
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      train_steps=TRAIN_STEPS,
      eval_steps=None)
def main():
  args_parser = argparse.ArgumentParser()
  args = parameters.initialise_arguments(args_parser)
  parameters.HYPER_PARAMS = hparam.HParams(**args.__dict__)

  # Set python level verbosity.
  tf.logging.set_verbosity(args.verbosity)
  # Set C++ graph execution level verbosity.
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
      tf.logging.__dict__[args.verbosity] / 10)

  # Directory to store the output model and checkpoints.
  output_dir = args.job_dir

  # If reuse_job_dir is False, remove the job_dir if it exists.
  if not args.reuse_job_dir:
    if tf.gfile.Exists(args.job_dir):
      tf.gfile.DeleteRecursively(args.job_dir)
      tf.logging.info("Deleted job_dir {} to avoid re-use".format(args.job_dir))
    else:
      tf.logging.info("No job_dir available to delete")
  else:
    tf.logging.info("Reusing job_dir {} if it exists".format(args.job_dir))

  # Run the training experiment.
  learn_runner.run(
      experiment.generate_experiment_fn(
          min_eval_frequency=args.min_eval_frequency,
          eval_delay_secs=args.eval_delay_secs,
          train_steps=args.train_steps,
          eval_steps=args.eval_steps,
          export_strategies=[
              saved_model_export_utils.make_export_strategy(
                  serving.SERVING_FUNCTIONS[args.export_format],
                  exports_to_keep=1,
                  default_output_alternative_key=None,
              )
          ]),
      run_config=tf.contrib.learn.RunConfig(
          model_dir=output_dir, log_device_placement=True),
      schedule="train_and_evaluate",
      hparams=parameters.HYPER_PARAMS)
def test_train_and_evaluate(self):
  est = TestEstimator()
  export_strategy = saved_model_export_utils.make_export_strategy(
      est, 'export_input')
  ex = experiment.Experiment(
      est,
      train_input_fn='train_input',
      eval_input_fn='eval_input',
      eval_metrics='eval_metrics',
      train_steps=100,
      eval_steps=100,
      export_strategies=export_strategy)
  ex.train_and_evaluate()
  self.assertEqual(1, est.fit_count)
  self.assertEqual(1, est.eval_count)
  self.assertEqual(1, est.export_count)
  self.assertEqual(1, len(est.monitors))
  self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
def experiment_fn(output_dir):
  # Run the experiment.
  return tflearn.Experiment(
      tflearn.Estimator(model_fn=cnn_model, model_dir=output_dir),
      train_input_fn=get_train(),
      eval_input_fn=get_valid(),
      eval_metrics={
          'acc': tflearn.MetricSpec(
              metric_fn=metrics.streaming_accuracy, prediction_key='class')
      },
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      train_steps=TRAIN_STEPS)
def _experiment_fn(output_dir):
  train_input = model.generate_input_fn(
      train_file, num_epochs=num_epochs, batch_size=train_batch_size)
  eval_input = model.generate_input_fn(eval_file, batch_size=eval_batch_size)
  return Experiment(
      model.build_estimator(
          job_dir, embedding_size=embedding_size, hidden_units=hidden_units),
      train_input_fn=train_input,
      eval_input_fn=eval_input,
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              model.serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      **experiment_args)
def make_custom_export_strategy(name, convert_fn, feature_columns,
                                export_input_fn):
  """Makes custom exporter of GTFlow tree format.

  Args:
    name: A string, for the name of the export strategy.
    convert_fn: A function that converts the tree proto to desired format and
      saves it to the desired location.
    feature_columns: A list of feature columns.
    export_input_fn: A function that takes no arguments and returns an
      `InputFnOps`.

  Returns:
    An `ExportStrategy`.
  """
  base_strategy = saved_model_export_utils.make_export_strategy(
      serving_input_fn=export_input_fn)
  input_fn = export_input_fn()
  (sorted_feature_names, dense_floats, sparse_float_indices, _, _,
   sparse_int_indices, _, _) = gbdt_batch.extract_features(
       input_fn.features, feature_columns)

  def export_fn(estimator, export_dir, checkpoint_path=None, eval_result=None):
    """A wrapper to export to SavedModel, and convert it to other formats."""
    result_dir = base_strategy.export(estimator, export_dir, checkpoint_path,
                                      eval_result)
    with ops.Graph().as_default() as graph:
      with tf_session.Session(graph=graph) as sess:
        saved_model_loader.load(sess, [tag_constants.SERVING], result_dir)
        # Note: This is GTFlow internal API and might change.
        ensemble_model = graph.get_operation_by_name(
            "ensemble_model/TreeEnsembleSerialize")
        _, dfec_str = sess.run(ensemble_model.outputs)
        dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
        dtec.ParseFromString(dfec_str)
        # Export the result in the same folder as the saved model.
        convert_fn(dtec, sorted_feature_names, len(dense_floats),
                   len(sparse_float_indices), len(sparse_int_indices),
                   result_dir, eval_result)
    return result_dir

  return export_strategy.ExportStrategy(name, export_fn)
def experiment_fn(output_dir):
  get_train = model.read_dataset(
      train_data_paths, mode=tf.contrib.learn.ModeKeys.TRAIN)
  get_valid = model.read_dataset(
      eval_data_paths, mode=tf.contrib.learn.ModeKeys.EVAL)
  # Run the experiment.
  return tflearn.Experiment(
      tflearn.Estimator(model_fn=model.simple_rnn, model_dir=output_dir),
      train_input_fn=get_train,
      eval_input_fn=get_valid,
      eval_metrics={
          'rmse': tflearn.MetricSpec(
              metric_fn=tf.contrib.metrics.streaming_root_mean_squared_error)
      },
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              model.serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      **experiment_args)
def experiment_fn(output_dir):
  wide, deep = get_wide_deep()
  return tflearn.Experiment(
      tflearn.DNNLinearCombinedRegressor(
          model_dir=output_dir,
          linear_feature_columns=wide,
          dnn_feature_columns=deep,
          dnn_hidden_units=[64, 32]),
      train_input_fn=read_dataset('Training'),
      eval_input_fn=read_dataset('Evaluation'),
      eval_metrics={
          'rmse': tflearn.MetricSpec(
              metric_fn=metrics.streaming_root_mean_squared_error)
      },
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      train_steps=TRAIN_STEPS)
def experiment_fn(output_dir):
  get_train = model.read_dataset(
      hparams['train_data_paths'], tf.estimator.ModeKeys.TRAIN,
      hparams['train_batch_size'])
  get_valid = model.read_dataset(
      hparams['eval_data_paths'], tf.estimator.ModeKeys.EVAL, 1000)
  eval_freq = max(1, min(2000, hparams['train_steps'] / 5))
  return tf.contrib.learn.Experiment(
      estimator=create_custom_estimator(output_dir, hparams),
      train_input_fn=get_train,
      eval_input_fn=get_valid,
      train_steps=hparams['train_steps'],
      eval_steps=1,
      min_eval_frequency=eval_freq,
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              model.serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ])
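# A small sketch of the eval-frequency clamp above: train_steps / 5, bounded
# to the range [1, 2000]. The train_steps values below are hypothetical.
for train_steps in (3, 5000, 100000):
  print(max(1, min(2000, train_steps / 5)))  # -> 1, 1000, 2000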
def _experiment_fn(output_dir):
  input_fn = model.generate_csv_input_fn
  train_input = input_fn(
      train_data_paths, num_epochs=num_epochs, batch_size=train_batch_size)
  eval_input = input_fn(
      eval_data_paths,
      batch_size=eval_batch_size,
      mode=tf.contrib.learn.ModeKeys.EVAL)
  return Experiment(
      model.build_estimator(output_dir, hidden_units=hidden_units),
      train_input_fn=train_input,
      eval_input_fn=eval_input,
      export_strategies=[
          saved_model_export_utils.make_export_strategy(
              model.serving_input_fn,
              default_output_alternative_key=None,
              exports_to_keep=1)
      ],
      eval_metrics=model.get_eval_metrics(),
      # min_eval_frequency=1000,  # Change this to speed up training on large datasets.
      **experiment_args)
def test_test(self):
  for est in self._estimators_for_tests():
    exp_strategy = saved_model_export_utils.make_export_strategy(
        est,
        None if isinstance(est, core_estimator.Estimator) else 'export_input',
        exports_to_keep=None)
    if isinstance(est, core_estimator.Estimator):
      eval_metrics = None
      saving_listeners = 'saving_listeners'
    else:
      eval_metrics = 'eval_metrics'
      saving_listeners = None
    ex = experiment.Experiment(
        est,
        train_input_fn='train_input',
        eval_input_fn='eval_input',
        export_strategies=(exp_strategy,),
        eval_metrics=eval_metrics,
        saving_listeners=saving_listeners)
    ex.test()
    self.assertEqual(1, est.fit_count)
    self.assertEqual(1, est.eval_count)
    self.assertEqual(1, est.export_count)
    choices=['JSON', 'CSV', 'EXAMPLE'],
    default='JSON')
args = parser.parse_args()

# Set python level verbosity.
tf.logging.set_verbosity(args.verbosity)
# Set C++ graph execution level verbosity.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
    tf.logging.__dict__[args.verbosity] / 10)

# Run the training job. learn_runner pulls configuration information from
# environment variables using tf.learn.RunConfig and uses this configuration
# to conditionally execute Experiment or parameter server code.
learn_runner.run(
    generate_experiment_fn(
        min_eval_frequency=args.min_eval_frequency,
        eval_delay_secs=args.eval_delay_secs,
        train_steps=args.train_steps,
        eval_steps=args.eval_steps,
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                model.SERVING_FUNCTIONS[args.export_format],
                exports_to_keep=1)
        ]),
    run_config=tf.contrib.learn.RunConfig(model_dir=args.job_dir),
    hparams=hparam.HParams(**args.__dict__))
# Compatibility warning: this will move to tf.estimator.EstimatorSpec in TF 1.2.
return tf.contrib.learn.ModelFnOps(
    mode=mode,
    # Name these fields as you like.
    predictions={"predictions": predict, "classes": classes},
    loss=loss,
    train_op=train_op,
    eval_metric_ops=eval_metrics)

# Compatibility warning: this will move to tf.estimator.run_config.RunConfig in TF 1.2.
training_config = tf.contrib.learn.RunConfig(
    save_checkpoints_secs=None, save_checkpoints_steps=1000)

# This will export a model at every checkpoint, including the transformations
# needed for online predictions.
# Bug: exports_to_keep=None is mandatory, otherwise training crashes.
# Compatibility warning: make_export_strategy is currently in contrib. It will
# move in TF 1.2.
export_strategy = saved_model_export_utils.make_export_strategy(
    serving_input_fn=serving_input_fn, exports_to_keep=None)

# The Experiment is an Estimator with data loading functions and other
# parameters.
def experiment_fn_with_params(output_dir, data_dir, **kwargs):
  ITERATIONS = 10000
  # Loads training and eval data in memory.
  mnist = mnist_data.read_data_sets(
      data_dir, reshape=True, one_hot=False, validation_size=0)
  # Compatibility warning: Experiment will move.
  return tf.contrib.learn.Experiment(
      # Compatibility warning: this will move to tf.estimator.Estimator.
      estimator=tf.contrib.learn.Estimator(
          model_fn=conv_model, model_dir=output_dir, config=training_config),
      train_input_fn=lambda: train_data_input_fn(mnist),
      eval_input_fn=lambda: eval_data_input_fn(mnist),
      train_steps=ITERATIONS,
      eval_steps=1,
      min_eval_frequency=1000,
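# A hedged sketch of the core-API equivalents the compatibility comments
# above anticipate (TF >= 1.2): tf.estimator.RunConfig replaces
# tf.contrib.learn.RunConfig, and tf.estimator.EstimatorSpec replaces
# ModelFnOps. The model_fn body is elided; the fields map one-to-one.
import tensorflow as tf

training_config = tf.estimator.RunConfig(
    save_checkpoints_secs=None, save_checkpoints_steps=1000)
# Inside a model_fn, the contrib return above would become:
# return tf.estimator.EstimatorSpec(
#     mode=mode,
#     predictions={"predictions": predict, "classes": classes},
#     loss=loss,
#     train_op=train_op,
#     eval_metric_ops=eval_metrics)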
eval_metrics = conv_model_eval_metrics(classes, Y_, mode)
return learn.ModelFnOps(
    mode=mode,
    # You can name the fields of your predictions dictionary as you like.
    predictions={"predictions": predict, "classes": classes},
    loss=loss,
    train_op=train_op,
    eval_metric_ops=eval_metrics)

# Configuration to save a checkpoint every 1000 steps.
training_config = tf.contrib.learn.RunConfig(
    save_checkpoints_secs=None,
    save_checkpoints_steps=1000,
    gpu_memory_fraction=0.9)

# This will export a model at every checkpoint, including the transformations
# needed for online predictions. Note: make_export_strategy takes the serving
# fn through its serving_input_fn parameter.
export_strategy = saved_model_export_utils.make_export_strategy(
    serving_input_fn=serving_input_fn)

# The Experiment is an Estimator with data loading functions and other
# parameters.
def experiment_fn_with_params(output_dir, data, **kwargs):
  ITERATIONS = 10000
  # Loads training and eval data in memory.
  mnist = input_data.read_data_sets(data)
  return learn.Experiment(
      estimator=learn.Estimator(
          model_fn=conv_model, model_dir=output_dir, config=training_config),
      train_input_fn=lambda: train_data_input_fn(mnist),
      eval_input_fn=lambda: eval_data_input_fn(mnist),
      train_steps=ITERATIONS,
      eval_steps=1,
      export_strategies=export_strategy)
args = parser.parse_args()

# Set python level verbosity.
tf.logging.set_verbosity(args.verbosity)
# Set C++ graph execution level verbosity.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
    tf.logging.__dict__[args.verbosity] / 10)

# [START learn-runner]
# Run the training job. learn_runner pulls configuration information from
# environment variables using tf.learn.RunConfig and uses this configuration
# to conditionally execute Experiment or parameter server code.
learn_runner.run(
    generate_experiment_fn(
        min_eval_frequency=args.min_eval_frequency,
        eval_delay_secs=args.eval_delay_secs,
        train_steps=args.train_steps,
        eval_steps=args.eval_steps,
        export_strategies=[
            saved_model_export_utils.make_export_strategy(
                model.SERVING_FUNCTIONS[args.export_format],
                exports_to_keep=1,
                default_output_alternative_key=None,
            )
        ]),
    run_config=run_config.RunConfig(model_dir=args.job_dir),
    hparams=hparam.HParams(**args.__dict__))
# [END learn-runner]