def train_and_evaluate_estimator():
  """Runs Estimator distributed training."""

  # The tf.estimator.RunConfig automatically parses the TF_CONFIG environment
  # variables during construction.
  # For more information on how tf.estimator.RunConfig uses TF_CONFIG, see
  # https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig.
  config = tf.estimator.RunConfig(
      tf_random_seed=42, model_dir=os.environ["MODEL_DIR"])
  head = tf.contrib.estimator.regression_head(
      loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
  subnetwork_generator = SimpleGenerator([
      _DNNBuilder("dnn1", config, layer_size=3),
      _DNNBuilder("dnn2", config, layer_size=4),
      _DNNBuilder("dnn3", config, layer_size=5),
  ])
  estimator = Estimator(
      head=head,
      subnetwork_generator=subnetwork_generator,
      max_iteration_steps=100,
      force_grow=True,
      delay_secs_per_worker=.2,
      max_worker_delay_secs=1,
      worker_wait_secs=.5,
      # Set low timeout to reduce wait time for failures.
      worker_wait_timeout_secs=60,
      config=config)

  def input_fn():
    xor_features = [[1., 0.], [0., 0.], [0., 1.], [1., 1.]]
    xor_labels = [[1.], [0.], [1.], [0.]]
    input_features = {"x": tf.constant(xor_features, name="x")}
    input_labels = tf.constant(xor_labels, name="y")
    return input_features, input_labels

  # Train for five iterations (max_steps=500 / max_iteration_steps=100).
  train_spec = tf.estimator.TrainSpec(input_fn=input_fn, max_steps=500)
  eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1)

  # Calling train_and_evaluate is the official way to perform distributed
  # training with an Estimator. Calling Estimator#train directly results
  # in an error when TF_CONFIG is set up for a cluster.
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
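# Illustrative sketch: the cluster layout that tf.estimator.RunConfig reads
# from the TF_CONFIG environment variable referenced above. The addresses and
# task assignment below are placeholder assumptions; the real test harness
# builds one TF_CONFIG per subprocess.
import json
import os

os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {
        "chief": ["localhost:2220"],
        "worker": ["localhost:2221", "localhost:2222"],
        "ps": ["localhost:2223"],
    },
    # Each process is told which single task in the cluster it plays.
    "task": {"type": "worker", "index": 0},
})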
def train_and_evaluate_estimator():
  """Runs Estimator distributed training."""

  # The tf.estimator.RunConfig automatically parses the TF_CONFIG environment
  # variables during construction.
  # For more information on how tf.estimator.RunConfig uses TF_CONFIG, see
  # https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig.
  config = tf.estimator.RunConfig(
      tf_random_seed=42,
      save_checkpoints_steps=10,
      save_checkpoints_secs=None,
      # Keep all checkpoints to avoid checkpoint GC causing failures during
      # evaluation.
      # TODO: Prevent checkpoints that are currently being
      # evaluated by another process from being garbage collected.
      keep_checkpoint_max=None,
      model_dir=FLAGS.model_dir,
      session_config=tf_compat.v1.ConfigProto(
          log_device_placement=False,
          # Ignore other workers; only talk to parameter servers.
          # Otherwise, when a chief/worker terminates, the others will hang.
          device_filters=["/job:ps"]))

  def input_fn():
    input_features = {"x": tf.constant(features, name="x")}
    input_labels = tf.constant(labels, name="y")
    return tf.data.Dataset.from_tensors(
        (input_features, input_labels)).repeat()

  kwargs = {
      "max_iteration_steps": 100,
      "force_grow": True,
      "delay_secs_per_worker": .2,
      "max_worker_delay_secs": 1,
      "worker_wait_secs": 1,
      # Set low timeout to reduce wait time for failures.
      "worker_wait_timeout_secs": 180,
      "evaluator": Evaluator(input_fn, steps=10),
      "config": config
  }

  head = head_lib._regression_head(  # pylint: disable=protected-access
      loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
  features = [[1., 0.], [0., 0.], [0., 1.], [1., 1.]]
  labels = [[1.], [0.], [1.], [0.]]

  estimator_type = FLAGS.estimator_type
  if FLAGS.placement_strategy == "round_robin":
    kwargs["experimental_placement_strategy"] = RoundRobinStrategy()

  if estimator_type == "autoensemble":
    feature_columns = [tf.feature_column.numeric_column("x", shape=[2])]
    # pylint: disable=g-long-lambda
    # TODO: Switch optimizers to tf.keras.optimizers.Adam once the
    # distribution bug is fixed.
    candidate_pool = {
        "linear":
            tf.estimator.LinearEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=lambda: tf_compat.v1.train.AdamOptimizer(
                    learning_rate=.001)),
        "dnn":
            tf.estimator.DNNEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=lambda: tf_compat.v1.train.AdamOptimizer(
                    learning_rate=.001),
                hidden_units=[3]),
        "dnn2":
            tf.estimator.DNNEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=lambda: tf_compat.v1.train.AdamOptimizer(
                    learning_rate=.001),
                hidden_units=[10, 10]),
    }
    # pylint: enable=g-long-lambda

    estimator = AutoEnsembleEstimator(
        head=head, candidate_pool=candidate_pool, **kwargs)
  elif estimator_type == "estimator":
    subnetwork_generator = SimpleGenerator([
        _DNNBuilder("dnn1", config, layer_size=3),
        _DNNBuilder("dnn2", config, layer_size=4),
        _DNNBuilder("dnn3", config, layer_size=5),
    ])

    estimator = Estimator(
        head=head, subnetwork_generator=subnetwork_generator, **kwargs)
  elif FLAGS.estimator_type == "autoensemble_trees_multiclass":
    if not bt_losses:
      logging.warning(
          "Skipped autoensemble_trees_multiclass test since contrib is missing."
      )
      return
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(  # pylint: disable=protected-access
        n_classes=n_classes,
        loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)

    def tree_loss_fn(labels, logits):
      result = bt_losses.per_example_maxent_loss(
          labels=labels, logits=logits, num_classes=n_classes, weights=None)
      return result[0]

    tree_head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(  # pylint: disable=protected-access
        loss_fn=tree_loss_fn,
        n_classes=n_classes,
        loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
    labels = [[1], [0], [1], [2]]
    feature_columns = [tf.feature_column.numeric_column("x", shape=[2])]
    # TODO: Switch optimizers to tf.keras.optimizers.Adam once the
    # distribution bug is fixed.
    candidate_pool = lambda config: {  # pylint: disable=g-long-lambda
        "linear":
            tf.estimator.LinearEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=tf_compat.v1.train.AdamOptimizer(learning_rate=.001),
                config=config),
        "gbdt":
            tf.estimator.BoostedTreesEstimator(
                head=tree_head,
                feature_columns=feature_columns,
                n_trees=10,
                n_batches_per_layer=1,
                center_bias=False,
                config=config),
    }

    estimator = AutoEnsembleEstimator(
        head=head, candidate_pool=candidate_pool, **kwargs)
  elif estimator_type == "estimator_with_experimental_multiworker_strategy":

    def _model_fn(features, labels, mode):
      """Test model_fn."""
      layer = tf.keras.layers.Dense(1)
      logits = layer(features["x"])

      if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {"logits": logits}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

      loss = tf.losses.mean_squared_error(
          labels=labels,
          predictions=logits,
          reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)

      if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)

      if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(0.2)
        train_op = optimizer.minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    if json.loads(os.environ["TF_CONFIG"])["task"]["type"] == "evaluator":
      # The evaluator job would crash if MultiWorkerMirroredStrategy is called.
      distribution = None
    else:
      distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()

    multiworker_config = tf.estimator.RunConfig(
        tf_random_seed=42,
        model_dir=FLAGS.model_dir,
        train_distribute=distribution,
        session_config=tf_compat.v1.ConfigProto(log_device_placement=False))
    # TODO: Replace with adanet.Estimator. Currently this just verifies
    # that the distributed testing framework supports distribute strategies.
    estimator = tf.estimator.Estimator(
        model_fn=_model_fn, config=multiworker_config)

  train_hooks = [
      tf.estimator.ProfilerHook(save_steps=50, output_dir=FLAGS.model_dir)
  ]
  # Train for three iterations.
  train_spec = tf.estimator.TrainSpec(
      input_fn=input_fn, max_steps=300, hooks=train_hooks)
  eval_spec = tf.estimator.EvalSpec(
      input_fn=input_fn, steps=1, start_delay_secs=.5, throttle_secs=.05)

  # Calling train_and_evaluate is the official way to perform distributed
  # training with an Estimator. Calling Estimator#train directly results
  # in an error when TF_CONFIG is set up for a cluster.
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
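# Illustrative sketch of how a launcher could start one subprocess per cluster
# task, each with its own TF_CONFIG, before those processes call
# train_and_evaluate_estimator(). The runner script name, flag values, ports,
# and model directory are assumptions for illustration only.
import json
import os
import subprocess
import sys

cluster = {
    "chief": ["localhost:2220"],
    "worker": ["localhost:2221"],
    "ps": ["localhost:2222"],
}
processes = []
for task_type, addresses in cluster.items():
  for task_index in range(len(addresses)):
    env = dict(os.environ)
    env["TF_CONFIG"] = json.dumps({
        "cluster": cluster,
        "task": {"type": task_type, "index": task_index},
    })
    processes.append(
        subprocess.Popen(
            [
                sys.executable,
                "estimator_distributed_test_runner.py",  # assumed runner script
                "--estimator_type=estimator",
                "--model_dir=/tmp/adanet_test_model_dir",  # placeholder path
                "--placement_strategy=round_robin",
            ],
            env=env))
for process in processes:
  process.wait()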
def train_and_evaluate_estimator():
  """Runs Estimator distributed training."""

  # The tf.estimator.RunConfig automatically parses the TF_CONFIG environment
  # variables during construction.
  # For more information on how tf.estimator.RunConfig uses TF_CONFIG, see
  # https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig.
  config = tf.estimator.RunConfig(
      tf_random_seed=42,
      model_dir=FLAGS.model_dir,
      session_config=tf.ConfigProto(
          log_device_placement=False,
          # Ignore other workers; only talk to parameter servers.
          # Otherwise, when a chief/worker terminates, the others will hang.
          device_filters=["/job:ps"]))
  head = tf.contrib.estimator.regression_head(
      loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
  kwargs = {
      "max_iteration_steps": 100,
      "force_grow": True,
      "delay_secs_per_worker": .2,
      "max_worker_delay_secs": 1,
      "worker_wait_secs": .5,
      # Set low timeout to reduce wait time for failures.
      "worker_wait_timeout_secs": 60,
      "config": config
  }

  if FLAGS.estimator_type == "autoensemble":
    feature_columns = [tf.feature_column.numeric_column("x", shape=[2])]
    if hasattr(tf.estimator, "LinearEstimator"):
      linear_estimator_fn = tf.estimator.LinearEstimator
    else:
      linear_estimator_fn = tf.contrib.estimator.LinearEstimator
    if hasattr(tf.estimator, "DNNEstimator"):
      dnn_estimator_fn = tf.estimator.DNNEstimator
    else:
      dnn_estimator_fn = tf.contrib.estimator.DNNEstimator
    candidate_pool = {
        "linear":
            linear_estimator_fn(
                head=head,
                feature_columns=feature_columns,
                optimizer=tf.train.AdamOptimizer(learning_rate=.001)),
        "dnn":
            dnn_estimator_fn(
                head=head,
                feature_columns=feature_columns,
                optimizer=tf.train.AdamOptimizer(learning_rate=.001),
                hidden_units=[3]),
        "dnn2":
            dnn_estimator_fn(
                head=head,
                feature_columns=feature_columns,
                optimizer=tf.train.AdamOptimizer(learning_rate=.001),
                hidden_units=[5])
    }

    estimator = AutoEnsembleEstimator(
        head=head, candidate_pool=candidate_pool, **kwargs)
  elif FLAGS.estimator_type == "estimator":
    subnetwork_generator = SimpleGenerator([
        _DNNBuilder("dnn1", config, layer_size=3),
        _DNNBuilder("dnn2", config, layer_size=4),
        _DNNBuilder("dnn3", config, layer_size=5),
    ])

    estimator = Estimator(
        head=head, subnetwork_generator=subnetwork_generator, **kwargs)

  def input_fn():
    xor_features = [[1., 0.], [0., 0.], [0., 1.], [1., 1.]]
    xor_labels = [[1.], [0.], [1.], [0.]]
    input_features = {"x": tf.constant(xor_features, name="x")}
    input_labels = tf.constant(xor_labels, name="y")
    return input_features, input_labels

  # Train for three iterations.
  train_spec = tf.estimator.TrainSpec(input_fn=input_fn, max_steps=300)
  eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1)

  # Calling train_and_evaluate is the official way to perform distributed
  # training with an Estimator. Calling Estimator#train directly results
  # in an error when TF_CONFIG is set up for a cluster.
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
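# Illustrative sketch: the hasattr checks above, factored into a small helper
# that prefers tf.estimator and falls back to tf.contrib.estimator on older
# TensorFlow 1.x releases. The helper name is an assumption, not an adanet API.
import tensorflow as tf


def _canonical_estimator_class(name):
  """Returns the tf.estimator class if available, else the contrib version."""
  if hasattr(tf.estimator, name):
    return getattr(tf.estimator, name)
  return getattr(tf.contrib.estimator, name)


# Usage mirroring the candidate_pool construction above:
linear_estimator_fn = _canonical_estimator_class("LinearEstimator")
dnn_estimator_fn = _canonical_estimator_class("DNNEstimator")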
def train_and_evaluate_estimator():
  """Runs Estimator distributed training."""

  # The tf.estimator.RunConfig automatically parses the TF_CONFIG environment
  # variables during construction.
  # For more information on how tf.estimator.RunConfig uses TF_CONFIG, see
  # https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig.
  config = tf.estimator.RunConfig(
      tf_random_seed=42,
      model_dir=FLAGS.model_dir,
      session_config=tf_compat.v1.ConfigProto(
          log_device_placement=False,
          # Ignore other workers; only talk to parameter servers.
          # Otherwise, when a chief/worker terminates, the others will hang.
          device_filters=["/job:ps"]))
  kwargs = {
      "max_iteration_steps": 100,
      "force_grow": True,
      "delay_secs_per_worker": .2,
      "max_worker_delay_secs": 1,
      "worker_wait_secs": .5,
      # Set low timeout to reduce wait time for failures.
      "worker_wait_timeout_secs": 60,
      "config": config
  }

  head = regression_head.RegressionHead(
      loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE)
  features = [[1., 0.], [0., 0.], [0., 1.], [1., 1.]]
  labels = [[1.], [0.], [1.], [0.]]

  if FLAGS.placement_strategy == "round_robin":
    kwargs["experimental_placement_strategy"] = RoundRobinStrategy()

  if FLAGS.estimator_type == "autoensemble":
    feature_columns = [tf.feature_column.numeric_column("x", shape=[2])]
    candidate_pool = {
        "linear":
            tf.estimator.LinearEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=lambda: tf.keras.optimizers.Adam(lr=.001)),
        "dnn":
            tf.estimator.DNNEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=lambda: tf.keras.optimizers.Adam(lr=.001),
                hidden_units=[3]),
        "dnn2":
            tf.estimator.DNNEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=lambda: tf.keras.optimizers.Adam(lr=.001),
                hidden_units=[5]),
    }

    estimator = AutoEnsembleEstimator(
        head=head, candidate_pool=candidate_pool, **kwargs)
  elif FLAGS.estimator_type == "estimator":
    subnetwork_generator = SimpleGenerator([
        _DNNBuilder("dnn1", config, layer_size=3),
        _DNNBuilder("dnn2", config, layer_size=4),
        _DNNBuilder("dnn3", config, layer_size=5),
    ])

    estimator = Estimator(
        head=head, subnetwork_generator=subnetwork_generator, **kwargs)
  elif FLAGS.estimator_type == "autoensemble_trees_multiclass":
    n_classes = 3
    head = multi_class_head.MultiClassHead(
        n_classes=n_classes, loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE)

    def tree_loss_fn(labels, logits):
      result = bt_losses.per_example_maxent_loss(
          labels=labels, logits=logits, num_classes=n_classes, weights=None)
      return result[0]

    tree_head = multi_class_head.MultiClassHead(
        loss_fn=tree_loss_fn,
        n_classes=n_classes,
        loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE)
    labels = [[1], [0], [1], [2]]
    feature_columns = [tf.feature_column.numeric_column("x", shape=[2])]
    candidate_pool = lambda config: {  # pylint: disable=g-long-lambda
        "linear":
            tf.estimator.LinearEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=tf.keras.optimizers.Adam(lr=.001),
                config=config),
        "gbdt":
            CoreGradientBoostedDecisionTreeEstimator(
                head=tree_head,
                learner_config=learner_pb2.LearnerConfig(num_classes=n_classes),
                examples_per_layer=8,
                num_trees=None,
                center_bias=False,  # Required for multi-class.
                feature_columns=feature_columns,
                config=config),
    }

    estimator = AutoEnsembleEstimator(
        head=head, candidate_pool=candidate_pool, **kwargs)

  def input_fn():
    input_features = {"x": tf.constant(features, name="x")}
    input_labels = tf.constant(labels, name="y")
    return input_features, input_labels

  train_hooks = [
      tf.estimator.ProfilerHook(save_steps=50, output_dir=FLAGS.model_dir)
  ]
  # Train for three iterations.
  train_spec = tf.estimator.TrainSpec(
      input_fn=input_fn, max_steps=300, hooks=train_hooks)
  eval_spec = tf.estimator.EvalSpec(
      input_fn=input_fn, steps=1, start_delay_secs=.5, throttle_secs=.5)

  # Calling train_and_evaluate is the official way to perform distributed
  # training with an Estimator. Calling Estimator#train directly results
  # in an error when TF_CONFIG is set up for a cluster.
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
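# Illustrative sketch: when TF_CONFIG is not set (single-process debugging),
# an Estimator built the same way can be driven directly with
# Estimator#train/#evaluate instead of tf.estimator.train_and_evaluate.
# The helper name and step counts are assumptions for illustration.
import os


def run_locally(estimator, input_fn):
  """Trains and evaluates in-process when no cluster spec is configured."""
  assert "TF_CONFIG" not in os.environ, "Use train_and_evaluate on a cluster."
  estimator.train(input_fn=input_fn, max_steps=300)
  return estimator.evaluate(input_fn=input_fn, steps=1)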
def test_summaries(self):
  """Tests that summaries are written to candidate directory."""
  run_config = tf.estimator.RunConfig(
      tf_random_seed=42,
      log_step_count_steps=2,
      save_summary_steps=2,
      model_dir=self.test_subdirectory)
  subnetwork_generator = SimpleGenerator([_SimpleBuilder("dnn")])
  report_materializer = ReportMaterializer(
      input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
  estimator = Estimator(
      head=regression_head.RegressionHead(
          loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
      subnetwork_generator=subnetwork_generator,
      report_materializer=report_materializer,
      max_iteration_steps=10,
      config=run_config)
  train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
  estimator.train(input_fn=train_input_fn, max_steps=3)

  ensemble_loss = 1.52950
  self.assertAlmostEqual(
      ensemble_loss,
      tu.check_eventfile_for_keyword("loss", self.test_subdirectory),
      places=3)
  self.assertIsNotNone(
      tu.check_eventfile_for_keyword("global_step/sec",
                                     self.test_subdirectory))
  self.assertEqual(
      0.,
      tu.check_eventfile_for_keyword("iteration/adanet/iteration",
                                     self.test_subdirectory))

  subnetwork_subdir = os.path.join(self.test_subdirectory,
                                   "subnetwork/t0_dnn")
  self.assertAlmostEqual(
      3., tu.check_eventfile_for_keyword("scalar", subnetwork_subdir),
      places=3)
  self.assertEqual(
      (3, 3, 1), tu.check_eventfile_for_keyword("image", subnetwork_subdir))
  self.assertAlmostEqual(
      5., tu.check_eventfile_for_keyword("nested/scalar", subnetwork_subdir),
      places=3)

  ensemble_subdir = os.path.join(
      self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
  self.assertAlmostEqual(
      ensemble_loss,
      tu.check_eventfile_for_keyword(
          "adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir),
      places=1)
  self.assertAlmostEqual(
      0.,
      tu.check_eventfile_for_keyword(
          "complexity_regularization/adanet/adanet_weighted_ensemble",
          ensemble_subdir),
      places=3)
  self.assertAlmostEqual(
      1.,
      tu.check_eventfile_for_keyword(
          "mixture_weight_norms/adanet/"
          "adanet_weighted_ensemble/subnetwork_0", ensemble_subdir),
      places=3)
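# Illustrative sketch of what a utility like tu.check_eventfile_for_keyword
# might do for scalar summaries: scan the event files in a directory and
# return the last value whose tag contains the keyword. This is an assumption
# about the test helper, not its actual implementation (which also handles
# image summaries and the (height, width, channels) check above).
import glob
import os

import tensorflow.compat.v1 as tf_v1


def last_scalar_for_keyword(logdir, keyword):
  """Returns the most recent scalar summary value whose tag matches keyword."""
  value = None
  for events_path in sorted(glob.glob(os.path.join(logdir, "events.out.*"))):
    for event in tf_v1.train.summary_iterator(events_path):
      for summary_value in event.summary.value:
        if keyword in summary_value.tag:
          value = summary_value.simple_value
  return value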