def _model_fn_scaffold(features, labels, mode):
  """Minimal model_fn whose Scaffold carries the enclosing `_init_fn`."""
  del features, labels  # Unused by this trivial model.
  return model_fn_lib.EstimatorSpec(
      mode=mode,
      loss=constant_op.constant(0.),
      train_op=constant_op.constant(0.),
      scaffold=training.Scaffold(init_fn=_init_fn))
def _evaluate(self, session, step):
  """Runs one evaluation pass, seeding eval variables from training values.

  Reads the current training-variable values out of `session`, builds a
  feed dict mapping each variable's placeholder to its value, and installs
  a Scaffold init_fn that pushes those values into the eval graph before
  evaluation runs.

  Args:
    session: Session holding the training-graph variable values.
    step: Global step used to update the trigger timer.

  Returns:
    The dict of evaluation results from the estimator's evaluation run.
  """
  train_values = session.run(self._var_name_to_train_var)
  logging.info('Building placeholders.')
  feed_map = {
      self._var_name_to_placeholder[name]: train_values[name]
      for name in train_values
  }

  def feed_variables(scaffold, session):
    # Scaffold init_fn signature is (scaffold, session); scaffold unused.
    del scaffold
    session.run(self._var_feed_op, feed_dict=feed_map)

  logging.info('Building scaffold.')
  scaffold = training.Scaffold(
      init_fn=feed_variables, copy_from_scaffold=self._scaffold)
  with self._graph.as_default():
    eval_results = self._estimator._evaluate_run(
        checkpoint_path=None,
        scaffold=scaffold,
        update_op=self._update_op,
        eval_dict=self._eval_dict,
        all_hooks=self._all_hooks,
        output_dir=self._eval_dir)
  logging.info('Eval done.')
  self._timer.update_last_triggered_step(step)
  return eval_results
def _train_model(self, input_fn, hooks):
  """Builds the training graph and runs the training loop to completion.

  Constructs a fresh graph, calls the model_fn in FIT mode, assembles
  logging/NaN/checkpoint hooks, and drives a MonitoredTrainingSession
  until one of the hooks requests a stop.

  Args:
    input_fn: Callable returning a `(features, labels)` pair; invoked with
      the input pipeline pinned to CPU.
    hooks: Caller-supplied list of `SessionRunHook`s, run on all workers.

  Returns:
    The loss value from the final training step, or None if no step ran.
  """
  all_hooks = []
  with ops.Graph().as_default() as g, g.device(self._device_fn):
    random_seed.set_random_seed(self._config.tf_random_seed)
    global_step_tensor = training.create_global_step(g)
    # Input pipeline is built on CPU regardless of the model's device_fn.
    with ops.device('/cpu:0'):
      features, labels = input_fn()
    estimator_spec = self._call_model_fn(features, labels,
                                         model_fn_lib.ModeKeys.FIT)
    ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
    # Always monitor for NaN loss and log loss/step every 100 iterations.
    all_hooks.extend([
        training.NanTensorHook(estimator_spec.loss),
        training.LoggingTensorHook(
            {
                'loss': estimator_spec.loss,
                'step': global_step_tensor
            }, every_n_iter=100)
    ])
    all_hooks.extend(hooks)
    all_hooks.extend(estimator_spec.training_hooks)

    scaffold = estimator_spec.scaffold or training.Scaffold()
    if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
      # Neither the scaffold nor the graph provides a saver: register a
      # default sharded saver so checkpointing still works.
      ops.add_to_collection(ops.GraphKeys.SAVERS, training.Saver(
          sharded=True,
          max_to_keep=self._config.keep_checkpoint_max,
          defer_build=True))

    chief_hooks = []
    if (self._config.save_checkpoints_secs or
        self._config.save_checkpoints_steps):
      # Only add our own CheckpointSaverHook if no hook list already
      # contains one (user hooks take precedence).
      saver_hook_exists = any([
          isinstance(h, training.CheckpointSaverHook)
          for h in (all_hooks + chief_hooks +
                    estimator_spec.training_chief_hooks)
      ])
      if not saver_hook_exists:
        chief_hooks = [
            training.CheckpointSaverHook(
                self._model_dir,
                save_secs=self._config.save_checkpoints_secs,
                save_steps=self._config.save_checkpoints_steps,
                scaffold=scaffold)
        ]
    with training.MonitoredTrainingSession(
        master=self._config.master,
        is_chief=self._config.is_chief,
        checkpoint_dir=self._model_dir,
        scaffold=scaffold,
        hooks=all_hooks,
        chief_only_hooks=chief_hooks + estimator_spec.training_chief_hooks,
        save_checkpoint_secs=0,  # Saving is handled by a hook.
        save_summaries_steps=self._config.save_summary_steps,
        config=config_pb2.ConfigProto(allow_soft_placement=True)) as mon_sess:
      loss = None
      while not mon_sess.should_stop():
        _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
    return loss
def model_fn(features, labels, mode):
  """Constant model_fn attaching an explicit Saver through its Scaffold."""
  del features, labels  # Unused.
  metric_ops = {
      'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
  }
  return estimator_lib.EstimatorSpec(
      mode,
      loss=constant_op.constant(3.),
      scaffold=training.Scaffold(saver=training.Saver()),
      train_op=constant_op.constant(5.),
      eval_metric_ops=metric_ops)
def _model_fn_scaffold(features, labels, mode):
  """Model_fn whose Scaffold carries a mock-wrapped Saver for call tracking."""
  del features, labels  # Unused.
  variables.Variable(1., name='weight')
  real_saver = saver.Saver()
  # Wrap a real saver so the mock delegates behavior but records calls;
  # saver_def is forwarded because Scaffold reads it.
  self.mock_saver = test.mock.Mock(
      wraps=real_saver, saver_def=real_saver.saver_def)
  spec = model_fn_lib.EstimatorSpec(
      mode=mode,
      predictions=constant_op.constant([[1.]]),
      loss=constant_op.constant(0.),
      train_op=constant_op.constant(0.),
      scaffold=training.Scaffold(saver=self.mock_saver))
  return spec
def model_fn(features, labels, mode):
  """Constant model_fn using a Keras-style Mean metric object for eval."""
  del features, labels  # Unused.
  mean_metric = metrics_module.Mean()
  mean_metric.update_state(constant_op.constant(2.))
  return estimator_lib.EstimatorSpec(
      mode,
      loss=constant_op.constant(3.),
      scaffold=training.Scaffold(saver=training.Saver()),
      train_op=constant_op.constant(5.),
      eval_metric_ops={'mean_of_features': mean_metric})
def model_fn(features, labels, mode):
  """Constant model_fn that supplies a no-op init_fn through its Scaffold."""
  del features, labels  # Unused.

  def init_fn(scaffold, session):
    del scaffold, session  # Deliberate no-op initializer.

  return estimator_lib.EstimatorSpec(
      mode,
      loss=constant_op.constant(3.),
      scaffold=training.Scaffold(init_fn=init_fn),
      train_op=constant_op.constant(5.),
      eval_metric_ops={
          'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
      })
def model_fn(features, labels, mode):
  """Model_fn with a saveable-objects-only variable and a custom init_op."""
  del features, labels  # Unused.
  # Placing the variable only in SAVEABLE_OBJECTS keeps it out of the
  # default global-variables initializer, so the Scaffold's init_op must
  # initialize it explicitly.
  w = variables.VariableV1(
      initial_value=[0.],
      trainable=False,
      collections=[ops.GraphKeys.SAVEABLE_OBJECTS])
  init_op = control_flow_ops.group(
      [w.initializer, training.get_global_step().initializer])
  return estimator_lib.EstimatorSpec(
      mode,
      loss=constant_op.constant(3.),
      scaffold=training.Scaffold(init_op=init_op),
      train_op=constant_op.constant(5.),
      eval_metric_ops={
          'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
      })
def _predict(self, run_ctx, step):
  """Runs prediction with variables fed from the training session.

  Copies current training-variable values into a fresh MonitoredSession
  (via a Scaffold init_fn), streams predictions to the handler, then
  finalizes the handler and updates the trigger timer.

  Args:
    run_ctx: SessionRunContext whose session holds the training values.
    step: Global step passed to the handler's setup/finalize calls.
  """
  train_values = run_ctx.session.run(self._var_name_to_train_var)
  logging.info('Building placeholders.')
  feed_map = {
      self._var_name_to_placeholder[name]: train_values[name]
      for name in train_values
  }

  def feed_variables(scaffold, session):
    del scaffold  # Unused; init_fn signature is (scaffold, session).
    session.run(self._var_feed_op, feed_dict=feed_map)

  logging.info('Building scaffold.')
  scaffold = training.Scaffold(init_fn=feed_variables)
  with self._graph.as_default():
    session_creator = monitored_session.ChiefSessionCreator(
        scaffold=scaffold,
        checkpoint_filename_with_path=None,
        master=run_ctx.session.sess_str)
    self._handler.setup(step)
    logging.info('Setup done.')
    with monitored_session.MonitoredSession(
        session_creator=session_creator,
        hooks=self._all_hooks) as predict_session:
      while not predict_session.should_stop():
        logging.info('Predicting.... %s', self._predictions)
        preds_evaluated = predict_session.run(self._predictions)
        if isinstance(self._predictions, dict):
          # Dict predictions: emit one per-example dict per batch row.
          batch_length = self._estimator._extract_batch_length(
              preds_evaluated)
          for i in range(batch_length):
            self._handler.handle_prediction({
                key: value[i]
                for key, value in six.iteritems(preds_evaluated)
            })
        else:
          for pred in preds_evaluated:
            self._handler.handle_prediction(pred)
      logging.info('Finalizing.')
      self._handler.finalize(step)
  logging.info('Done with prediction.')
  self._timer.update_last_triggered_step(step)
def _model_fn_scaffold(features, labels, mode):
  """Model_fn exercising a custom local_init_op on the Scaffold."""
  del features, labels  # Unused.
  my_int = variables.Variable(
      1, name='my_int', collections=[ops.GraphKeys.LOCAL_VARIABLES])
  scores = constant_op.constant([3.])
  # The assignment must run after local variables and tables are
  # initialized, hence the control dependency.
  with ops.control_dependencies([
      variables.local_variables_initializer(),
      data_flow_ops.tables_initializer()
  ]):
    assign_op = state_ops.assign(my_int, 12345)

  # local_init_op must be an Operation, not a Tensor, so wrap the assign.
  custom_local_init_op = control_flow_ops.group(assign_op)
  return model_fn_lib.EstimatorSpec(
      mode=mode,
      predictions=constant_op.constant([[1.]]),
      loss=constant_op.constant(0.),
      train_op=constant_op.constant(0.),
      scaffold=training.Scaffold(local_init_op=custom_local_init_op),
      export_outputs={'test': export.ClassificationOutput(scores)})
def _evaluate(self, train_session):
  """Evaluates with variable values copied from the training session.

  Args:
    train_session: Session from which current variable values are read.
  """
  current_values = train_session.run(self._var_name_to_train_var)
  feed_map = {
      self._var_name_to_placeholder[name]: current_values[name]
      for name in current_values
  }

  def feed_variables(scaffold, session):
    del scaffold  # Unused; init_fn signature is (scaffold, session).
    session.run(self._var_feed_op, feed_dict=feed_map)

  scaffold = training.Scaffold(
      init_fn=feed_variables, copy_from_scaffold=self._scaffold)
  with self._graph.as_default():
    self._estimator._evaluate_run(
        checkpoint_path=None,
        scaffold=scaffold,
        update_op=self._update_op,
        eval_dict=self._eval_dict,
        all_hooks=self._all_hooks,
        output_dir=self._eval_dir)
  self._timer.update_last_triggered_step(self._iter_count)
def get_initialized_session(*args, **kwargs):
  """Returns a new Session after running a default Scaffold's init_op.

  Args:
    *args: Positional arguments forwarded to the Session constructor.
    **kwargs: Keyword arguments forwarded to the Session constructor.

  Returns:
    A Session whose variables have been initialized.
  """
  finalized_scaffold = training.Scaffold().finalize()
  sess = session.Session(*args, **kwargs)
  sess.run(finalized_scaffold.init_op)
  return sess
def _scaffold_fn_on_cpu():
  """Scaffold fn that asserts it is invoked at most once per `mode`."""
  # Fail if this fn was already called for the current mode, then record
  # the call before handing back a fresh Scaffold.
  self.assertNotIn(mode, self.is_scaffold_fn_called)
  self.is_scaffold_fn_called[mode] = True
  return training.Scaffold()