# Imports assumed from the TF 1.x Estimator test suite these snippets
# resemble; they are the internal modules such tests conventionally alias.
import tensorflow as tf

from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import constant_op
from tensorflow.python.layers import layers
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.training import training


def model_fn_diff_modes(features, labels, mode):
  """Returns a distinct loss, prediction, and train_op for each mode."""
  _, _ = features, labels  # Unused.
  v = variables.Variable(21, name='some_var')
  train_op = None
  loss = constant_op.constant(104)
  if mode == model_fn_lib.ModeKeys.TRAIN:
    loss = constant_op.constant(105)
    predictions = constant_op.constant([501])
    train_op = control_flow_ops.group(
        state_ops.assign_add(training.get_global_step(), 1),
        state_ops.assign_add(v, 3))
  elif mode == model_fn_lib.ModeKeys.EVAL:
    loss = constant_op.constant(106)
    predictions = constant_op.constant([502])
  else:  # PREDICT
    loss = constant_op.constant(107)
    predictions = constant_op.constant([503])
  return model_fn_lib.EstimatorSpec(
      mode,
      loss=loss,
      train_op=train_op,
      eval_metric_ops={
          'abs_err': metrics_lib.mean_absolute_error(
              constant_op.constant(0), predictions)
      },
      predictions=predictions)
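

# A minimal usage sketch, not part of the original tests: it assumes
# `estimator_lib` is tensorflow.python.estimator.estimator, and
# `_constant_input_fn` is a hypothetical helper defined here. Driving the
# model_fn through an Estimator exercises each mode branch above.
from tensorflow.python.estimator import estimator as estimator_lib


def _constant_input_fn():
  # model_fn_diff_modes ignores features and labels, so any constants do.
  return constant_op.constant([[1.0]]), constant_op.constant([[1.0]])


est = estimator_lib.Estimator(model_fn=model_fn_diff_modes)
est.train(_constant_input_fn, steps=1)      # TRAIN branch: loss 105
est.evaluate(_constant_input_fn, steps=1)   # EVAL branch: loss 106, 'abs_err'
next(est.predict(_constant_input_fn))       # PREDICT branch: yields 503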


def _model_fn(features, labels, mode):
  """Linear model whose export signature is a RegressionOutput."""
  predictions = layers.dense(
      features['x'], 1, kernel_initializer=init_ops.zeros_initializer())
  export_outputs = {
      'predictions': export_output.RegressionOutput(predictions)
  }

  if mode == model_fn_lib.ModeKeys.PREDICT:
    return model_fn_lib.EstimatorSpec(
        mode, predictions=predictions, export_outputs=export_outputs)

  loss = losses.mean_squared_error(labels, predictions)
  train_op = training.GradientDescentOptimizer(learning_rate=0.5).minimize(
      loss, training.get_global_step())
  eval_metric_ops = {
      'absolute_error': metrics_lib.mean_absolute_error(labels, predictions)
  }
  return model_fn_lib.EstimatorSpec(
      mode,
      predictions=predictions,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=eval_metric_ops,
      export_outputs=export_outputs)
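

# A hedged export sketch, not in the original source: since _model_fn returns
# export_outputs, a trained Estimator built from it can be written out as a
# SavedModel. The regression signature expects serialized tf.Example input,
# so a parsing serving-input fn is used; the feature spec and export path are
# illustrative assumptions.
from tensorflow.python.estimator.export import export as export_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import parsing_ops

regressor = estimator_lib.Estimator(model_fn=_model_fn)
regressor.train(
    lambda: ({'x': constant_op.constant([[1.0]])},
             constant_op.constant([[2.0]])),
    steps=1)
serving_input_fn = export_lib.build_parsing_serving_input_receiver_fn(
    {'x': parsing_ops.FixedLenFeature([1], dtypes.float32)})
export_dir = regressor.export_savedmodel(  # export_saved_model in TF >= 1.13
    '/tmp/regression_export', serving_input_fn)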


def _model_fn(features, labels, mode, params):
  # From a TPU test case: `self`, `batch_size_dict`, `_PREDICT`, and
  # `_create_estimator_spec` are captured from the enclosing test.
  if not self._export_mode:
    # Always check the batch size in params.
    self.assertEqual(batch_size_dict[mode], params['batch_size'])
  else:
    self.assertNotIn('batch_size', params)

  # Check that the input is fed with the correct shape for train and eval.
  # When evaluating on CPU or predicting, a dynamic shape is allowed, so only
  # the fully known shape (which covers TPU training) is validated here.
  if features['x'].shape.is_fully_defined():
    self.assertEqual(batch_size_dict[mode], features['x'].shape[0])

  predictions = layers.dense(
      features['x'], 1, kernel_initializer=init_ops.ones_initializer())
  export_outputs = {
      'predictions': export_output.RegressionOutput(predictions)
  }

  if mode == _PREDICT:
    return _create_estimator_spec(
        mode=mode,
        predictions={'predictions': predictions},
        export_outputs=export_outputs)

  loss = losses.mean_squared_error(labels, predictions)
  # CrossShardOptimizer aggregates gradients across TPU shards.
  optimizer = tf.tpu.CrossShardOptimizer(
      training.GradientDescentOptimizer(learning_rate=0.5))
  train_op = optimizer.minimize(loss, global_step=training.get_global_step())

  # On TPU, eval metrics are passed as a (metric_fn, tensors) pair that the
  # CPU host later expands into eval_metric_ops.
  eval_metrics = (
      lambda labels, predictions: {  # pylint: disable=g-long-lambda
          'absolute_error': metrics_lib.mean_absolute_error(
              labels, predictions)
      },
      [labels, predictions])

  return _create_estimator_spec(
      mode=mode,
      loss=loss,
      predictions={'predictions': predictions},
      export_outputs=export_outputs,
      train_op=train_op,
      eval_metrics=eval_metrics)


def _metric_fn_on_cpu(self, labels, predictions):
  # Computes mean absolute error, so the result is keyed 'mae'.
  return {
      'mae': metrics_lib.mean_absolute_error(labels, predictions),
  }
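

# A hedged illustration, assumed behavior rather than quoted source: with
# TPUEstimator, a (metric_fn, tensors) pair such as
# (self._metric_fn_on_cpu, [labels, predictions]) is shipped to the CPU host,
# which expands it into an eval_metric_ops dict roughly as follows.
def _expand_eval_metrics(eval_metrics):
  metric_fn, tensors = eval_metrics
  if isinstance(tensors, dict):
    # The dict form is passed as keyword arguments...
    return metric_fn(**tensors)
  # ...while the list form is passed positionally.
  return metric_fn(*tensors)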