Example #1
    def testRegression(self):
        """Tests regression using matrix data as input."""
        head_fn = head_lib._regression_head(
            label_dimension=1,
            loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)

        hparams = tensor_forest.ForestHParams(num_trees=5,
                                              max_nodes=1000,
                                              num_classes=1,
                                              num_features=13,
                                              regression=True,
                                              split_after_samples=20)

        regressor = random_forest.CoreTensorForestEstimator(hparams.fill(),
                                                            head=head_fn)

        input_fn, predict_input_fn = _get_regression_input_fns()

        regressor.train(input_fn=input_fn, steps=100)
        res = regressor.evaluate(input_fn=input_fn, steps=10)
        self.assertGreaterEqual(0.1, res['loss'])

        predictions = list(regressor.predict(input_fn=predict_input_fn))
        self.assertAllClose([[24.]],
                            [pred['predictions'] for pred in predictions],
                            atol=1)
Example #2
  def testRegression(self):
    """Tests regression using matrix data as input."""
    head_fn = head_lib._regression_head(
        label_dimension=1,
        loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)

    hparams = tensor_forest.ForestHParams(
        num_trees=5,
        max_nodes=1000,
        num_classes=1,
        num_features=13,
        regression=True,
        split_after_samples=20)

    regressor = random_forest.CoreTensorForestEstimator(
        hparams.fill(), head=head_fn)

    input_fn, predict_input_fn = _get_regression_input_fns()

    regressor.train(input_fn=input_fn, steps=100)
    res = regressor.evaluate(input_fn=input_fn, steps=10)
    self.assertGreaterEqual(0.1, res['loss'])

    predictions = list(regressor.predict(input_fn=predict_input_fn))
    self.assertAllClose(
        [[24.]], [pred['predictions'] for pred in predictions], atol=1)
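Both versions of the test depend on a `_get_regression_input_fns` helper that is not shown. A minimal, hypothetical sketch of such a helper, assuming `tf.estimator.inputs.numpy_input_fn` and synthetic 13-feature data with targets near 24.0 (so the assertions above can pass):

```python
import numpy as np
import tensorflow as tf

def _get_regression_input_fns():
  # Hypothetical stand-in for the helper used above: 13 numeric features per
  # example, regression targets clustered around 24.0.
  data = np.random.rand(120, 13).astype(np.float32)
  labels = np.full(120, 24.0, dtype=np.float32)
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'features': data}, y=labels, num_epochs=None, shuffle=True)
  predict_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'features': data[:1]}, y=None, num_epochs=1, shuffle=False)
  return train_input_fn, predict_input_fn
```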
Example #3
def _get_default_head(params, weights_name, output_type, name=None):
    """Creates a default head based on a type of a problem."""
    if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
        if params.regression:
            return head_lib.regression_head(weight_column_name=weights_name,
                                            label_dimension=params.num_outputs,
                                            enable_centered_bias=False,
                                            head_name=name)
        else:
            return head_lib.multi_class_head(params.num_classes,
                                             weight_column_name=weights_name,
                                             enable_centered_bias=False,
                                             head_name=name)
    else:
        if params.regression:
            return core_head_lib._regression_head(  # pylint:disable=protected-access
                weight_column=weights_name,
                label_dimension=params.num_outputs,
                name=name,
                loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
        else:
            return core_head_lib._multi_class_head_with_softmax_cross_entropy_loss(  # pylint:disable=protected-access
                n_classes=params.num_classes,
                weight_column=weights_name,
                name=name,
                loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
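For illustration, a hedged sketch of one way this helper might be called; the `ModelBuilderOutputType.ESTIMATOR_SPEC` value and the `ForestHParams(...).fill()` chain are assumptions extrapolated from the other examples in this collection:

```python
# Hypothetical call, reusing the hyperparameters from Examples #1-#2.
params = tensor_forest.ForestHParams(
    num_trees=5, max_nodes=1000, num_classes=1, num_features=13,
    regression=True, split_after_samples=20).fill()
head = _get_default_head(
    params, weights_name=None,
    output_type=ModelBuilderOutputType.ESTIMATOR_SPEC)
```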
Example #4
def poisson_regression_head(
    weight_column=None,
    label_dimension=1,
    loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
    compute_full_loss=True,
    name=None):
  """Creates a `_Head` for poisson regression using `tf.nn.log_poisson_loss`.

  The loss is the weighted sum over all input dimensions. Namely, if the input
  labels have shape `[batch_size, label_dimension]`, the loss is the weighted
  sum over both `batch_size` and `label_dimension`.

  The head expects `logits` with shape `[D0, D1, ... DN, label_dimension]`.
  In many applications, the shape is `[batch_size, label_dimension]`.

  The `labels` shape must match `logits`, namely
  `[D0, D1, ... DN, label_dimension]`. If `label_dimension=1`, shape
  `[D0, D1, ... DN]` is also supported.

  If `weight_column` is specified, weights must be of shape
  `[D0, D1, ... DN]`, `[D0, D1, ... DN, 1]` or
  `[D0, D1, ... DN, label_dimension]`.

  This is implemented as a generalized linear model, see
  https://en.wikipedia.org/wiki/Generalized_linear_model.

  Args:
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    label_dimension: Number of regression labels per example. This is the size
      of the last dimension of the labels `Tensor` (typically, this has shape
      `[batch_size, label_dimension]`).
    loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
      reduce training loss over batch and label dimension. Defaults to
      `SUM_OVER_BATCH_SIZE`, namely weighted sum of losses divided by
      `batch size * label_dimension`. See `tf.losses.Reduction`.
    compute_full_loss: Whether to include the constant `log(z!)` term in
      computing the Poisson loss. See `tf.nn.log_poisson_loss` for the full
      documentation.
    name: name of the head. If provided, summary and metrics keys will be
      suffixed by `"/" + name`. Also used as `name_scope` when creating ops.

  Returns:
    An instance of `_Head` for Poisson regression.

  Raises:
    ValueError: If `label_dimension` or `loss_reduction` is invalid.
  """
  def _poisson_loss(labels, logits):
    return nn.log_poisson_loss(
        targets=labels, log_input=logits, compute_full_loss=compute_full_loss)
  return head_lib._regression_head(  # pylint:disable=protected-access
      weight_column=weight_column,
      label_dimension=label_dimension,
      loss_reduction=loss_reduction,
      loss_fn=_poisson_loss,
      inverse_link_fn=math_ops.exp,
      name=name)
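A usage sketch mirroring the canned-estimator pattern shown in the `logistic_regression_head` and `regression_head` docstrings later in this collection; the `tf.contrib.estimator.poisson_regression_head` export path is an assumption:

```python
my_head = tf.contrib.estimator.poisson_regression_head()
my_estimator = tf.contrib.estimator.DNNEstimator(
    head=my_head,
    hidden_units=...,
    feature_columns=...)
```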
Example #5
def _get_default_head(params, weights_name, output_type, name=None):
  """Creates a default head based on a type of a problem."""
  if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
    if params.regression:
      return head_lib.regression_head(
          weight_column_name=weights_name,
          label_dimension=params.num_outputs,
          enable_centered_bias=False,
          head_name=name)
    else:
      return head_lib.multi_class_head(
          params.num_classes,
          weight_column_name=weights_name,
          enable_centered_bias=False,
          head_name=name)
  else:
    if params.regression:
      return core_head_lib._regression_head(  # pylint:disable=protected-access
          weight_column=weights_name,
          label_dimension=params.num_outputs,
          name=name,
          loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
    else:
      return core_head_lib._multi_class_head_with_softmax_cross_entropy_loss(  # pylint:disable=protected-access
          n_classes=params.num_classes,
          weight_column=weights_name,
          name=name,
          loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
Example #6
def _create_regression_head(label_dimension, weight_column=None):
  if label_dimension != 1:
    raise ValueError('For now only 1-dimensional regression is supported. '
                     'label_dimension given as {}'.format(label_dimension))
  # pylint: disable=protected-access
  return head_lib._regression_head(
      label_dimension=label_dimension,
      weight_column=weight_column,
      loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
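As a quick illustration of the guard above (hypothetical calls):

```python
head = _create_regression_head(label_dimension=1)   # OK
head = _create_regression_head(label_dimension=2)   # raises ValueError
```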
Example #7
def _model_fn(features, labels, mode, config):
  """Call the defined shared _dnn_model_fn."""
  return _dnn_model_fn(
      features=features,
      labels=labels,
      mode=mode,
      head=head_lib._regression_head(  # pylint: disable=protected-access
          label_dimension=label_dimension, weight_column=weight_column,
          loss_reduction=loss_reduction),
      hidden_units=hidden_units,
      feature_columns=tuple(feature_columns or []),
      optimizer=optimizer,
      activation_fn=activation_fn,
      dropout=dropout,
      input_layer_partitioner=input_layer_partitioner,
      config=config)
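In the surrounding constructor this closure would be handed to the base `Estimator`, following the pattern the `BaselineRegressor` and `LinearRegressor` examples below make explicit; a sketch, assuming the enclosing class is `DNNRegressor` and accepts `warm_start_from`:

```python
super(DNNRegressor, self).__init__(
    model_fn=_model_fn,
    model_dir=model_dir,
    config=config,
    warm_start_from=warm_start_from)
```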
Example #8
    def __init__(self,
                 model_dir=None,
                 label_dimension=1,
                 weight_column=None,
                 optimizer='Ftrl',
                 config=None,
                 loss_reduction=losses.Reduction.SUM):
        """Initializes a BaselineRegressor instance.

        Args:
          model_dir: Directory to save model parameters, graph, etc. This can
            also be used to load checkpoints from the directory into an
            estimator to continue training a previously saved model.
          label_dimension: Number of regression targets per example. This is
            the size of the last dimension of the labels and logits `Tensor`
            objects (typically, these have shape `[batch_size,
            label_dimension]`).
          weight_column: A string or a `_NumericColumn` created by
            `tf.feature_column.numeric_column` defining feature column
            representing weights. It will be multiplied by the loss of the
            example.
          optimizer: String, `tf.Optimizer` object, or callable that creates
            the optimizer to use for training. If not specified, will use
            `FtrlOptimizer` with a default learning rate of 0.3.
          config: `RunConfig` object to configure the runtime settings.
          loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes
            how to reduce training loss over batch. Defaults to `SUM`.

        Returns:
          A `BaselineRegressor` estimator.
        """

        head = head_lib._regression_head(  # pylint: disable=protected-access
            label_dimension=label_dimension,
            weight_column=weight_column,
            loss_reduction=loss_reduction)

        def _model_fn(features, labels, mode, config):
            return _baseline_model_fn(features=features,
                                      labels=labels,
                                      mode=mode,
                                      head=head,
                                      optimizer=optimizer,
                                      config=config)

        super(BaselineRegressor, self).__init__(model_fn=_model_fn,
                                                model_dir=model_dir,
                                                config=config)
Example #9
  def __init__(self,
               model_dir=None,
               label_dimension=1,
               weight_column=None,
               optimizer='Ftrl',
               config=None,
               loss_reduction=losses.Reduction.SUM):
    """Initializes a BaselineRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It will be multiplied by the loss of the example.
      optimizer: String, `tf.Optimizer` object, or callable that creates the
        optimizer to use for training. If not specified, will use
        `FtrlOptimizer` with a default learning rate of 0.3.
      config: `RunConfig` object to configure the runtime settings.
      loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
        to reduce training loss over batch. Defaults to `SUM`.

    Returns:
      A `BaselineRegressor` estimator.
    """

    head = head_lib._regression_head(  # pylint: disable=protected-access
        label_dimension=label_dimension,
        weight_column=weight_column,
        loss_reduction=loss_reduction)
    def _model_fn(features, labels, mode, config):
      return _baseline_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          optimizer=optimizer,
          config=config)
    super(BaselineRegressor, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config)
Example #10
def _model_fn(features, labels, mode, config):
  """Call the _dnn_linear_combined_model_fn."""
  return _dnn_linear_combined_model_fn(
      features=features,
      labels=labels,
      mode=mode,
      head=head_lib._regression_head(  # pylint: disable=protected-access
          label_dimension=label_dimension, weight_column=weight_column,
          loss_reduction=loss_reduction),
      linear_feature_columns=linear_feature_columns,
      linear_optimizer=linear_optimizer,
      dnn_feature_columns=dnn_feature_columns,
      dnn_optimizer=dnn_optimizer,
      dnn_hidden_units=dnn_hidden_units,
      dnn_activation_fn=dnn_activation_fn,
      dnn_dropout=dnn_dropout,
      input_layer_partitioner=input_layer_partitioner,
      config=config)
Example #11
def _model_fn(features, labels, mode, config):
    """Call the _dnn_linear_combined_model_fn."""
    return _dnn_linear_combined_model_fn(
        features=features,
        labels=labels,
        mode=mode,
        head=head_lib._regression_head(  # pylint: disable=protected-access
            label_dimension=label_dimension,
            weight_column=weight_column,
            loss_reduction=loss_reduction),
        linear_feature_columns=linear_feature_columns,
        linear_optimizer=linear_optimizer,
        dnn_feature_columns=dnn_feature_columns,
        dnn_optimizer=dnn_optimizer,
        dnn_hidden_units=dnn_hidden_units,
        dnn_activation_fn=dnn_activation_fn,
        dnn_dropout=dnn_dropout,
        input_layer_partitioner=input_layer_partitioner,
        config=config)
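This model function presumably backs `DNNLinearCombinedRegressor`; a hedged construction sketch with placeholder feature columns:

```python
estimator = tf.estimator.DNNLinearCombinedRegressor(
    linear_feature_columns=...,
    dnn_feature_columns=...,
    dnn_hidden_units=[64, 32])
```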
Example #12
def core_quantile_regression_head(
        quantiles,
        weight_column=None,
        loss_reduction=core_losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):
    """Core head for quantile regression problems."""
    def loss_fn(labels, logits):
        result = losses.per_example_quantile_regression_loss(
            labels=labels,
            predictions=logits,
            weights=weight_column,
            quantile=quantiles)
        return result[0]

    # pylint:disable=protected-access
    head_fn = core_head_lib._regression_head(label_dimension=1,
                                             loss_fn=loss_fn,
                                             loss_reduction=loss_reduction,
                                             weight_column=weight_column)
    # pylint:enable=protected-access
    return head_fn
Example #13
def core_quantile_regression_head(
    quantiles,
    weight_column=None,
    loss_reduction=core_losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):
  """Core head for quantile regression problems."""

  def loss_fn(labels, logits):
    result = losses.per_example_quantile_regression_loss(
        labels=labels,
        predictions=logits,
        weights=weight_column,
        quantile=quantiles)
    return result[0]

  # pylint:disable=protected-access
  head_fn = core_head_lib._regression_head(
      label_dimension=1,
      loss_fn=loss_fn,
      loss_reduction=loss_reduction,
      weight_column=weight_column)
  # pylint:enable=protected-access
  return head_fn
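A hedged sketch of building such a head for median regression; whether `quantiles` takes a scalar or a list depends on `per_example_quantile_regression_loss`, which is not shown:

```python
# Hypothetical: a head for median (0.5-quantile) regression.
median_head = core_quantile_regression_head(quantiles=0.5)
```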
Example #14
  def __init__(self,
               feature_columns,
               model_dir=None,
               label_dimension=1,
               weight_column=None,
               optimizer='Ftrl',
               config=None,
               partitioner=None,
               warm_start_from=None,
               loss_reduction=losses.Reduction.SUM,
               sparse_combiner='sum'):
    """Initializes a `LinearRegressor` instance.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example. If it is a string, it is
        used as a key to fetch weight tensor from the `features`. If it is a
        `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
        then weight_column.normalizer_fn is applied on it to get weight tensor.
      optimizer: An instance of `tf.Optimizer` used to train the model. Can also
        be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or
        callable. Defaults to FTRL optimizer.
      config: `RunConfig` object to configure the runtime settings.
      partitioner: Optional. Partitioner for input layer.
      warm_start_from: A string filepath to a checkpoint to warm-start from, or
        a `WarmStartSettings` object to fully configure warm-starting.  If the
        string filepath is provided instead of a `WarmStartSettings`, then all
        weights and biases are warm-started, and it is assumed that vocabularies
        and Tensor names are unchanged.
      loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
        to reduce training loss over batch. Defaults to `SUM`.
      sparse_combiner: A string specifying how to reduce if a categorical column
        is multivalent.  One of "mean", "sqrtn", and "sum" -- these are
        effectively different ways to do example-level normalization, which can
        be useful for bag-of-words features. For more details, see
        `tf.feature_column.linear_model`.
    """
    head = head_lib._regression_head(  # pylint: disable=protected-access
        label_dimension=label_dimension, weight_column=weight_column,
        loss_reduction=loss_reduction)

    def _model_fn(features, labels, mode, config):
      """Call the defined shared _linear_model_fn."""
      return _linear_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          feature_columns=tuple(feature_columns or []),
          optimizer=optimizer,
          partitioner=partitioner,
          config=config,
          sparse_combiner=sparse_combiner)

    super(LinearRegressor, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        warm_start_from=warm_start_from)
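A usage sketch (hypothetical feature column and input functions):

```python
# Hypothetical usage with a single 13-dimensional numeric feature column.
fc = tf.feature_column.numeric_column('features', shape=[13])
regressor = LinearRegressor(feature_columns=[fc])
regressor.train(input_fn=train_input_fn, steps=100)
```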
Example #15
def logistic_regression_head(
    weight_column=None,
    loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
    name=None):
  """Creates a `_Head` for logistic regression.

  Uses `sigmoid_cross_entropy_with_logits` loss, which is the same as
  `binary_classification_head`. The differences compared to
  `binary_classification_head` are:

  * Does not support `label_vocabulary`. Instead, labels must be floats in the
    range [0, 1].
  * Does not calculate some metrics that do not make sense, such as AUC.
  * In `PREDICT` mode, only returns logits and predictions
    (`=tf.sigmoid(logits)`), whereas `binary_classification_head` also returns
    probabilities, classes, and class_ids.
  * Export output defaults to `RegressionOutput`, whereas
    `binary_classification_head` defaults to `PredictOutput`.

  The head expects `logits` with shape `[D0, D1, ... DN, 1]`.
  In many applications, the shape is `[batch_size, 1]`.

  The `labels` shape must match `logits`, namely
  `[D0, D1, ... DN]` or `[D0, D1, ... DN, 1]`.

  If `weight_column` is specified, weights must be of shape
  `[D0, D1, ... DN]` or `[D0, D1, ... DN, 1]`.

  This is implemented as a generalized linear model, see
  https://en.wikipedia.org/wiki/Generalized_linear_model.

  The head can be used with a canned estimator. Example:

  ```python
  my_head = tf.contrib.estimator.logistic_regression_head()
  my_estimator = tf.contrib.estimator.DNNEstimator(
      head=my_head,
      hidden_units=...,
      feature_columns=...)
  ```

  It can also be used with a custom `model_fn`. Example:

  ```python
  def _my_model_fn(features, labels, mode):
    my_head = tf.contrib.estimator.logistic_regression_head()
    logits = tf.keras.Model(...)(features)

    return my_head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        optimizer=tf.AdagradOptimizer(learning_rate=0.1),
        logits=logits)

  my_estimator = tf.estimator.Estimator(model_fn=_my_model_fn)
  ```

  Args:
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
      reduce training loss over batch and label dimension. Defaults to
      `SUM_OVER_BATCH_SIZE`, namely weighted sum of losses divided by
      `batch size * label_dimension`. See `tf.losses.Reduction`.
    name: name of the head. If provided, summary and metrics keys will be
      suffixed by `"/" + name`. Also used as `name_scope` when creating ops.

  Returns:
    An instance of `_Head` for logistic regression.

  Raises:
    ValueError: If `loss_reduction` is invalid.
  """
  def _logistic_loss(labels, logits):
    labels = head_lib._assert_range(  # pylint:disable=protected-access
        labels, n_classes=2, message='Labels must be in range [0, 1]')
    return nn.sigmoid_cross_entropy_with_logits(
        labels=labels, logits=logits)
  return head_lib._regression_head(  # pylint:disable=protected-access
      weight_column=weight_column,
      label_dimension=1,
      loss_reduction=loss_reduction,
      loss_fn=_logistic_loss,
      inverse_link_fn=math_ops.sigmoid,
      name=name)
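For intuition, `tf.nn.sigmoid_cross_entropy_with_logits` computes, in its numerically stable form, `max(x, 0) - x*z + log(1 + exp(-|x|))` for logit `x` and label `z`; a tiny worked check in plain Python:

```python
import numpy as np

x, z = 2.0, 0.7  # logit and a label in [0, 1]
loss = max(x, 0.0) - x * z + np.log1p(np.exp(-abs(x)))  # ~0.727
# Matches -z*log(sigmoid(x)) - (1-z)*log(1-sigmoid(x)).
```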
Example #16
def regression_head(weight_column=None,
                    label_dimension=1,
                    loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
                    loss_fn=None,
                    inverse_link_fn=None,
                    name=None):
    """Creates a `_Head` for regression using the `mean_squared_error` loss.

  The loss is the weighted sum over all input dimensions. Namely, if the input
  labels have shape `[batch_size, label_dimension]`, the loss is the weighted
  sum over both `batch_size` and `label_dimension`.

  The head expects `logits` with shape `[D0, D1, ... DN, label_dimension]`.
  In many applications, the shape is `[batch_size, label_dimension]`.

  The `labels` shape must match `logits`, namely
  `[D0, D1, ... DN, label_dimension]`. If `label_dimension=1`, shape
  `[D0, D1, ... DN]` is also supported.

  If `weight_column` is specified, weights must be of shape
  `[D0, D1, ... DN]`, `[D0, D1, ... DN, 1]` or
  `[D0, D1, ... DN, label_dimension]`.

  Supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
  `(labels, logits, features)` as arguments and returns unreduced loss with
  shape `[D0, D1, ... DN, label_dimension]`.

  Also supports custom `inverse_link_fn`, also known as 'mean function'.
  `inverse_link_fn` is only used in `PREDICT` mode. It takes `logits` as
  argument and returns predicted values. This function is the inverse of the
  link function defined in
  https://en.wikipedia.org/wiki/Generalized_linear_model#Link_function
  Namely, for Poisson regression, set `inverse_link_fn=tf.exp`.

  Args:
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    label_dimension: Number of regression labels per example. This is the size
      of the last dimension of the labels `Tensor` (typically, this has shape
      `[batch_size, label_dimension]`).
    loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
      reduce training loss over batch and label dimension. Defaults to
      `SUM_OVER_BATCH_SIZE`, namely weighted sum of losses divided by
      `batch size * label_dimension`. See `tf.losses.Reduction`.
    loss_fn: Optional loss function. Defaults to `mean_squared_error`.
    inverse_link_fn: Optional inverse link function, also known as 'mean
      function'. Defaults to identity.
    name: name of the head. If provided, summary and metrics keys will be
      suffixed by `"/" + name`. Also used as `name_scope` when creating ops.

  Returns:
    An instance of `_Head` for linear regression.

  Raises:
    ValueError: If `label_dimension` or `loss_reduction` is invalid.
  """
  return head_lib._regression_head(  # pylint:disable=protected-access
      weight_column=weight_column,
      label_dimension=label_dimension,
      loss_reduction=loss_reduction,
      loss_fn=loss_fn,
      inverse_link_fn=inverse_link_fn,
      name=name)
Example #17
def regression_head(weight_column=None,
                    label_dimension=1,
                    loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
                    loss_fn=None,
                    inverse_link_fn=None,
                    name=None):
  """Creates a `_Head` for regression using the `mean_squared_error` loss.

  The loss is the weighted sum over all input dimensions. Namely, if the input
  labels have shape `[batch_size, label_dimension]`, the loss is the weighted
  sum over both `batch_size` and `label_dimension`.

  The head expects `logits` with shape `[D0, D1, ... DN, label_dimension]`.
  In many applications, the shape is `[batch_size, label_dimension]`.

  The `labels` shape must match `logits`, namely
  `[D0, D1, ... DN, label_dimension]`. If `label_dimension=1`, shape
  `[D0, D1, ... DN]` is also supported.

  If `weight_column` is specified, weights must be of shape
  `[D0, D1, ... DN]`, `[D0, D1, ... DN, 1]` or
  `[D0, D1, ... DN, label_dimension]`.

  Supports custom `loss_fn`. `loss_fn` takes `(labels, logits)` or
  `(labels, logits, features)` as arguments and returns unreduced loss with
  shape `[D0, D1, ... DN, label_dimension]`.

  Also supports custom `inverse_link_fn`, also known as 'mean function'.
  `inverse_link_fn` is only used in `PREDICT` mode. It takes `logits` as
  argument and returns predicted values. This function is the inverse of the
  link function defined in
  https://en.wikipedia.org/wiki/Generalized_linear_model#Link_function
  Namely, for Poisson regression, set `inverse_link_fn=tf.exp`.

  The head can be used with a canned estimator. Example:

  ```python
  my_head = tf.contrib.estimator.regression_head()
  my_estimator = tf.contrib.estimator.DNNEstimator(
      head=my_head,
      hidden_units=...,
      feature_columns=...)
  ```

  It can also be used with a custom `model_fn`. Example:

  ```python
  def _my_model_fn(features, labels, mode):
    my_head = tf.contrib.estimator.regression_head()
    logits = tf.keras.Model(...)(features)

    return my_head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        optimizer=tf.AdagradOptimizer(learning_rate=0.1),
        logits=logits)

  my_estimator = tf.estimator.Estimator(model_fn=_my_model_fn)
  ```

  Args:
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    label_dimension: Number of regression labels per example. This is the size
      of the last dimension of the labels `Tensor` (typically, this has shape
      `[batch_size, label_dimension]`).
    loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
      reduce training loss over batch and label dimension. Defaults to
      `SUM_OVER_BATCH_SIZE`, namely weighted sum of losses divided by
      `batch size * label_dimension`. See `tf.losses.Reduction`.
    loss_fn: Optional loss function. Defaults to `mean_squared_error`.
    inverse_link_fn: Optional inverse link function, also known as 'mean
      function'. Defaults to identity.
    name: name of the head. If provided, summary and metrics keys will be
      suffixed by `"/" + name`. Also used as `name_scope` when creating ops.

  Returns:
    An instance of `_Head` for linear regression.

  Raises:
    ValueError: If `label_dimension` or `loss_reduction` is invalid.
  """
  return head_lib._regression_head(  # pylint:disable=protected-access
      weight_column=weight_column,
      label_dimension=label_dimension,
      loss_reduction=loss_reduction,
      loss_fn=loss_fn,
      inverse_link_fn=inverse_link_fn,
      name=name)
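A small worked example of the default `SUM_OVER_BATCH_SIZE` reduction described above, in plain Python arithmetic:

```python
# labels and logits of shape [2, 1]; default mean_squared_error loss.
labels = [[1.0], [2.0]]
logits = [[0.5], [2.5]]
per_example = [(l - p) ** 2 for (l,), (p,) in zip(labels, logits)]  # [0.25, 0.25]
# SUM_OVER_BATCH_SIZE: sum of losses / (batch_size * label_dimension)
loss = sum(per_example) / (2 * 1)  # 0.25
```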
Example #18
def logistic_regression_head(
    weight_column=None,
    loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
    name=None):
  """Creates a `_Head` for logistic regression.

  Uses `sigmoid_cross_entropy_with_logits` loss, which is the same as
  `binary_classification_head`. The differences compared to
  `binary_classification_head` are:

  * Does not support `label_vocabulary`. Instead, labels must be floats in the
    range [0, 1].
  * Does not calculate some metrics that do not make sense, such as AUC.
  * In `PREDICT` mode, only returns logits and predictions
    (`=tf.sigmoid(logits)`), whereas `binary_classification_head` also returns
    probabilities, classes, and class_ids.
  * Export output defaults to `RegressionOutput`, whereas
    `binary_classification_head` defaults to `PredictOutput`.

  The head expects `logits` with shape `[D0, D1, ... DN, 1]`.
  In many applications, the shape is `[batch_size, 1]`.

  The `labels` shape must match `logits`, namely
  `[D0, D1, ... DN]` or `[D0, D1, ... DN, 1]`.

  If `weight_column` is specified, weights must be of shape
  `[D0, D1, ... DN]` or `[D0, D1, ... DN, 1]`.

  This is implemented as a generalized linear model, see
  https://en.wikipedia.org/wiki/Generalized_linear_model.

  Args:
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
      reduce training loss over batch and label dimension. Defaults to
      `SUM_OVER_BATCH_SIZE`, namely weighted sum of losses divided by
      `batch size * label_dimension`. See `tf.losses.Reduction`.
    name: name of the head. If provided, summary and metrics keys will be
      suffixed by `"/" + name`. Also used as `name_scope` when creating ops.

  Returns:
    An instance of `_Head` for logistic regression.

  Raises:
    ValueError: If `loss_reduction` is invalid.
  """
  def _logistic_loss(labels, logits):
    labels = head_lib._assert_range(  # pylint:disable=protected-access
        labels, n_classes=2, message='Labels must be in range [0, 1]')
    return nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)

  return head_lib._regression_head(  # pylint:disable=protected-access
      weight_column=weight_column,
      label_dimension=1,
      loss_reduction=loss_reduction,
      loss_fn=_logistic_loss,
      inverse_link_fn=math_ops.sigmoid,
      name=name)