Example #1
0
def _joint_linear_estimator(head, feature_columns):
    """Returns an `Estimator` whose model is a joint-weights linear model.

    Args:
      head: A _Head object; its `logits_dimension` sizes the model output.
      feature_columns: Iterable of `FeatureColumn`s consumed by the model.

    Returns:
      An `estimator.Estimator` driven by `_base_model_fn`.
    """
    linear_model = composable_model.LinearComposableModel(
        num_label_columns=head.logits_dimension,
        _joint_weights=True)
    params = {
        'model': linear_model,
        'feature_columns': feature_columns,
        'head': head,
    }
    return estimator.Estimator(model_fn=_base_model_fn, params=params)
    def __init__(self,
                 target_column,
                 model_dir=None,
                 linear_feature_columns=None,
                 linear_optimizer=None,
                 dnn_feature_columns=None,
                 dnn_optimizer=None,
                 dnn_hidden_units=None,
                 dnn_activation_fn=nn.relu,
                 dnn_dropout=None,
                 gradient_clip_norm=None,
                 enable_centered_bias=True,
                 config=None):
        """Initializes a _DNNLinearCombinedBaseEstimator instance.

        Args:
          target_column: A _TargetColumn object.
          model_dir: Directory in which model parameters, graph etc. are saved.
          linear_feature_columns: Iterable of the `FeatureColumn`s used by the
            linear part of the model.
          linear_optimizer: `tf.Optimizer` for the linear part; a FTRL
            optimizer is used when `None`.
          dnn_feature_columns: Iterable of the `FeatureColumn`s used by the
            deep part of the model.
          dnn_optimizer: `tf.Optimizer` for the deep part; an Adagrad
            optimizer is used when `None`.
          dnn_hidden_units: List of hidden units per fully connected layer.
          dnn_activation_fn: Activation function applied to each layer;
            `tf.nn.relu` is used when `None`.
          dnn_dropout: When not None, probability of dropping a coordinate.
          gradient_clip_norm: Float > 0; when provided, gradients are clipped
            to this global norm (see tf.clip_by_global_norm).
          enable_centered_bias: When True, learn a centered bias variable per
            class; the rest of the model learns the residual.
          config: RunConfig object configuring runtime settings.

        Raises:
          ValueError: If linear_feature_columns and dnn_features_columns are
            both empty at the same time.
        """
        super(_DNNLinearCombinedBaseEstimator,
              self).__init__(model_dir=model_dir, config=config)

        # Parameter-server replica count only matters under a RunConfig.
        ps_replicas = config.num_ps_replicas if config else 0

        # The linear tower is always constructed.
        self._linear_model = composable_model.LinearComposableModel(
            num_label_columns=target_column.num_label_columns,
            optimizer=linear_optimizer,
            gradient_clip_norm=gradient_clip_norm,
            num_ps_replicas=ps_replicas)

        # The deep tower exists only when hidden units were requested.
        if dnn_hidden_units:
            self._dnn_model = composable_model.DNNComposableModel(
                num_label_columns=target_column.num_label_columns,
                hidden_units=dnn_hidden_units,
                optimizer=dnn_optimizer,
                activation_fn=dnn_activation_fn,
                dropout=dnn_dropout,
                gradient_clip_norm=gradient_clip_norm,
                num_ps_replicas=ps_replicas)
        else:
            self._dnn_model = None

        # Remember the configuration needed later to build/evaluate the model.
        self._linear_feature_columns = linear_feature_columns
        self._linear_optimizer = linear_optimizer
        self._linear_weight_collection = (
            self._linear_model.get_weight_collection_name())
        self._dnn_feature_columns = dnn_feature_columns
        self._dnn_hidden_units = dnn_hidden_units
        self._centered_bias_weight_collection = "centered_bias"
        self._enable_centered_bias = enable_centered_bias
        self._target_column = target_column
    def __init__(
            self,  # _joint_linear_weights pylint: disable=invalid-name
            head,
            model_dir=None,
            linear_feature_columns=None,
            linear_optimizer=None,
            _joint_linear_weights=False,
            dnn_feature_columns=None,
            dnn_optimizer=None,
            dnn_hidden_units=None,
            dnn_activation_fn=nn.relu,
            dnn_dropout=None,
            gradient_clip_norm=None,
            config=None,
            feature_engineering_fn=None,
            default_prediction_key=None,
            enable_centered_bias=False):
        """Initializes a _DNNLinearCombinedBaseEstimator instance.

        Args:
          head: A _Head object.
          model_dir: Directory in which model parameters, graph etc. are
            saved; may also hold checkpoints to load into an estimator to
            continue training a previously saved model.
          linear_feature_columns: Iterable of the `FeatureColumn`s used by the
            linear part of the model.
          linear_optimizer: `tf.Optimizer` for the linear part; a FTRL
            optimizer is used when `None`.
          _joint_linear_weights: When True, a single (possibly partitioned)
            variable stores all linear-model weights. More efficient with many
            columns, but requires all columns to be sparse with the 'sum'
            combiner.
          dnn_feature_columns: Iterable of the `FeatureColumn`s used by the
            deep part of the model.
          dnn_optimizer: `tf.Optimizer` for the deep part; an Adagrad
            optimizer is used when `None`.
          dnn_hidden_units: List of hidden units per fully connected layer.
          dnn_activation_fn: Activation function applied to each layer;
            `tf.nn.relu` is used when `None`.
          dnn_dropout: When not None, probability of dropping a coordinate.
          gradient_clip_norm: Float > 0; when provided, gradients are clipped
            to this global norm (see tf.clip_by_global_norm).
          config: RunConfig object configuring runtime settings.
          feature_engineering_fn: Takes the features and labels produced by
            `input_fn` and returns the features and labels fed to the model.
          default_prediction_key: Default prediction key used with metrics.
          enable_centered_bias: When True, learn a centered bias variable per
            class; the rest of the model learns the residual.

        Raises:
          ValueError: If linear_feature_columns and dnn_features_columns are
            both empty at the same time.
        """
        super(_DNNLinearCombinedBaseEstimator,
              self).__init__(model_dir=model_dir, config=config)

        # Parameter-server replica count only matters under a RunConfig.
        ps_replicas = config.num_ps_replicas if config else 0

        # The linear tower is always constructed.
        self._linear_model = composable_model.LinearComposableModel(
            num_label_columns=head.logits_dimension,
            optimizer=linear_optimizer,
            _joint_weights=_joint_linear_weights,
            gradient_clip_norm=gradient_clip_norm,
            num_ps_replicas=ps_replicas)

        # The deep tower exists only when hidden units were requested.
        if dnn_hidden_units:
            self._dnn_model = composable_model.DNNComposableModel(
                num_label_columns=head.logits_dimension,
                hidden_units=dnn_hidden_units,
                optimizer=dnn_optimizer,
                activation_fn=dnn_activation_fn,
                dropout=dnn_dropout,
                gradient_clip_norm=gradient_clip_norm,
                num_ps_replicas=ps_replicas)
        else:
            self._dnn_model = None

        # Remember the configuration needed later to build/evaluate the model.
        self._linear_feature_columns = linear_feature_columns
        self._linear_optimizer = linear_optimizer
        self._dnn_feature_columns = dnn_feature_columns
        self._dnn_hidden_units = dnn_hidden_units
        self._head = head
        self._default_prediction_key = default_prediction_key
        # Identity transform when no feature engineering function is supplied.
        self._feature_engineering_fn = feature_engineering_fn or (
            lambda features, labels: (features, labels))
        self._enable_centered_bias = enable_centered_bias
Example #4
0
 def __init__(self, head, feature_columns):
     """Builds on the base estimator with a joint-weights linear model."""
     super(JointLinearEstimator, self).__init__(head, feature_columns)
     # Single (possibly partitioned) variable holds all linear weights.
     self._model = composable_model.LinearComposableModel(
         num_label_columns=head.logits_dimension,
         _joint_weights=True)
Example #5
0
 def __init__(self, target_column, feature_columns):
     """Builds on the base estimator with a joint-weights linear model."""
     super(JointLinearEstimator, self).__init__(target_column,
                                                feature_columns)
     # Single (possibly partitioned) variable holds all linear weights.
     self._model = composable_model.LinearComposableModel(
         num_label_columns=target_column.num_label_columns,
         _joint_weights=True)