Example #1
  def __init__(self,  # _joint_weights: pylint: disable=invalid-name
               feature_columns,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               gradient_clip_norm=None,
               enable_centered_bias=None,
               target_dimension=1,
               _joint_weights=False,
               config=None,
               feature_engineering_fn=None):
    """Construct a `LinearRegressor` estimator object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      weight_column_name: A string defining the feature column name that
        represents weights. It is used to down-weight or boost examples during
        training, and it is multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Ftrl optimizer.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      target_dimension: Dimension of the target for multi-label regression.
        Defaults to 1.
      _joint_weights: If True, use a single (possibly partitioned) variable to
        store the weights. It's faster, but requires that all feature columns
        be sparse and use the 'sum' combiner. Incompatible with SDCAOptimizer.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        targets which are the output of `input_fn` and returns features and
        targets which will be fed into the model.

    Returns:
      A `LinearRegressor` estimator.
    """
    if enable_centered_bias is None:
      enable_centered_bias = True
      dnn_linear_combined._changing_default_center_bias()  # pylint: disable=protected-access
    self._joint_weights = _joint_weights
    super(LinearRegressor, self).__init__(
        model_dir=model_dir,
        weight_column_name=weight_column_name,
        linear_feature_columns=feature_columns,
        linear_optimizer=optimizer,
        _joint_linear_weights=_joint_weights,
        gradient_clip_norm=gradient_clip_norm,
        enable_centered_bias=enable_centered_bias,
        target_dimension=target_dimension,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
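
For reference, a minimal usage sketch of this constructor under the contrib-era API (`tf.contrib.learn`, long since removed from TensorFlow); the column names, data, and `input_fn` below are hypothetical, invented for illustration:

import tensorflow as tf

# Hypothetical real-valued feature columns (tf.contrib.layers API of the era).
sqft = tf.contrib.layers.real_valued_column("sqft")
age = tf.contrib.layers.real_valued_column("age")

def input_fn():
  # Toy in-graph constants; a real input_fn would read from files or queues.
  features = {
      "sqft": tf.constant([[1200.0], [1750.0], [900.0]]),
      "age": tf.constant([[31.0], [5.0], [64.0]]),
  }
  targets = tf.constant([[240.0], [395.0], [180.0]])
  return features, targets

# optimizer=None falls back to Ftrl, per the docstring above.
regressor = tf.contrib.learn.LinearRegressor(feature_columns=[sqft, age])
regressor.fit(input_fn=input_fn, steps=100)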
Example #2
  def __init__(self,  # _joint_weight pylint: disable=invalid-name
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               gradient_clip_norm=None,
               enable_centered_bias=None,
               _joint_weight=False,
               config=None,
               feature_engineering_fn=None):
    """Construct a `LinearClassifier` estimator object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      n_classes: Number of target classes. Defaults to 2 (binary
        classification).
      weight_column_name: A string defining the feature column name that
        represents weights. It is used to down-weight or boost examples during
        training, and it is multiplied by the loss of the example.
      optimizer: The optimizer used to train the model. If specified, it should
        be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
        the Ftrl optimizer will be used.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      _joint_weight: If True, the weights for all columns will be stored in a
        single (possibly partitioned) variable. It's more efficient, but it's
        incompatible with SDCAOptimizer, and requires that all feature columns
        be sparse and use the 'sum' combiner.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        targets which are the output of `input_fn` and returns features and
        targets which will be fed into the model.

    Returns:
      A `LinearClassifier` estimator.

    Raises:
      ValueError: if n_classes < 2.
    """
    # TODO(zoy): Give an unsupported error if enable_centered_bias is
    #    requested for SDCA once its default changes to False.
    if enable_centered_bias is None:
      enable_centered_bias = True
      dnn_linear_combined._changing_default_center_bias()  # pylint: disable=protected-access
    self._model_dir = model_dir or tempfile.mkdtemp()
    if n_classes < 2:
      raise ValueError("Classification requires n_classes >= 2")
    self._n_classes = n_classes
    self._feature_columns = feature_columns
    assert self._feature_columns
    self._weight_column_name = weight_column_name
    self._optimizer = _get_default_optimizer(feature_columns)
    if optimizer:
      self._optimizer = _get_optimizer(optimizer)
    num_ps_replicas = config.num_ps_replicas if config else 0

    if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
      assert not _joint_weight, ("_joint_weight is incompatible with the"
                                 " SDCAOptimizer")
      model_fn = sdca_classifier_model_fn
      params = {
          "feature_columns": feature_columns,
          "optimizer": self._optimizer,
          "weight_column_name": weight_column_name,
          "loss_type": "logistic_loss",
      }
    else:
      model_fn = _linear_classifier_model_fn
      params = {
          "n_classes": n_classes,
          "weight_column_name": weight_column_name,
          "feature_columns": feature_columns,
          "optimizer": self._optimizer,
          "gradient_clip_norm": gradient_clip_norm,
          "enable_centered_bias": enable_centered_bias,
          "num_ps_replicas": num_ps_replicas,
          "joint_weights": _joint_weight,
      }

    self._estimator = estimator.Estimator(
        model_fn=model_fn,
        model_dir=self._model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)
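
A comparable sketch for this classifier, again assuming the contrib-era API; the hashed sparse column and toy data are hypothetical:

import tensorflow as tf

# Hypothetical sparse string feature hashed into 100 buckets.
language = tf.contrib.layers.sparse_column_with_hash_bucket(
    "language", hash_bucket_size=100)

def input_fn():
  features = {
      # SparseTensor(indices, values, shape) passed positionally, since the
      # third argument was renamed from `shape` to `dense_shape` over time.
      "language": tf.SparseTensor([[0, 0], [1, 0], [2, 0]],
                                  ["en", "fr", "de"], [3, 1]),
  }
  targets = tf.constant([[1], [0], [1]])  # binary labels, n_classes=2
  return features, targets

classifier = tf.contrib.learn.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)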
Example #3
    def __init__(
            self,  # _joint_weight pylint: disable=invalid-name
            feature_columns,
            model_dir=None,
            n_classes=2,
            weight_column_name=None,
            optimizer=None,
            gradient_clip_norm=None,
            enable_centered_bias=None,
            _joint_weight=False,
            config=None,
            feature_engineering_fn=None):
        """Construct a `LinearClassifier` estimator object.

        Args:
          feature_columns: An iterable containing all the feature columns used
            by the model. All items in the set should be instances of classes
            derived from `FeatureColumn`.
          model_dir: Directory to save model parameters, graph, etc. This can
            also be used to load checkpoints from the directory into an
            estimator to continue training a previously saved model.
          n_classes: Number of target classes. Defaults to 2 (binary
            classification).
          weight_column_name: A string defining the feature column name that
            represents weights. It is used to down-weight or boost examples
            during training, and it is multiplied by the loss of the example.
          optimizer: The optimizer used to train the model. If specified, it
            should be either an instance of `tf.Optimizer` or the
            SDCAOptimizer. If `None`, the Ftrl optimizer will be used.
          gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
            to their global norm with this clipping ratio. See
            `tf.clip_by_global_norm` for more details.
          enable_centered_bias: A bool. If True, estimator will learn a
            centered bias variable for each class. Rest of the model structure
            learns the residual after centered bias.
          _joint_weight: If True, the weights for all columns will be stored in
            a single (possibly partitioned) variable. It's more efficient, but
            it's incompatible with SDCAOptimizer, and requires that all feature
            columns be sparse and use the 'sum' combiner.
          config: `RunConfig` object to configure the runtime settings.
          feature_engineering_fn: Feature engineering function. Takes features
            and targets which are the output of `input_fn` and returns features
            and targets which will be fed into the model.

        Returns:
          A `LinearClassifier` estimator.

        Raises:
          ValueError: if n_classes < 2.
        """
        # TODO(zoy): Give an unsupported error if enable_centered_bias is
        #    requested for SDCA once its default changes to False.
        if enable_centered_bias is None:
            enable_centered_bias = True
            dnn_linear_combined._changing_default_center_bias()  # pylint: disable=protected-access
        self._model_dir = model_dir or tempfile.mkdtemp()
        if n_classes < 2:
            raise ValueError("Classification requires n_classes >= 2")
        self._n_classes = n_classes
        self._feature_columns = feature_columns
        assert self._feature_columns
        self._weight_column_name = weight_column_name
        self._optimizer = _get_default_optimizer(feature_columns)
        if optimizer:
            self._optimizer = _get_optimizer(optimizer)
        num_ps_replicas = config.num_ps_replicas if config else 0

        chief_hook = None
        if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
            assert not _joint_weight, ("_joint_weight is incompatible with the"
                                       " SDCAOptimizer")
            model_fn = sdca_classifier_model_fn
            # We use a hook to perform the weight update and shrink step only
            # on the chief. Because the SdcaModel is constructed by the
            # estimator within the call to fit(), but the hook must be passed
            # to fit(), we pass the hook as a parameter to the model_fn and
            # have the model_fn propagate the model to the hook.
            chief_hook = _SdcaUpdateWeightsHook()
            params = {
                "feature_columns": feature_columns,
                "optimizer": self._optimizer,
                "weight_column_name": weight_column_name,
                "loss_type": "logistic_loss",
                "update_weights_hook": chief_hook,
            }
        else:
            model_fn = _linear_classifier_model_fn
            params = {
                "n_classes": n_classes,
                "weight_column_name": weight_column_name,
                "feature_columns": feature_columns,
                "optimizer": self._optimizer,
                "gradient_clip_norm": gradient_clip_norm,
                "enable_centered_bias": enable_centered_bias,
                "num_ps_replicas": num_ps_replicas,
                "joint_weights": _joint_weight,
            }

        self._estimator = estimator.Estimator(
            model_fn=model_fn,
            model_dir=self._model_dir,
            config=config,
            params=params,
            feature_engineering_fn=feature_engineering_fn)

        self._additional_run_hook = None
        if self._estimator.config.is_chief:
            self._additional_run_hook = chief_hook
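
Since this revision adds the chief-only `_SdcaUpdateWeightsHook`, here is a sketch of the SDCA code path it guards, under the same contrib-era assumptions (hypothetical names and data; SDCA requires an example-id feature):

import tensorflow as tf

price = tf.contrib.layers.real_valued_column("price")

def input_fn():
  features = {
      "example_id": tf.constant(["0", "1", "2"]),  # consumed by SDCA
      "price": tf.constant([[3.0], [11.0], [7.0]]),
  }
  targets = tf.constant([[1], [0], [1]])
  return features, targets

sdca = tf.contrib.linear_optimizer.SDCAOptimizer(
    example_id_column="example_id")
# _joint_weight must stay False here; the constructor asserts that it is
# incompatible with SDCAOptimizer.
classifier = tf.contrib.learn.LinearClassifier(
    feature_columns=[price], optimizer=sdca)
classifier.fit(input_fn=input_fn, steps=100)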
Example #4
  def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=None,
               config=None,
               feature_engineering_fn=None):
    """Initializes a `DNNRegressor` instance.

    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can also
        be used to load checkpoints from the directory into an estimator to
        continue training a previously saved model.
      weight_column_name: A string defining the feature column name that
        represents weights. It is used to down-weight or boost examples during
        training, and it is multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability that a given coordinate will
        be dropped out.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        targets which are the output of `input_fn` and returns features and
        targets which will be fed into the model.

    Returns:
      A `DNNRegressor` estimator.
    """
    if enable_centered_bias is None:
      enable_centered_bias = True
      dnn_linear_combined._changing_default_center_bias()  # pylint: disable=protected-access
    super(DNNRegressor, self).__init__(
        model_dir=model_dir,
        weight_column_name=weight_column_name,
        dnn_feature_columns=feature_columns,
        dnn_optimizer=optimizer,
        dnn_hidden_units=hidden_units,
        dnn_activation_fn=activation_fn,
        dnn_dropout=dropout,
        gradient_clip_norm=gradient_clip_norm,
        enable_centered_bias=enable_centered_bias,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
    self.feature_columns = feature_columns
    self.optimizer = optimizer
    self.activation_fn = activation_fn
    self.dropout = dropout
    self.hidden_units = hidden_units
    self._feature_columns_inferred = False
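
A minimal usage sketch for this constructor, with hypothetical data and column names (contrib-era API):

import tensorflow as tf

x = tf.contrib.layers.real_valued_column("x")

def input_fn():
  features = {"x": tf.constant([[0.0], [1.0], [2.0], [3.0]])}
  targets = tf.constant([[0.1], [1.9], [4.2], [5.8]])
  return features, targets

regressor = tf.contrib.learn.DNNRegressor(
    hidden_units=[64, 32],   # two fully connected layers: 64 then 32 nodes
    feature_columns=[x],
    dropout=0.1,             # drop a coordinate with probability 0.1
    gradient_clip_norm=5.0)  # clip gradients to global norm 5.0
regressor.fit(input_fn=input_fn, steps=200)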
Example #5
  def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=None,
               config=None,
               feature_engineering_fn=None):
    """Initializes a DNNClassifier instance.

    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can also
        be used to load checkpoints from the directory into an estimator to
        continue training a previously saved model.
      n_classes: Number of target classes. Defaults to 2 (binary
        classification). It must be greater than 1.
      weight_column_name: A string defining the feature column name that
        represents weights. It is used to down-weight or boost examples during
        training, and it is multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability that a given coordinate will
        be dropped out.
      gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        targets which are the output of `input_fn` and returns features and
        targets which will be fed into the model.

    Returns:
      A `DNNClassifier` estimator.

    Raises:
      ValueError: If `n_classes` < 2.
    """
    if enable_centered_bias is None:
      enable_centered_bias = True
      dnn_linear_combined._changing_default_center_bias()  # pylint: disable=protected-access
    self._hidden_units = hidden_units
    self._feature_columns = feature_columns
    self._model_dir = model_dir or tempfile.mkdtemp()
    if n_classes <= 1:
      raise ValueError(
          "Classification requires n_classes >= 2. Given: {}".format(n_classes))
    self._n_classes = n_classes
    self._weight_column_name = weight_column_name
    optimizer = optimizer or "Adagrad"
    num_ps_replicas = config.num_ps_replicas if config else 0

    self._estimator = estimator.Estimator(
        model_fn=_dnn_classifier_model_fn,
        model_dir=self._model_dir,
        config=config,
        params={
            "hidden_units": hidden_units,
            "feature_columns": feature_columns,
            "n_classes": n_classes,
            "weight_column_name": weight_column_name,
            "optimizer": optimizer,
            "activation_fn": activation_fn,
            "dropout": dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "enable_centered_bias": enable_centered_bias,
            "num_ps_replicas": num_ps_replicas,
        },
        feature_engineering_fn=feature_engineering_fn)
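
And a sketch of the multi-class case for this constructor (hypothetical three-class data):

import tensorflow as tf

petal_len = tf.contrib.layers.real_valued_column("petal_len")
petal_wid = tf.contrib.layers.real_valued_column("petal_wid")

def input_fn():
  features = {
      "petal_len": tf.constant([[1.4], [4.7], [6.0]]),
      "petal_wid": tf.constant([[0.2], [1.4], [2.5]]),
  }
  targets = tf.constant([[0], [1], [2]])  # three classes, so n_classes=3
  return features, targets

classifier = tf.contrib.learn.DNNClassifier(
    hidden_units=[10, 10],
    feature_columns=[petal_len, petal_wid],
    n_classes=3)
classifier.fit(input_fn=input_fn, steps=200)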
Example #6
    def __init__(self,
                 feature_columns,
                 model_dir=None,
                 n_classes=2,
                 weight_column_name=None,
                 optimizer=None,
                 gradient_clip_norm=None,
                 enable_centered_bias=None,
                 config=None):
        """Construct a `LinearClassifier` estimator object.

        Args:
          feature_columns: An iterable containing all the feature columns used
            by the model. All items in the set should be instances of classes
            derived from `FeatureColumn`.
          model_dir: Directory to save model parameters, graph, etc. This can
            also be used to load checkpoints from the directory into an
            estimator to continue training a previously saved model.
          n_classes: Number of target classes. Defaults to 2 (binary
            classification).
          weight_column_name: A string defining the feature column name that
            represents weights. It is used to down-weight or boost examples
            during training, and it is multiplied by the loss of the example.
          optimizer: The optimizer used to train the model. If specified, it
            should be either an instance of `tf.Optimizer` or the
            SDCAOptimizer. If `None`, the Ftrl optimizer will be used.
          gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
            to their global norm with this clipping ratio. See
            `tf.clip_by_global_norm` for more details.
          enable_centered_bias: A bool. If True, estimator will learn a
            centered bias variable for each class. Rest of the model structure
            learns the residual after centered bias.
          config: `RunConfig` object to configure the runtime settings.

        Returns:
          A `LinearClassifier` estimator.

        Raises:
          ValueError: if n_classes < 2.
        """
        # TODO(zoy): Give an unsupported error if enable_centered_bias is
        #    requested for SDCA once its default changes to False.
        if enable_centered_bias is None:
            enable_centered_bias = True
            dnn_linear_combined._changing_default_center_bias()  # pylint: disable=protected-access
        self._model_dir = model_dir or tempfile.mkdtemp()
        if n_classes < 2:
            raise ValueError("Classification requires n_classes >= 2")
        self._n_classes = n_classes
        self._feature_columns = feature_columns
        assert self._feature_columns
        self._weight_column_name = weight_column_name
        self._optimizer = _get_default_optimizer(feature_columns)
        if optimizer:
            self._optimizer = _get_optimizer(optimizer)
        num_ps_replicas = config.num_ps_replicas if config else 0

        if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
            model_fn = sdca_classifier_model_fn
            params = {
                "feature_columns": feature_columns,
                "optimizer": self._optimizer,
                "weight_column_name": weight_column_name,
                "loss_type": "logistic_loss",
            }
        else:
            model_fn = _linear_classifier_model_fn
            params = {
                "n_classes": n_classes,
                "weight_column_name": weight_column_name,
                "feature_columns": feature_columns,
                "optimizer": self._optimizer,
                "gradient_clip_norm": gradient_clip_norm,
                "enable_centered_bias": enable_centered_bias,
                "num_ps_replicas": num_ps_replicas,
            }

        self._estimator = estimator.Estimator(model_fn=model_fn,
                                              model_dir=self._model_dir,
                                              config=config,
                                              params=params)
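
This older revision predates `_joint_weight` and `feature_engineering_fn`; a sketch exercising two of its remaining knobs, per-example weights and gradient clipping (names and data hypothetical):

import tensorflow as tf

clicks = tf.contrib.layers.real_valued_column("clicks")

def input_fn():
  features = {
      "clicks": tf.constant([[2.0], [9.0], [4.0]]),
      # The weight column lives in `features` but is not a feature column.
      "importance": tf.constant([[1.0], [0.5], [2.0]]),
  }
  targets = tf.constant([[0], [1], [0]])
  return features, targets

classifier = tf.contrib.learn.LinearClassifier(
    feature_columns=[clicks],
    weight_column_name="importance",  # multiplies each example's loss
    gradient_clip_norm=5.0)           # see tf.clip_by_global_norm
classifier.fit(input_fn=input_fn, steps=100)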