Example #1
 def testRegression(self):
     head = head_lib._regression_head()
     with tf.Graph().as_default(), tf.Session() as sess:
         prediction = tf.constant([[1.0], [1.0], [3.0]])
         labels = tf.constant([[0.0], [1.0], [1.0]])
         model_fn_ops = head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN, _noop_train_op, logits=prediction)
         self.assertAlmostEqual(5.0 / 3, sess.run(model_fn_ops.loss))
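The asserted value is just the mean squared error over the batch: the squared errors are (1-0)^2 = 1, (1-1)^2 = 0 and (3-1)^2 = 4, and their mean is 5/3. A quick NumPy sketch (not part of the original test) confirms this:

 import numpy as np

 predictions = np.array([1.0, 1.0, 3.0])
 labels = np.array([0.0, 1.0, 1.0])
 # Squared errors are 1, 0 and 4; their mean is 5/3.
 assert np.isclose(np.mean((predictions - labels) ** 2), 5.0 / 3)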
Example #2
 def testRegression(self):
   head = head_lib._regression_head()
   with tf.Graph().as_default(), tf.Session() as sess:
     prediction = tf.constant([[1.], [1.], [3.]])
     targets = tf.constant([[0.], [1.], [1.]])
     model_fn_ops = head.head_ops({}, targets,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  None, logits=prediction)
     self.assertAlmostEqual(5. / 3, sess.run(model_fn_ops.loss))
Example #3
 def testRegression(self):
   head = head_lib._regression_head()
   with tf.Graph().as_default(), tf.Session() as sess:
     prediction = tf.constant([[1.], [1.], [3.]])
     labels = tf.constant([[0.], [1.], [1.]])
     model_fn_ops = head.head_ops({}, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=prediction)
     self.assertAlmostEqual(5. / 3, sess.run(model_fn_ops.loss))
Example #4
 def testRegressionWithInvalidLogits(self):
   head = head_lib._regression_head()
   with ops.Graph().as_default(), session.Session():
     with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
       head.create_model_fn_ops(
           {},
           labels=((0.,), (1.,), (1.,)),
           mode=model_fn.ModeKeys.TRAIN,
           train_op_fn=_noop_train_op,
           logits=((1., 1.), (1., 1.), (3., 1.)))
Example #5
 def testRegressionWithWeights(self):
     head = head_lib._regression_head(weight_column_name="label_weight")
     with tf.Graph().as_default(), tf.Session() as sess:
         features = {"label_weight": tf.constant([[2.0], [5.0], [0.0]])}
         prediction = tf.constant([[1.0], [1.0], [3.0]])
         labels = tf.constant([[0.0], [1.0], [1.0]])
         model_fn_ops = head.head_ops(
             features, labels, tf.contrib.learn.ModeKeys.TRAIN, _noop_train_op, logits=prediction
         )
         self.assertAlmostEqual(2.0 / 3, sess.run(model_fn_ops.loss), places=3)
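With a weight column, the asserted loss is the weighted squared error averaged over the batch size (an interpretation inferred from the asserted value, not from the head implementation): 2*1 + 5*0 + 0*4 = 2, divided by 3. A small check:

 import numpy as np

 weights = np.array([2.0, 5.0, 0.0])
 squared_errors = np.array([1.0, 0.0, 4.0])  # (1-0)^2, (1-1)^2, (3-1)^2
 assert np.isclose(np.sum(weights * squared_errors) / 3, 2.0 / 3)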
Example #6
 def testRegression(self):
   head = head_lib._regression_head()
   with tf.Graph().as_default(), tf.Session():
     prediction = tf.constant([[1.], [1.], [3.]])
     labels = tf.constant([[0.], [1.], [1.]])
     model_fn_ops = head.head_ops({}, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=prediction)
     _assert_no_variables(self)
     _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Example #8
 def testRegressionEvalMode(self):
   head = head_lib._regression_head()
   with ops.Graph().as_default(), session.Session():
     prediction = constant_op.constant([[1.], [1.], [3.]])
     labels = constant_op.constant([[0.], [1.], [1.]])
     model_fn_ops = head.head_ops(
         {}, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=prediction)
     self.assertIsNone(model_fn_ops.train_op)
     _assert_no_variables(self)
     _assert_summary_tags(self, ["loss"])
     _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Example #9
 def testErrorInSparseTensorLabels(self):
     head = head_lib._regression_head()
     with tf.Graph().as_default():
         prediction = tf.constant([[1.0], [1.0], [3.0]])
         labels = tf.SparseTensor(
             indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
             values=tf.constant([0.0, 1.0, 1.0]),
             shape=[3, 1],
         )
         with self.assertRaisesRegexp(ValueError, "SparseTensor is not supported as labels."):
             head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN, _noop_train_op, logits=prediction)
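The head only accepts dense label tensors. If labels naturally arrive sparse, one possible workaround (hypothetical, not part of the original test) is to densify them before calling the head:

 # Convert the sparse labels to a dense tensor first.
 dense_labels = tf.sparse_tensor_to_dense(labels, default_value=0.0)
 model_fn_ops = head.head_ops({}, dense_labels,
                              tf.contrib.learn.ModeKeys.TRAIN,
                              _noop_train_op, logits=prediction)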
Example #10
 def testRegressionWithWeights(self):
   head = head_lib._regression_head(
       weight_column_name="label_weight")
   with tf.Graph().as_default(), tf.Session() as sess:
     features = {"label_weight": tf.constant([[2.], [5.], [0.]])}
     prediction = tf.constant([[1.], [1.], [3.]])
     labels = tf.constant([[0.], [1.], [1.]])
     model_fn_ops = head.head_ops(features, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=prediction)
     self.assertAlmostEqual(2. / 3, sess.run(model_fn_ops.loss), places=3)
Example #11
 def testRegressionWithLogitsAndLogitsInput(self):
   head = head_lib._regression_head()
   with ops.Graph().as_default(), session.Session():
     with self.assertRaisesRegexp(
         ValueError, "Both logits and logits_input supplied"):
       head.create_model_fn_ops(
           {},
           labels=((0.,), (1.,), (1.,)),
           mode=model_fn.ModeKeys.TRAIN,
           train_op_fn=_noop_train_op,
           logits_input=((0., 0.), (0., 0.), (0., 0.)),
           logits=((1.,), (1.,), (3.,)))
Example #12
 def testRegressionWithLogits(self):
   head = head_lib._regression_head()
   with ops.Graph().as_default(), session.Session():
     model_fn_ops = head.create_model_fn_ops(
         {},
         labels=((0.,), (1.,), (1.,)),
         mode=model_fn.ModeKeys.TRAIN,
         train_op_fn=_noop_train_op,
         logits=((1.,), (1.,), (3.,)))
     _assert_summary_tags(self, ["loss"])
     _assert_no_variables(self)
     _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Example #13
 def testErrorInSparseTensorTarget(self):
   head = head_lib._regression_head()
   with tf.Graph().as_default():
     prediction = tf.constant([[1.], [1.], [3.]])
     targets = tf.SparseTensor(
         indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
         values=tf.constant([0., 1., 1.]),
         shape=[3, 1])
     with self.assertRaisesRegexp(
         ValueError, "SparseTensor is not supported as a target"):
       head.head_ops({}, targets, tf.contrib.learn.ModeKeys.TRAIN, None,
                     logits=prediction)
Example #15
 def testErrorInSparseTensorLabels(self):
   head = head_lib._regression_head()
   with tf.Graph().as_default():
     prediction = tf.constant([[1.], [1.], [3.]])
     labels = tf.SparseTensor(
         indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
         values=tf.constant([0., 1., 1.]),
         shape=[3, 1])
     with self.assertRaisesRegexp(
         ValueError, "SparseTensor is not supported as labels."):
       head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN,
                     _noop_train_op, logits=prediction)
Example #16
 def testRegressionWithLabelName(self):
   label_name = "my_label"
   head = head_lib._regression_head(label_name=label_name)
   with tf.Graph().as_default(), tf.Session():
     prediction = tf.constant([[1.], [1.], [3.]])
     labels = {label_name: tf.constant([[0.], [1.], [1.]])}
     model_fn_ops = head.head_ops({}, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=prediction)
     _assert_no_variables(self)
     _assert_summary_tags(self, ["loss"])
     _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Example #17
 def testRegression(self):
     head = head_lib._regression_head()
     with ops.Graph().as_default(), session.Session():
         prediction = constant_op.constant([[1.], [1.], [3.]])
         labels = constant_op.constant([[0.], [1.], [1.]])
         model_fn_ops = head.head_ops({},
                                      labels,
                                      model_fn.ModeKeys.TRAIN,
                                      _noop_train_op,
                                      logits=prediction)
         _assert_summary_tags(self, ["loss"])
         _assert_no_variables(self)
         _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Example #18
 def testRegressionEvalMode(self):
   head = head_lib._regression_head()
   with ops.Graph().as_default(), session.Session():
     model_fn_ops = head.create_model_fn_ops(
         {},
         labels=((1.,), (1.,), (3.,)),
         mode=model_fn.ModeKeys.EVAL,
         train_op_fn=_noop_train_op,
         logits=((0.,), (1.,), (1.,)))
     self._assert_output_alternatives(model_fn_ops)
     self.assertIsNone(model_fn_ops.train_op)
     _assert_no_variables(self)
     _assert_summary_tags(self, ["loss"])
     _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Example #19
 def testRegressionWithLabelName(self):
     label_name = "my_label"
     head = head_lib._regression_head(label_name=label_name)
     with tf.Graph().as_default(), tf.Session():
         prediction = tf.constant([[1.], [1.], [3.]])
         labels = {label_name: tf.constant([[0.], [1.], [1.]])}
         model_fn_ops = head.head_ops({},
                                      labels,
                                      tf.contrib.learn.ModeKeys.TRAIN,
                                      _noop_train_op,
                                      logits=prediction)
         _assert_no_variables(self)
         _assert_summary_tags(self, ["loss"])
         _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Example #20
 def testRegressionWithLabelName(self):
   label_name = "my_label"
   head = head_lib._regression_head(label_name=label_name)
   with ops.Graph().as_default(), session.Session():
     model_fn_ops = head.create_model_fn_ops(
         {},
         labels={label_name: ((0.,), (1.,), (1.,))},
         mode=model_fn.ModeKeys.TRAIN,
         train_op_fn=_noop_train_op,
         logits=((1.,), (1.,), (3.,)))
     self._assert_output_alternatives(model_fn_ops)
     _assert_no_variables(self)
     _assert_summary_tags(self, ["loss"])
     _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Example #22
 def testRegressionWithWeights(self):
   head = head_lib._regression_head(weight_column_name="label_weight")
   with ops.Graph().as_default(), session.Session():
     weights = ((2.,), (5.,), (0.,))
     model_fn_ops = head.create_model_fn_ops(
         features={"label_weight": weights},
         labels=((0.,), (1.,), (1.,)),
         mode=model_fn.ModeKeys.TRAIN,
         train_op_fn=_noop_train_op,
         logits=((1.,), (1.,), (3.,)))
     _assert_no_variables(self)
     _assert_summary_tags(self, ["loss"])
     _assert_metrics(self, 2. / len(weights), {"loss": 2. / np.sum(weights)},
                     model_fn_ops)
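Note the two normalizations asserted here: the expected loss divides the weighted error sum (2.0) by the batch size, while the expected "loss" metric divides it by the sum of the weights. A sketch of the arithmetic:

 import numpy as np

 weights = ((2.,), (5.,), (0.,))
 assert 2. / len(weights) == 2. / 3               # loss: mean over the batch
 assert np.isclose(2. / np.sum(weights), 2. / 7)  # metric: weighted mean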
Example #24
 def testRegressionEvalMode(self):
     head = head_lib._regression_head()
     with tf.Graph().as_default(), tf.Session():
         prediction = tf.constant([[1.], [1.], [3.]])
         labels = tf.constant([[0.], [1.], [1.]])
         model_fn_ops = head.head_ops({},
                                      labels,
                                      tf.contrib.learn.ModeKeys.EVAL,
                                      _noop_train_op,
                                      logits=prediction)
         self.assertIsNone(model_fn_ops.train_op)
         _assert_no_variables(self)
         _assert_summary_tags(self, ["loss"])
         _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Example #25
 def testRegressionErrorInSparseTensorLabels(self):
   head = head_lib._regression_head()
   with ops.Graph().as_default():
     labels = sparse_tensor.SparseTensorValue(
         indices=((0, 0), (1, 0), (2, 0)),
         values=(0., 1., 1.),
         dense_shape=(3, 1))
     with self.assertRaisesRegexp(ValueError,
                                  "SparseTensor is not supported as labels."):
       head.create_model_fn_ops(
           {},
           labels=labels,
           mode=model_fn.ModeKeys.TRAIN,
           train_op_fn=_noop_train_op,
           logits=((1.,), (1.,), (3.,)))
Example #26
 def testRegressionWithLogitsInput(self):
   head = head_lib._regression_head()
   with ops.Graph().as_default(), session.Session():
     model_fn_ops = head.create_model_fn_ops(
         {},
         labels=((0.,), (1.,), (1.,)),
         mode=model_fn.ModeKeys.TRAIN,
         train_op_fn=_noop_train_op,
         logits_input=((0., 0.), (0., 0.), (0., 0.)))
     w = ("logits/weights:0", "logits/biases:0")
     _assert_variables(
         self, expected_global=w, expected_model=w, expected_trainable=w)
     variables.global_variables_initializer().run()
     _assert_summary_tags(self, ["loss"])
     _assert_metrics(self, 2. / 3, {"loss": 2. / 3}, model_fn_ops)
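When `logits_input` is supplied instead of `logits`, the head builds its own linear layer (the `logits/weights` and `logits/biases` variables asserted above). Because the supplied inputs are all zeros and the bias starts at zero, the produced logits are all zero, which is where the 2/3 comes from:

 import numpy as np

 predictions = np.zeros(3)  # zero inputs through the linear logits layer
 labels = np.array([0.0, 1.0, 1.0])
 assert np.isclose(np.mean((predictions - labels) ** 2), 2.0 / 3)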
Example #27
 def testRegressionErrorInSparseTensorLabels(self):
   head = head_lib._regression_head()
   with ops.Graph().as_default():
     labels = sparse_tensor.SparseTensorValue(
         indices=((0, 0), (1, 0), (2, 0)),
         values=(0., 1., 1.),
         dense_shape=(3, 1))
     with self.assertRaisesRegexp(ValueError,
                                  "Must set num_classes when passing"):
       head.create_model_fn_ops(
           {},
           labels=labels,
           mode=model_fn.ModeKeys.TRAIN,
           train_op_fn=_noop_train_op,
           logits=((1.,), (1.,), (3.,)))
Example #28
 def testRegressionWithWeights(self):
   head = head_lib._regression_head(
       weight_column_name="label_weight")
   with tf.Graph().as_default(), tf.Session():
     weights = ((2.,), (5.,), (0.,))
     features = {"label_weight": tf.constant(weights)}
     prediction = tf.constant([[1.], [1.], [3.]])
     labels = tf.constant([[0.], [1.], [1.]])
     model_fn_ops = head.head_ops(features, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=prediction)
     _assert_no_variables(self)
     _assert_metrics(self, 2. / len(weights), {
         "loss": 2. / np.sum(weights)
     }, model_fn_ops)
Example #29
 def testRegression(self):
   head = head_lib._regression_head()
   with tf.Graph().as_default(), tf.Session() as sess:
     prediction = tf.constant([[1.], [1.], [3.]])
     labels = tf.constant([[0.], [1.], [1.]])
     model_fn_ops = head.head_ops({}, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=prediction)
     self._assert_metrics(model_fn_ops)
     _assert_no_variables(self)
     self.assertAlmostEqual(5. / 3, sess.run(model_fn_ops.loss))

     model_fn_ops = head.head_ops({}, labels,
                                  tf.contrib.learn.ModeKeys.EVAL,
                                  _noop_train_op, logits=prediction)
     self.assertIsNone(model_fn_ops.train_op)
Example #30
 def testRegressionWithCenteredBias(self):
   head = head_lib._regression_head(enable_centered_bias=True)
   with tf.Graph().as_default(), tf.Session():
     prediction = tf.constant([[1.], [1.], [3.]])
     labels = tf.constant([[0.], [1.], [1.]])
     model_fn_ops = head.head_ops({}, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=prediction)
     _assert_variables(self, expected_global=(
         "centered_bias_weight:0",
         "centered_bias_weight/Adagrad:0",
     ), expected_trainable=(
         "centered_bias_weight:0",
     ))
     tf.global_variables_initializer().run()
     _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
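`enable_centered_bias=True` adds a trainable `centered_bias_weight` variable (plus its Adagrad slot). Since the bias is zero-initialized and, by this reading, simply added to the logits, the initial loss matches the plain regression case:

 import numpy as np

 logits = np.array([1.0, 1.0, 3.0])
 centered_bias = 0.0  # initial value of centered_bias_weight
 labels = np.array([0.0, 1.0, 1.0])
 assert np.isclose(np.mean((logits + centered_bias - labels) ** 2), 5.0 / 3)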
Example #31
 def testErrorInSparseTensorLabels(self):
   head = head_lib._regression_head()
   with ops.Graph().as_default():
     prediction = constant_op.constant([[1.], [1.], [3.]])
     labels = sparse_tensor.SparseTensor(
         indices=constant_op.constant(
             [[0, 0], [1, 0], [2, 0]], dtype=dtypes.int64),
         values=constant_op.constant([0., 1., 1.]),
         dense_shape=[3, 1])
     with self.assertRaisesRegexp(ValueError,
                                  "SparseTensor is not supported as labels."):
       head.head_ops(
           {},
           labels,
           model_fn.ModeKeys.TRAIN,
           _noop_train_op,
           logits=prediction)
Example #32
 def testRegressionWithCenteredBias(self):
   head = head_lib._regression_head(enable_centered_bias=True)
   with ops.Graph().as_default(), session.Session():
     model_fn_ops = head.create_model_fn_ops(
         {},
         labels=((0.,), (1.,), (1.,)),
         mode=model_fn.ModeKeys.TRAIN,
         train_op_fn=_noop_train_op,
         logits=((1.,), (1.,), (3.,)))
     _assert_variables(
         self,
         expected_global=(
             "centered_bias_weight:0",
             "centered_bias_weight/Adagrad:0",),
         expected_trainable=("centered_bias_weight:0",))
     variables.global_variables_initializer().run()
     _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
     _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Example #33
 def testRegressionWithCenteredBias(self):
   head = head_lib._regression_head(
       weight_column_name="label_weight", enable_centered_bias=True)
   with tf.Graph().as_default(), tf.Session() as sess:
     features = {"label_weight": tf.constant([[2.], [5.], [0.]])}
     prediction = tf.constant([[1.], [1.], [3.]])
     labels = tf.constant([[0.], [1.], [1.]])
     model_fn_ops = head.head_ops(features, labels,
                                  tf.contrib.learn.ModeKeys.TRAIN,
                                  _noop_train_op, logits=prediction)
     self._assert_metrics(model_fn_ops)
     _assert_variables(self, expected_global=(
         "centered_bias_weight:0",
         "centered_bias_weight/Adagrad:0",
     ), expected_trainable=(
         "centered_bias_weight:0",
     ))
     tf.global_variables_initializer().run()
     self.assertAlmostEqual(2. / 3, sess.run(model_fn_ops.loss), places=3)
Example #34
    def __init__(self,
                 model_dir=None,
                 label_dimension=1,
                 weight_column_name=None,
                 config=None,
                 feature_engineering_fn=None):
        """Initializes a DebugRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      weight_column_name: A string defining the feature column name
        representing weights. It is used to down-weight or boost examples
        during training. It will be multiplied by the loss of the example.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and returns
                        features and labels which will be fed into the model.
    Returns:
      A `DebugRegressor` estimator.
    """

        params = {
            "head":
            head_lib._regression_head(  # pylint: disable=protected-access
                weight_column_name=weight_column_name,
                label_dimension=label_dimension,
                enable_centered_bias=True)
        }

        super(DebugRegressor,
              self).__init__(model_fn=debug_model_fn,
                             model_dir=model_dir,
                             config=config,
                             params=params,
                             feature_engineering_fn=feature_engineering_fn)
Example #35
  def __init__(self,
               model_dir=None,
               label_dimension=1,
               weight_column_name=None,
               config=None,
               feature_engineering_fn=None):
    """Initializes a DebugRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      weight_column_name: A string defining the feature column name
        representing weights. It is used to down-weight or boost examples
        during training. It will be multiplied by the loss of the example.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and returns
                        features and labels which will be fed into the model.
    Returns:
      A `DebugRegressor` estimator.
    """

    params = {
        "head":
            head_lib._regression_head(  # pylint: disable=protected-access
                weight_column_name=weight_column_name,
                label_dimension=label_dimension,
                enable_centered_bias=True)
    }

    super(DebugRegressor, self).__init__(
        model_fn=debug_model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)
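A minimal usage sketch for this constructor; the input data and `input_fn` below are hypothetical:

 import numpy as np
 import tensorflow as tf

 regressor = tf.contrib.learn.DebugRegressor(label_dimension=1)

 def input_fn():
   # Toy data; DebugRegressor ignores the features and predicts a
   # constant learned from the labels.
   features = {"x": tf.constant(np.random.rand(8, 1), dtype=tf.float32)}
   labels = tf.constant(np.random.rand(8, 1), dtype=tf.float32)
   return features, labels

 regressor.fit(input_fn=input_fn, steps=10)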
Example #36
  def __init__(self,  # _joint_weights: pylint: disable=invalid-name
               feature_columns,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               label_dimension=1,
               _joint_weights=False,
               config=None,
               feature_engineering_fn=None):
    """Construct a `LinearRegressor` estimator object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      weight_column_name: A string defining the feature column name
        representing weights. It is used to down-weight or boost examples
        during training. It will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Ftrl optimizer.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      label_dimension: Dimension of the label for multilabels. Defaults to 1.
      _joint_weights: If True use a single (possibly partitioned) variable to
        store the weights. It's faster, but requires all feature columns are
        sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.

    Returns:
      A `LinearRegressor` estimator.
    """
    self._feature_columns = tuple(feature_columns or [])
    assert self._feature_columns
    self._optimizer = optimizer

    chief_hook = None
    if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
        enable_centered_bias):
      enable_centered_bias = False
      logging.warning("centered_bias is not supported with SDCA, "
                      "please disable it explicitly.")
    head = head_lib._regression_head(  # pylint: disable=protected-access
        weight_column_name=weight_column_name,
        label_dimension=label_dimension,
        enable_centered_bias=enable_centered_bias)
    params = {
        "head": head,
        "feature_columns": feature_columns,
        "optimizer": optimizer,
    }

    if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
      assert label_dimension == 1, "SDCA only applies for label_dimension=1."
      assert not _joint_weights, ("_joint_weights is incompatible with"
                                  " SDCAOptimizer.")

      model_fn = sdca_model_fn
      # The model_fn passes the model parameters to the chief_hook. We then use
      # the hook to update weights and shrink step only on the chief.
      chief_hook = _SdcaUpdateWeightsHook()
      params.update({
          "weight_column_name": weight_column_name,
          "update_weights_hook": chief_hook,
      })
    else:
      model_fn = _linear_model_fn
      params.update({
          "gradient_clip_norm": gradient_clip_norm,
          "joint_weights": _joint_weights,
      })

    super(LinearRegressor, self).__init__(
        model_fn=model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)
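For context, a minimal way to exercise this constructor; the column name and data are illustrative:

 import tensorflow as tf

 x_col = tf.contrib.layers.real_valued_column("x")
 regressor = tf.contrib.learn.LinearRegressor(feature_columns=[x_col])

 def input_fn():
   features = {"x": tf.constant([[1.0], [2.0], [3.0]])}
   labels = tf.constant([[2.0], [4.0], [6.0]])  # y = 2x toy data
   return features, labels

 regressor.fit(input_fn=input_fn, steps=100)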
Example #37
    def __init__(self,
                 hidden_units,
                 feature_columns,
                 model_dir=None,
                 weight_column_name=None,
                 optimizer=None,
                 activation_fn=nn.relu,
                 dropout=None,
                 gradient_clip_norm=None,
                 enable_centered_bias=False,
                 config=None,
                 feature_engineering_fn=None,
                 label_dimension=1,
                 embedding_lr_multipliers=None):
        """Initializes a `DNNRegressor` instance.

    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      weight_column_name: A string defining the feature column name
        representing weights. It is used to down-weight or boost examples
        during training. It will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.
      label_dimension: Dimension of the label for multilabels. Defaults to 1.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
          a `float` multiplier. Multiplier will be used to multiply with
          learning rate for the embedding variables.

    Returns:
      A `DNNRegressor` estimator.
    """
        self._feature_columns = feature_columns

        self._estimator = estimator.Estimator(
            model_fn=_dnn_model_fn,
            model_dir=model_dir,
            config=config,
            params={
                "head":
                head_lib._regression_head(  # pylint: disable=protected-access
                    label_dimension=label_dimension,
                    weight_column_name=weight_column_name,
                    enable_centered_bias=enable_centered_bias),
                "hidden_units":
                hidden_units,
                "feature_columns":
                feature_columns,
                "optimizer":
                optimizer,
                "activation_fn":
                activation_fn,
                "dropout":
                dropout,
                "gradient_clip_norm":
                gradient_clip_norm,
                "num_ps_replicas":
                config.num_ps_replicas if config else 0,
                "embedding_lr_multipliers":
                embedding_lr_multipliers,
            },
            feature_engineering_fn=feature_engineering_fn)
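A short, hypothetical usage sketch for the `DNNRegressor` constructor above:

 import tensorflow as tf

 x_col = tf.contrib.layers.real_valued_column("x")
 regressor = tf.contrib.learn.DNNRegressor(
     hidden_units=[64, 32], feature_columns=[x_col])

 def input_fn():
   features = {"x": tf.constant([[1.0], [2.0], [3.0]])}
   labels = tf.constant([[2.0], [4.0], [6.0]])
   return features, labels

 regressor.fit(input_fn=input_fn, steps=100)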
Example #38
    def __init__(
            self,  # _joint_linear_weights pylint: disable=invalid-name
            model_dir=None,
            weight_column_name=None,
            linear_feature_columns=None,
            linear_optimizer=None,
            _joint_linear_weights=False,
            dnn_feature_columns=None,
            dnn_optimizer=None,
            dnn_hidden_units=None,
            dnn_activation_fn=nn.relu,
            dnn_dropout=None,
            gradient_clip_norm=None,
            enable_centered_bias=False,
            label_dimension=1,
            config=None,
            feature_engineering_fn=None,
            embedding_lr_multipliers=None,
            input_layer_min_slice_size=None):
        """Initializes a DNNLinearCombinedRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      weight_column_name: A string defining the feature column name
        representing weights. It is used to down-weight or boost examples
        during training. It will be multiplied by the loss of the example.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set must be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. If `None`, will use a FTRL optimizer.
      _joint_linear_weights: If True a single (possibly partitioned) variable
        will be used to store the linear model weights. It's faster, but
        requires that all columns are sparse and have the 'sum' combiner.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set must be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. If `None`, will use an Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If None,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      config: RunConfig object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
          a `float` multiplier. Multiplier will be used to multiply with
          learning rate for the embedding variables.
      input_layer_min_slice_size: Optional. The min slice size of input layer
          partitions. If not provided, will use the default of 64M.


    Raises:
      ValueError: If both linear_feature_columns and dnn_feature_columns are
        empty at the same time.
    """
        linear_feature_columns = linear_feature_columns or []
        dnn_feature_columns = dnn_feature_columns or []
        self._feature_columns = linear_feature_columns + dnn_feature_columns
        if not self._feature_columns:
            raise ValueError(
                "Either linear_feature_columns or dnn_feature_columns "
                "must be defined.")
        head = head_lib._regression_head(  # pylint: disable=protected-access
            weight_column_name=weight_column_name,
            label_dimension=label_dimension,
            enable_centered_bias=enable_centered_bias)
        super(DNNLinearCombinedRegressor,
              self).__init__(model_fn=_dnn_linear_combined_model_fn,
                             model_dir=model_dir,
                             config=config,
                             params={
                                 "head":
                                 head,
                                 "linear_feature_columns":
                                 linear_feature_columns,
                                 "linear_optimizer":
                                 linear_optimizer,
                                 "joint_linear_weights":
                                 _joint_linear_weights,
                                 "dnn_feature_columns":
                                 dnn_feature_columns,
                                 "dnn_optimizer":
                                 dnn_optimizer,
                                 "dnn_hidden_units":
                                 dnn_hidden_units,
                                 "dnn_activation_fn":
                                 dnn_activation_fn,
                                 "dnn_dropout":
                                 dnn_dropout,
                                 "gradient_clip_norm":
                                 gradient_clip_norm,
                                 "embedding_lr_multipliers":
                                 embedding_lr_multipliers,
                                 "input_layer_min_slice_size":
                                 input_layer_min_slice_size,
                             },
                             feature_engineering_fn=feature_engineering_fn)
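A hypothetical construction sketch combining the linear and DNN parts configured above:

 import tensorflow as tf

 x_col = tf.contrib.layers.real_valued_column("x")
 regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
     linear_feature_columns=[x_col],
     dnn_feature_columns=[x_col],
     dnn_hidden_units=[32, 16])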
Example #39
  def __init__(self,  # _joint_linear_weights pylint: disable=invalid-name
               model_dir=None,
               weight_column_name=None,
               linear_feature_columns=None,
               linear_optimizer=None,
               _joint_linear_weights=False,
               dnn_feature_columns=None,
               dnn_optimizer=None,
               dnn_hidden_units=None,
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=None,
               target_dimension=1,
               config=None,
               feature_engineering_fn=None):
    """Initializes a DNNLinearCombinedRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      weight_column_name: A string defining the feature column name
        representing weights. It is used to down-weight or boost examples
        during training. It will be multiplied by the loss of the example.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set must be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. If `None`, will use a FTRL optimizer.
      _joint_linear_weights: If True a single (possibly partitioned) variable
        will be used to store the linear model weights. It's faster, but
        requires that all columns are sparse and have the 'sum' combiner.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set must be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. If `None`, will use an Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If None,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      target_dimension: TODO(zakaria): dimension of the target for multilabels.
      config: RunConfig object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        targets which are the output of `input_fn` and
                        returns features and targets which will be fed
                        into the model.

    Raises:
      ValueError: If both linear_feature_columns and dnn_feature_columns are
        empty at the same time.
    """
    if enable_centered_bias is None:
      enable_centered_bias = True
      _changing_default_center_bias()
    # pylint: disable=protected-access
    head = head_lib._regression_head(
        weight_column_name=weight_column_name,
        target_dimension=target_dimension,
        enable_centered_bias=enable_centered_bias)
    super(DNNLinearCombinedRegressor, self).__init__(
        model_dir=model_dir,
        linear_feature_columns=linear_feature_columns,
        linear_optimizer=linear_optimizer,
        _joint_linear_weights=_joint_linear_weights,
        dnn_feature_columns=dnn_feature_columns,
        dnn_optimizer=dnn_optimizer,
        dnn_hidden_units=dnn_hidden_units,
        dnn_activation_fn=dnn_activation_fn,
        dnn_dropout=dnn_dropout,
        gradient_clip_norm=gradient_clip_norm,
        head=head,
        config=config,
        feature_engineering_fn=feature_engineering_fn,
        default_prediction_key=head_lib.PedictionKey.SCORES)
Example #40
  def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               config=None,
               feature_engineering_fn=None,
               label_dimension=1,
               embedding_lr_multipliers=None):
    """Initializes a `DNNRegressor` instance.

    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      weight_column_name: A string defining the feature column name
        representing weights. It is used to down-weight or boost examples
        during training. It will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.
      label_dimension: Dimension of the label for multilabels. Defaults to 1.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
          a `float` multiplier. Multiplier will be used to multiply with
          learning rate for the embedding variables.

    Returns:
      A `DNNRegressor` estimator.
    """
    self._feature_columns = tuple(feature_columns or [])
    self._estimator = estimator.Estimator(
        model_fn=_dnn_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head":
                head_lib._regression_head(  # pylint: disable=protected-access
                    label_dimension=label_dimension,
                    weight_column_name=weight_column_name,
                    enable_centered_bias=enable_centered_bias),
            "hidden_units":
                hidden_units,
            "feature_columns":
                self._feature_columns,
            "optimizer":
                optimizer,
            "activation_fn":
                activation_fn,
            "dropout":
                dropout,
            "gradient_clip_norm":
                gradient_clip_norm,
            "embedding_lr_multipliers":
                embedding_lr_multipliers,
        },
        feature_engineering_fn=feature_engineering_fn)
Example #41
    def __init__(
            self,  # _joint_linear_weights pylint: disable=invalid-name
            model_dir=None,
            weight_column_name=None,
            linear_feature_columns=None,
            linear_optimizer=None,
            _joint_linear_weights=False,
            dnn_feature_columns=None,
            dnn_optimizer=None,
            dnn_hidden_units=None,
            dnn_activation_fn=nn.relu,
            dnn_dropout=None,
            gradient_clip_norm=None,
            enable_centered_bias=False,
            label_dimension=1,
            config=None,
            feature_engineering_fn=None):
        """Initializes a DNNLinearCombinedRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      weight_column_name: A string defining the feature column name
        representing weights. It is used to down-weight or boost examples
        during training. It will be multiplied by the loss of the example.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set must be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. If `None`, will use a FTRL optimizer.
      _joint_linear_weights: If True a single (possibly partitioned) variable
        will be used to store the linear model weights. It's faster, but
        requires that all columns are sparse and have the 'sum' combiner.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set must be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. If `None`, will use an Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If None,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      label_dimension: TODO(zakaria): dimension of the label for multilabels.
      config: RunConfig object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.

    Raises:
      ValueError: If both linear_feature_columns and dnn_feature_columns are
        empty at the same time.
    """
        head = head_lib._regression_head(  # pylint: disable=protected-access
            weight_column_name=weight_column_name,
            label_dimension=label_dimension,
            enable_centered_bias=enable_centered_bias)
        super(DNNLinearCombinedRegressor, self).__init__(
            model_dir=model_dir,
            linear_feature_columns=linear_feature_columns,
            linear_optimizer=linear_optimizer,
            _joint_linear_weights=_joint_linear_weights,
            dnn_feature_columns=dnn_feature_columns,
            dnn_optimizer=dnn_optimizer,
            dnn_hidden_units=dnn_hidden_units,
            dnn_activation_fn=dnn_activation_fn,
            dnn_dropout=dnn_dropout,
            gradient_clip_norm=gradient_clip_norm,
            head=head,
            config=config,
            feature_engineering_fn=feature_engineering_fn,
            default_prediction_key=prediction_key.PredictionKey.SCORES,
            enable_centered_bias=enable_centered_bias)
Example #42
    def __init__(
            self,  # _joint_weights: pylint: disable=invalid-name
            feature_columns,
            model_dir=None,
            weight_column_name=None,
            optimizer=None,
            gradient_clip_norm=None,
            enable_centered_bias=False,
            label_dimension=1,
            _joint_weights=False,
            config=None,
            feature_engineering_fn=None):
        """Construct a `LinearRegressor` estimator object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      weight_column_name: A string defining the feature column name
        representing weights. It is used to down-weight or boost examples
        during training. It will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Ftrl optimizer.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      label_dimension: Dimension of the label for multilabels. Defaults to 1.
      _joint_weights: If True use a single (possibly partitioned) variable to
        store the weights. It's faster, but requires all feature columns are
        sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.

    Returns:
      A `LinearRegressor` estimator.
    """
        self._feature_columns = tuple(feature_columns or [])
        assert self._feature_columns
        self._optimizer = optimizer

        chief_hook = None
        if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer)
                and enable_centered_bias):
            enable_centered_bias = False
            logging.warning("centered_bias is not supported with SDCA, "
                            "please disable it explicitly.")
        head = head_lib._regression_head(  # pylint: disable=protected-access
            weight_column_name=weight_column_name,
            label_dimension=label_dimension,
            enable_centered_bias=enable_centered_bias)
        params = {
            "head": head,
            "feature_columns": feature_columns,
            "optimizer": optimizer,
        }

        if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
            assert label_dimension == 1, "SDCA only applies for label_dimension=1."
            assert not _joint_weights, ("_joint_weights is incompatible with"
                                        " SDCAOptimizer.")

            model_fn = sdca_model_fn
            # The model_fn passes the model parameters to the chief_hook. We then use
            # the hook to update weights and shrink step only on the chief.
            chief_hook = _SdcaUpdateWeightsHook()
            params.update({
                "weight_column_name": weight_column_name,
                "update_weights_hook": chief_hook,
            })
        else:
            model_fn = _linear_model_fn
            params.update({
                "gradient_clip_norm": gradient_clip_norm,
                "joint_weights": _joint_weights,
            })

        super(LinearRegressor,
              self).__init__(model_fn=model_fn,
                             model_dir=model_dir,
                             config=config,
                             params=params,
                             feature_engineering_fn=feature_engineering_fn)
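The SDCA branch above requires `label_dimension=1` and no joint weights. A hedged sketch of selecting that path (the `example_id` column name is our assumption):

 import tensorflow as tf

 optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
     example_id_column="example_id")
 x_col = tf.contrib.layers.real_valued_column("x")
 # With an SDCAOptimizer the constructor selects sdca_model_fn and installs
 # the _SdcaUpdateWeightsHook on the chief.
 regressor = tf.contrib.learn.LinearRegressor(
     feature_columns=[x_col], optimizer=optimizer)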
Example #43
  def __init__(self,  # _joint_linear_weights pylint: disable=invalid-name
               model_dir=None,
               weight_column_name=None,
               linear_feature_columns=None,
               linear_optimizer=None,
               _joint_linear_weights=False,
               dnn_feature_columns=None,
               dnn_optimizer=None,
               dnn_hidden_units=None,
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               label_dimension=1,
               config=None,
               feature_engineering_fn=None,
               embedding_lr_multipliers=None,
               input_layer_min_slice_size=None):
    """Initializes a DNNLinearCombinedRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
      weight_column_name: A string defining the feature column name
        representing weights. It is used to down-weight or boost examples
        during training. It will be multiplied by the loss of the example.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set must be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. If `None`, will use a FTRL optimizer.
      _joint_linear_weights: If True a single (possibly partitioned) variable
        will be used to store the linear model weights. It's faster, but
        requires that all columns are sparse and have the 'sum' combiner.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set must be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. If `None`, will use an Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If None,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      config: RunConfig object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
          a `float` multiplier. Multiplier will be used to multiply with
          learning rate for the embedding variables.
      input_layer_min_slice_size: Optional. The min slice size of input layer
          partitions. If not provided, will use the default of 64M.


    Raises:
      ValueError: If both linear_feature_columns and dnn_feature_columns are
        empty at the same time.
    """
    linear_feature_columns = linear_feature_columns or []
    dnn_feature_columns = dnn_feature_columns or []
    self._feature_columns = linear_feature_columns + dnn_feature_columns
    if not self._feature_columns:
      raise ValueError("Either linear_feature_columns or dnn_feature_columns "
                       "must be defined.")
    head = head_lib._regression_head(  # pylint: disable=protected-access
        weight_column_name=weight_column_name,
        label_dimension=label_dimension,
        enable_centered_bias=enable_centered_bias)
    super(DNNLinearCombinedRegressor, self).__init__(
        model_fn=_dnn_linear_combined_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head": head,
            "linear_feature_columns": linear_feature_columns,
            "linear_optimizer": linear_optimizer,
            "joint_linear_weights": _joint_linear_weights,
            "dnn_feature_columns": dnn_feature_columns,
            "dnn_optimizer": dnn_optimizer,
            "dnn_hidden_units": dnn_hidden_units,
            "dnn_activation_fn": dnn_activation_fn,
            "dnn_dropout": dnn_dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "embedding_lr_multipliers": embedding_lr_multipliers,
            "input_layer_min_slice_size": input_layer_min_slice_size,
        },
        feature_engineering_fn=feature_engineering_fn)