def test_train_create_loss_logits_tensor_multi_dim(self):
    """Tests create_loss with multi-dimensional logits of shape [2, 2, 5]."""
    head1 = head_lib.regression_head(label_dimension=2, name='head1')
    head2 = head_lib.regression_head(label_dimension=3, name='head2')
    multi_head = multi_head_lib.multi_head([head1, head2])

    logits = np.array(
        [[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],
         [[-1.5, 1.5, -2., 2., -2.], [-1.5, 1.5, -2., 2., -2.]]],
        dtype=np.float32)
    labels = {
        'head1': np.array([[[1., 0.], [1., 0.]],
                           [[1.5, 1.5], [1.5, 1.5]]], dtype=np.float32),
        'head2': np.array([[[0., 1., 0.], [0., 1., 0.]],
                           [[2., 2., 0.], [2., 2., 0.]]], dtype=np.float32),
    }
    # Loss for the first head:
    # loss1 = ((1+1)^2 + (0-1)^2 + (1+1)^2 + (0-1)^2 +
    #          (1.5+1.5)^2 + (1.5-1.5)^2 + (1.5+1.5)^2 + (1.5-1.5)^2) / 8
    #       = 3.5
    # Loss for the second head:
    # loss2 = ((0-2)^2 + (1+2)^2 + (0-2)^2 + (0-2)^2 + (1+2)^2 + (0-2)^2 +
    #          (2+2)^2 + (2-2)^2 + (0+2)^2 + (2+2)^2 + (2-2)^2 + (0+2)^2) / 12
    #       = 6.167
    expected_training_loss = 3.5 + 6.167

    training_loss = multi_head.create_loss(
        features={},
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels)[0]
    tol = 1e-3
    with self.cached_session():
      self.assertAllClose(
          expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
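The expected losses in the comments can be checked with plain NumPy. This editorial sketch just redoes the arithmetic (the 2 + 3 split of the five logit columns follows the label_dimension arguments above) and does not touch the estimator API:

import numpy as np

logits1 = np.array([[[-1., 1.], [-1., 1.]],
                    [[-1.5, 1.5], [-1.5, 1.5]]])        # first 2 logit columns
logits2 = np.array([[[2., -2., 2.], [2., -2., 2.]],
                    [[-2., 2., -2.], [-2., 2., -2.]]])  # remaining 3 columns
labels1 = np.array([[[1., 0.], [1., 0.]],
                    [[1.5, 1.5], [1.5, 1.5]]])
labels2 = np.array([[[0., 1., 0.], [0., 1., 0.]],
                    [[2., 2., 0.], [2., 2., 0.]]])

print(np.mean((labels1 - logits1) ** 2))  # loss1 -> 3.5
print(np.mean((labels2 - logits2) ** 2))  # loss2 -> 6.1667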
  def test_predict_two_heads_logits_tensor_multi_dim(self):
    """Tests predict with multi-dimensional logits of shape [2, 2, 5]."""
    head1 = head_lib.regression_head(label_dimension=2, name='head1')
    head2 = head_lib.regression_head(label_dimension=3, name='head2')
    multi_head = multi_head_lib.multi_head([head1, head2])

    logits = np.array(
        [[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],
         [[-1.5, 1., -3., 2., -2.], [-1.5, 1., -3., 2., -2.]]],
        dtype=np.float32)
    expected_logits1 = np.array(
        [[[-1., 1.], [-1., 1.]],
         [[-1.5, 1.], [-1.5, 1.]]],
        dtype=np.float32)
    expected_logits2 = np.array(
        [[[2., -2., 2.], [2., -2., 2.]],
         [[-3., 2., -2.], [-3., 2., -2.]]],
        dtype=np.float32)

    spec = multi_head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)

    self.assertItemsEqual(
        (_DEFAULT_SERVING_KEY, 'predict', 'head1', 'head1/regression',
         'head1/predict', 'head2', 'head2/regression', 'head2/predict'),
        spec.export_outputs.keys())

    # Assert predictions and export_outputs.
    with self.cached_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      predictions = sess.run(spec.predictions)
      self.assertAllClose(
          expected_logits1,
          predictions[('head1', prediction_keys.PredictionKeys.PREDICTIONS)])
      self.assertAllClose(
          expected_logits2,
          predictions[('head2', prediction_keys.PredictionKeys.PREDICTIONS)])

      self.assertAllClose(
          expected_logits1,
          sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].value))
      self.assertAllClose(
          expected_logits1,
          sess.run(spec.export_outputs['head1'].value))
      self.assertAllClose(
          expected_logits2,
          sess.run(spec.export_outputs['head2'].value))
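The export key layout asserted above can be read as: one global default serving key plus 'predict', and for each head its bare name and '<head_name>/<output>' entries. A small editorial sketch that rebuilds the expected tuple, assuming _DEFAULT_SERVING_KEY is the usual 'serving_default' constant:

# Editorial sketch: reconstruct the expected export_outputs keys.
expected_keys = ['serving_default', 'predict']  # assumes _DEFAULT_SERVING_KEY
for head_name in ('head1', 'head2'):
    expected_keys += [head_name,
                      head_name + '/regression',
                      head_name + '/predict']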
def _get_default_head(params, weights_name, output_type, name=None):
  """Creates a default head based on a type of a problem."""
  if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
    if params.regression:
      return head_lib.regression_head(
          weight_column_name=weights_name,
          label_dimension=params.num_outputs,
          enable_centered_bias=False,
          head_name=name)
    else:
      return head_lib.multi_class_head(
          params.num_classes,
          weight_column_name=weights_name,
          enable_centered_bias=False,
          head_name=name)
  else:
    if params.regression:
      return core_head_lib.regression_head(
          weight_column=weights_name,
          label_dimension=params.num_outputs,
          name=name,
          loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
    else:
      if params.num_classes == 2:
        return core_head_lib.binary_classification_head(
            weight_column=weights_name,
            name=name,
            loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
      else:
        return core_head_lib.multi_class_head(
            n_classes=params.num_classes,
            weight_column=weights_name,
            name=name,
            loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
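For orientation, a hypothetical call to `_get_default_head`. The `_Params` namedtuple below is a stand-in for the real params object (only the attributes the function reads are included), and `ModelBuilderOutputType.ESTIMATOR_SPEC` is assumed to be the enum's non-MODEL_FN_OPS member:

import collections

# Hypothetical stand-in for the caller's params object.
_Params = collections.namedtuple(
    '_Params', ['regression', 'num_outputs', 'num_classes'])

# Three-class classification with a core head (the else branch above).
head = _get_default_head(
    params=_Params(regression=False, num_outputs=1, num_classes=3),
    weights_name='example_weights',
    output_type=ModelBuilderOutputType.ESTIMATOR_SPEC,
    name='my_head')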
Example #6
  def _test_complete_flow(
      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
      label_dimension, batch_size):
    feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))]
    est = linear.LinearEstimator(
        head=head_lib.regression_head(label_dimension=label_dimension),
        feature_columns=feature_columns,
        model_dir=self._model_dir)

    # TRAIN
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))

    # PREDICT
    predictions = np.array([
        x[prediction_keys.PredictionKeys.PREDICTIONS]
        for x in est.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, label_dimension), predictions.shape)

    # EXPORT
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))
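A sketch of how the three input functions fed to `_test_complete_flow` might be built; in the surrounding test suites this is typically done with `numpy_input_fn`. Shapes and values here are assumptions chosen to satisfy `input_dimension`/`label_dimension`:

import numpy as np
from tensorflow.python.estimator.inputs import numpy_io

batch_size, input_dimension, label_dimension = 10, 2, 2
data = np.linspace(
    0., 2., batch_size * input_dimension,
    dtype=np.float32).reshape(batch_size, input_dimension)

train_input_fn = numpy_io.numpy_input_fn(
    x={'x': data}, y=data[:, :label_dimension],
    batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
    x={'x': data}, y=data[:, :label_dimension],
    batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
    x={'x': data}, batch_size=batch_size, shuffle=False)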
def _linear_estimator_fn(
    weight_column=None, label_dimension=1, *args, **kwargs):
  """Returns a LinearEstimator that uses regression_head."""
  return linear.LinearEstimator(
      head=head_lib.regression_head(
          weight_column=weight_column, label_dimension=label_dimension),
      *args, **kwargs)
Example #10
def _dnn_estimator_fn(weight_column=None, label_dimension=1, *args, **kwargs):  # pylint: disable=keyword-arg-before-vararg
  """Returns a DNNEstimator that uses regression_head."""
  return dnn.DNNEstimator(
      head=head_lib.regression_head(
          weight_column=weight_column, label_dimension=label_dimension,
          # Tests in core (from which this test inherits) test the sum loss.
          loss_reduction=losses.Reduction.SUM),
      *args, **kwargs)
Example #13
def _linear_estimator_fn(
    weight_column=None, label_dimension=1, *args, **kwargs):
  """Returns a LinearEstimator that uses regression_head."""
  return linear.LinearEstimator(
      head=head_lib.regression_head(
          weight_column=weight_column, label_dimension=label_dimension,
          # Tests in core (from which this test inherits) test the sum loss.
          loss_reduction=losses.Reduction.SUM),
      *args, **kwargs)
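This variant differs from the `_linear_estimator_fn` above only in passing `loss_reduction=losses.Reduction.SUM`. A quick NumPy illustration of how the two reductions relate, as an editorial sketch rather than the TF loss ops themselves:

import numpy as np

per_example_loss = np.array([1., 2., 3., 4.])
# Reduction.SUM adds the per-example losses; SUM_OVER_BATCH_SIZE divides the
# same sum by the number of elements (the mean, when all weights are 1).
print(per_example_loss.sum())   # -> 10.0
print(per_example_loss.mean())  # -> 2.5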
    def test_train_create_loss_logits_tensor_multi_dim(self):
        """Tests create_loss with multi-dimensional logits of shape [2, 2, 5]."""
        head1 = head_lib.regression_head(label_dimension=2, name='head1')
        head2 = head_lib.regression_head(label_dimension=3, name='head2')
        multi_head = multi_head_lib.multi_head([head1, head2])

        logits = np.array(
            [[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],
             [[-1.5, 1.5, -2., 2., -2.], [-1.5, 1.5, -2., 2., -2.]]],
            dtype=np.float32)
        labels = {
            'head1':
            np.array([[[1., 0.], [1., 0.]], [[1.5, 1.5], [1.5, 1.5]]],
                     dtype=np.float32),
            'head2':
            np.array(
                [[[0., 1., 0.], [0., 1., 0.]], [[2., 2., 0.], [2., 2., 0.]]],
                dtype=np.float32),
        }
        # Loss for the first head:
        # loss1 = (1+1)^2 + (0-1)^2 + (1+1)^2 + (0-1)^2 +
        #         (1.5+1.5)^2 + (1.5-1.5)^2 + (1.5+1.5)^2 + (1.5-1.5)^2
        #       = 28
        # Loss for the second head:
        # loss2 = (0-2)^2 + (1+2)^2 + (0-2)^2 + (0-2)^2 + (1+2)^2 + (0-2)^2 +
        #         (2+2)^2 + (2-2)^2 + (0+2)^2 + (2+2)^2 + (2-2)^2 + (0+2)^2
        #       = 74
        expected_weighted_sum_loss = 28. + 74.

        weighted_sum_loss, example_weight_sum, _ = multi_head.create_loss(
            features={},
            mode=model_fn.ModeKeys.TRAIN,
            logits=logits,
            labels=labels)
        tol = 1e-3
        with self.test_session():
            self.assertAllClose(expected_weighted_sum_loss,
                                weighted_sum_loss.eval(),
                                rtol=tol,
                                atol=tol)
            self.assertAllClose(2. * 2. * 5.,
                                example_weight_sum.eval(),
                                rtol=tol,
                                atol=tol)
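The SUM-reduction expectations here reuse the arrays from the first NumPy sketch above (same `logits1`/`labels1` and `logits2`/`labels2`): the same squared errors, summed instead of averaged, plus an example weight sum of batch size times steps times total label dimensions:

# Continuing the NumPy sketch from the first test above (same arrays).
print(((labels1 - logits1) ** 2).sum())  # -> 28.0
print(((labels2 - logits2) ** 2).sum())  # -> 74.0
print(2 * 2 * (2 + 3))                   # example_weight_sum -> 20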
Example #15
def _linear_only_estimator_fn(feature_columns,
                              model_dir=None,
                              label_dimension=1,
                              weight_column=None,
                              optimizer='Ftrl',
                              config=None,
                              partitioner=None):
    return dnn_linear_combined.DNNLinearCombinedEstimator(
        head=head_lib.regression_head(weight_column=weight_column,
                                      label_dimension=label_dimension),
        model_dir=model_dir,
        linear_feature_columns=feature_columns,
        linear_optimizer=optimizer,
        input_layer_partitioner=partitioner,
        config=config)
def _linear_only_estimator_fn(
    feature_columns,
    model_dir=None,
    label_dimension=1,
    weight_column=None,
    optimizer='Ftrl',
    config=None,
    partitioner=None):
  return dnn_linear_combined.DNNLinearCombinedEstimator(
      head=head_lib.regression_head(
          weight_column=weight_column, label_dimension=label_dimension,
          # Tests in core (from which this test inherits) test the sum loss.
          loss_reduction=losses.Reduction.SUM),
      model_dir=model_dir,
      linear_feature_columns=feature_columns,
      linear_optimizer=optimizer,
      input_layer_partitioner=partitioner,
      config=config)
Example #18
def _dnn_only_estimator_fn(hidden_units,
                           feature_columns,
                           model_dir=None,
                           label_dimension=1,
                           weight_column=None,
                           optimizer='Adagrad',
                           activation_fn=nn.relu,
                           dropout=None,
                           input_layer_partitioner=None,
                           config=None):
    return dnn_linear_combined.DNNLinearCombinedEstimator(
        head=head_lib.regression_head(weight_column=weight_column,
                                      label_dimension=label_dimension),
        model_dir=model_dir,
        dnn_feature_columns=feature_columns,
        dnn_optimizer=optimizer,
        dnn_hidden_units=hidden_units,
        dnn_activation_fn=activation_fn,
        dnn_dropout=dropout,
        input_layer_partitioner=input_layer_partitioner,
        config=config)
def _dnn_only_estimator_fn(
    hidden_units,
    feature_columns,
    model_dir=None,
    label_dimension=1,
    weight_column=None,
    optimizer='Adagrad',
    activation_fn=nn.relu,
    dropout=None,
    input_layer_partitioner=None,
    config=None):
  return dnn_linear_combined.DNNLinearCombinedEstimator(
      head=head_lib.regression_head(
          weight_column=weight_column, label_dimension=label_dimension,
          # Tests in core (from which this test inherits) test the sum loss.
          loss_reduction=losses.Reduction.SUM),
      model_dir=model_dir,
      dnn_feature_columns=feature_columns,
      dnn_optimizer=optimizer,
      dnn_hidden_units=hidden_units,
      dnn_activation_fn=activation_fn,
      dnn_dropout=dropout,
      input_layer_partitioner=input_layer_partitioner,
      config=config)
Example #21
    def __init__(self,
                 n_classes,
                 feature_columns=None,
                 model_dir=None,
                 optimizer=None,
                 config=None,
                 hparams=None,
                 head=None,
                 weight_column=None,
                 dtype=dtypes.float32,
                 name="model"):
        """Construct Classifier/Regressor.

    Args:
      n_classes: Number of classes, set to 0 if used for regression. If head
        is not provided, only n_classes = 0 or 2 are currently supported.
      feature_columns: Optional. An iterable containing all the feature
        columns used by the model; if not set, the model will use all
        features returned by input_fn. All items in the set should be
        instances of classes derived from `FeatureColumn` and are used to
        transform the input columns into a numeric format that is fed into
        the rest of the graph.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an
        estimator to continue training a previously saved model.
      optimizer: `Optimizer` object, or callable (with no inputs) that
        returns an `Optimizer` object, defines the optimizer to use for
        training. This is typically one of the optimizers defined in tf.train.
      config: RunConfig object to configure the runtime settings. Typically set
        to learn_runner.EstimatorConfig().
      hparams: Hyperparameter object to be passed to prediction builder.
      head: A `TensorFlow Estimator Head` which specifies how the loss
        function, final predictions, and so on are generated from model
        outputs. Defaults to a sigmoid cross-entropy head for binary
        classification and a mean squared error head for regression.
      weight_column: A string or a `tf.feature_column.numeric_column` defining
        feature column representing weights. It is used to down weight or boost
        examples during training. It will be multiplied by the loss of the
        example.
      dtype: The internal type to be used for tensors.
      name: Name to be used as suffix to top-level variable scope for model.

    Raises:
      ValueError: invalid parameters.
      KeyError: type of feature not supported.
    """
        # We sort the list of feature_columns here, since we will later create
        # the ops that implement their represented transformations (e.g. embedding)
        # in the order in which they are listed in self._feature_columns.
        # The constructed ops are then given names by the tensorflow framework
        # that depend on their creation order (for example, if two ops have the
        # same type they will be suffixed by an ordinal reflecting the creation
        # order). As this code must be deterministic (since it could be
        # executed in a multi-machine tensorflow cluster), we must have the order
        # of feature columns deterministic as well (which would not be the case if
        # it's, for example, the result of calling keys() on a dictionary); thus
        # we sort the feature columns here by their names.
        self._feature_columns = (
            None if feature_columns is None else
            tools.get_sorted_feature_columns(feature_columns))
        self._weight_column = weight_column
        self._optimizer = optimizer
        self._config = config
        self._hparams = hparams
        self._name = _SCOPE_TENSORFLOW_LATTICE_PREFIX + name
        self._n_classes = n_classes
        self._dtype = dtype

        if head is not None:
            self._head = head
        else:
            if n_classes == 0:
                self._head = (head_lib.regression_head(
                    label_dimension=1,
                    weight_column=self._weight_column,
                    loss_reduction=losses.Reduction.SUM))
            elif n_classes == 2:
                self._head = (head_lib.binary_classification_head(
                    weight_column=self._weight_column,
                    loss_reduction=losses.Reduction.SUM))
            else:
                raise ValueError("Invalid value for n_classes=%d" % n_classes)

        super(Base, self).__init__(model_fn=self._base_model_fn,
                                   model_dir=model_dir,
                                   config=config)

        # Make sure model directory exists after initialization.
        # Notice self.model_dir is set by Estimator class.
        file_io.recursive_create_dir(self.model_dir)

        self._projection_hook = _ProjectionHook()
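The head-selection branch in `__init__` can be read on its own. This editorial restatement, assuming the module's `head_lib` and `losses` imports, shows the three cases:

def _select_default_head(n_classes, weight_column=None):
    """Distills the head-selection logic from __init__ above."""
    if n_classes == 0:    # regression
        return head_lib.regression_head(
            label_dimension=1, weight_column=weight_column,
            loss_reduction=losses.Reduction.SUM)
    elif n_classes == 2:  # binary classification
        return head_lib.binary_classification_head(
            weight_column=weight_column,
            loss_reduction=losses.Reduction.SUM)
    raise ValueError("Invalid value for n_classes=%d" % n_classes)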