def _dnn_only_estimator_fn(
    hidden_units,
    feature_columns,
    model_dir=None,
    label_dimension=1,
    weight_column=None,
    optimizer='Adagrad',
    activation_fn=nn.relu,
    dropout=None,
    input_layer_partitioner=None,
    config=None):
  return dnn_linear_combined.DNNLinearCombinedEstimatorV2(
      head=regression_head.RegressionHead(
          weight_column=weight_column,
          label_dimension=label_dimension,
          # Tests in core (from which this test inherits) test the sum loss.
          loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE),
      model_dir=model_dir,
      dnn_feature_columns=feature_columns,
      dnn_optimizer=optimizer,
      dnn_hidden_units=hidden_units,
      dnn_activation_fn=activation_fn,
      dnn_dropout=dropout,
      input_layer_partitioner=input_layer_partitioner,
      config=config)
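
A minimal usage sketch for the factory above (not part of the original test file): the in-memory numpy data, the 4-feature numeric column named 'x', and the tf.compat.v1 numpy_input_fn helper are all illustrative assumptions.

# Hypothetical usage of _dnn_only_estimator_fn; data, column shape, and
# input_fn below are illustrative assumptions, not from the original test.
import numpy as np
import tensorflow as tf

x_data = np.random.rand(8, 4).astype(np.float32)  # 8 examples, 4 features
y_data = np.random.rand(8, 1).astype(np.float32)  # label_dimension defaults to 1

est = _dnn_only_estimator_fn(
    hidden_units=(2, 2),
    feature_columns=[tf.feature_column.numeric_column('x', shape=(4,))])

train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={'x': x_data}, y=y_data, batch_size=4, num_epochs=None, shuffle=True)
est.train(train_input_fn, steps=10)
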
Example #2
  def _test_ckpt_converter(self, train_input_fn, eval_input_fn,
                           predict_input_fn, input_dimension, label_dimension,
                           batch_size, dnn_optimizer, linear_optimizer):

    # Create checkpoint in CannedEstimator v1.
    linear_feature_columns_v1 = [
        feature_column._numeric_column('x', shape=(input_dimension,))
    ]
    dnn_feature_columns_v1 = [
        feature_column._numeric_column('x', shape=(input_dimension,))
    ]
    est_v1 = dnn_linear_combined.DNNLinearCombinedEstimator(
        head=head_lib._regression_head(label_dimension=label_dimension),
        linear_feature_columns=linear_feature_columns_v1,
        dnn_feature_columns=dnn_feature_columns_v1,
        dnn_hidden_units=(2, 2),
        model_dir=self._old_ckpt_dir,
        dnn_optimizer=dnn_optimizer,
        linear_optimizer=linear_optimizer)
    # Train
    num_steps = 10
    est_v1.train(train_input_fn, steps=num_steps)
    self.assertIsNotNone(est_v1.latest_checkpoint())
    self.assertTrue(est_v1.latest_checkpoint().startswith(self._old_ckpt_dir))

    # Convert checkpoint from v1 to v2.
    source_checkpoint = os.path.join(self._old_ckpt_dir, 'model.ckpt-10')
    source_graph = os.path.join(self._old_ckpt_dir, 'graph.pbtxt')
    target_checkpoint = os.path.join(self._new_ckpt_dir, 'model.ckpt-10')
    checkpoint_converter.convert_checkpoint('combined', source_checkpoint,
                                            source_graph, target_checkpoint)

    # Create CannedEstimator V2 and restore from the converted checkpoint.
    linear_feature_columns_v2 = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    dnn_feature_columns_v2 = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    est_v2 = dnn_linear_combined.DNNLinearCombinedEstimatorV2(
        head=regression_head.RegressionHead(label_dimension=label_dimension),
        linear_feature_columns=linear_feature_columns_v2,
        dnn_feature_columns=dnn_feature_columns_v2,
        dnn_hidden_units=(2, 2),
        model_dir=self._new_ckpt_dir,
        dnn_optimizer=dnn_optimizer,
        linear_optimizer=linear_optimizer)
    # Train
    extra_steps = 10
    est_v2.train(train_input_fn, steps=extra_steps)
    self.assertIsNotNone(est_v2.latest_checkpoint())
    self.assertTrue(est_v2.latest_checkpoint().startswith(self._new_ckpt_dir))
    # Make sure estimator v2 restores from the converted checkpoint, and
    # continues training extra steps.
    self.assertEqual(num_steps + extra_steps,
                     est_v2.get_variable_value(tf.compat.v1.GraphKeys.GLOBAL_STEP))
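
For reference, here is a sketch (with made-up dimensions) of the kind of numpy-based train/eval/predict input functions a checkpoint-converter test like this could be driven with; none of these definitions come from the original file.

# Hypothetical train/eval/predict input functions; dimensions and data are
# illustrative assumptions only.
import numpy as np
import tensorflow as tf

input_dimension, label_dimension, batch_size = 2, 1, 10
data = np.random.rand(batch_size, input_dimension).astype(np.float32)
labels = np.random.rand(batch_size, label_dimension).astype(np.float32)

train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={'x': data}, y=labels, batch_size=batch_size, num_epochs=None,
    shuffle=True)
eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={'x': data}, y=labels, batch_size=batch_size, num_epochs=1,
    shuffle=False)
predict_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={'x': data}, batch_size=batch_size, num_epochs=1, shuffle=False)
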
    def _test_complete_flow(self,
                            train_input_fn,
                            eval_input_fn,
                            predict_input_fn,
                            input_dimension,
                            label_dimension,
                            batch_size,
                            dnn_optimizer='Adagrad',
                            linear_optimizer='Ftrl'):
        linear_feature_columns = [
            feature_column_v2.numeric_column('x', shape=(input_dimension, ))
        ]
        dnn_feature_columns = [
            feature_column_v2.numeric_column('x', shape=(input_dimension, ))
        ]
        feature_columns = linear_feature_columns + dnn_feature_columns
        est = dnn_linear_combined.DNNLinearCombinedEstimatorV2(
            head=regression_head.RegressionHead(
                label_dimension=label_dimension),
            linear_feature_columns=linear_feature_columns,
            dnn_feature_columns=dnn_feature_columns,
            dnn_hidden_units=(2, 2),
            model_dir=self._model_dir,
            dnn_optimizer=dnn_optimizer,
            linear_optimizer=linear_optimizer)

        # Train
        num_steps = 10
        est.train(train_input_fn, steps=num_steps)

        # Evaluate
        scores = est.evaluate(eval_input_fn)
        self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
        self.assertIn('loss', six.iterkeys(scores))

        # Predict
        predictions = np.array([
            x[prediction_keys.PredictionKeys.PREDICTIONS]
            for x in est.predict(predict_input_fn)
        ])
        self.assertAllEqual((batch_size, label_dimension), predictions.shape)

        # Export
        feature_spec = feature_column_v2.make_parse_example_spec_v2(
            feature_columns)
        serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
            feature_spec)
        export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                            serving_input_receiver_fn)
        self.assertTrue(gfile.Exists(export_dir))
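
As a follow-up sketch (not in the original test), the SavedModel directory returned by export_saved_model above could be reloaded to confirm its serving signature; tf.saved_model.load and the 'serving_default' signature key are assumed to behave as in stock TensorFlow 2.

# Hypothetical check of the exported model; assumes `export_dir` is the path
# returned by est.export_saved_model(...) above (it may be a bytes object).
import tensorflow as tf

path = export_dir.decode() if isinstance(export_dir, bytes) else export_dir
loaded = tf.saved_model.load(path)
print(list(loaded.signatures.keys()))  # typically includes 'serving_default'
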
def _linear_only_estimator_fn(feature_columns,
                              model_dir=None,
                              label_dimension=1,
                              weight_column=None,
                              optimizer='Ftrl',
                              config=None,
                              sparse_combiner='sum'):
    return dnn_linear_combined.DNNLinearCombinedEstimatorV2(
        head=regression_head.RegressionHead(
            weight_column=weight_column,
            label_dimension=label_dimension,
            # Tests in core (from which this test inherits) test the sum loss.
            loss_reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE),
        model_dir=model_dir,
        linear_feature_columns=feature_columns,
        linear_optimizer=optimizer,
        config=config,
        linear_sparse_combiner=sparse_combiner)
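
And a matching usage sketch for the linear-only factory; as above, the feature column, data, and input_fn are illustrative assumptions rather than part of the original test.

# Hypothetical usage of _linear_only_estimator_fn; mirrors the DNN-only sketch.
import numpy as np
import tensorflow as tf

est = _linear_only_estimator_fn(
    feature_columns=[tf.feature_column.numeric_column('x', shape=(4,))])
train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={'x': np.random.rand(8, 4).astype(np.float32)},
    y=np.random.rand(8, 1).astype(np.float32),
    batch_size=4, num_epochs=None, shuffle=True)
est.train(train_input_fn, steps=5)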