Code example #1
0
  def _test_complete_flow(
      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
      label_dimension, batch_size):
    """Runs train/evaluate/predict/export end-to-end on a LinearEstimator."""
    columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    est = linear.LinearEstimator(
        head=head_lib._regression_head(label_dimension=label_dimension),
        feature_columns=columns,
        model_dir=self._model_dir)

    # Train for a fixed number of steps.
    step_count = 10
    est.train(train_input_fn, steps=step_count)

    # Evaluate: global step must equal the training steps; a loss is reported.
    metrics = est.evaluate(eval_input_fn)
    self.assertEqual(step_count, metrics[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(metrics))

    # Predict: collect every prediction and check the overall shape.
    pred_key = prediction_keys.PredictionKeys.PREDICTIONS
    predictions = np.array(
        [batch[pred_key] for batch in est.predict(predict_input_fn)])
    self.assertAllEqual((batch_size, label_dimension), predictions.shape)

    # Export a SavedModel and verify it landed on disk.
    feature_spec = feature_column.make_parse_example_spec(columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(
        tempfile.mkdtemp(), serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))
Code example #2
0
  def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
                          input_dimension, n_classes, batch_size):
    """Runs train/evaluate/predict/export end-to-end on a DNNClassifierV2."""
    columns = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]

    est = dnn.DNNClassifierV2(
        hidden_units=(2, 2),
        feature_columns=columns,
        n_classes=n_classes,
        model_dir=self._model_dir)

    # TRAIN for a fixed number of steps.
    step_count = 10
    est.train(train_input_fn, steps=step_count)

    # EVALUATE: global step matches the training steps; a loss is present.
    metrics = est.evaluate(eval_input_fn)
    self.assertEqual(step_count, metrics[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(metrics))

    # PREDICT: one probability vector of length n_classes per example.
    proba_key = prediction_keys.PredictionKeys.PROBABILITIES
    predicted_proba = np.array(
        [batch[proba_key] for batch in est.predict(predict_input_fn)])
    self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)

    # EXPORT a SavedModel and verify it exists on disk.
    feature_spec = tf.feature_column.make_parse_example_spec(columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(
        tempfile.mkdtemp(), serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
Code example #3
0
  def test_complete_flow_with_no_tpu(self):
    """Exercises train/evaluate/predict/export on TPUEstimator with use_tpu=False."""
    # Choose the train_batch_size divisible by 2 and 8 (common shards in test
    # env) and batch_size for eval and predict prime number.
    train_batch_size = 16
    eval_batch_size = 8
    predict_batch_size = 1

    run_config = create_run_config(iterations_per_loop=4)
    num_shards = run_config.tpu_config.num_shards

    # Expected bookkeeping for how often, and with what batch size, each
    # input_fn / model_fn should be invoked given unsharded policies.
    (expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
     expected_called_count_for_input_fn) = (
         self._generate_expected_batch_size_and_called_count(
             num_shards, train_batch_size, eval_batch_size, predict_batch_size,
             train_sharding_policy=_UNSHARDED,
             eval_sharding_policy=_UNSHARDED))

    est = tpu_estimator.TPUEstimator(
        model_fn=self._make_model_fn(
            expected_batch_size_for_model_fn, use_tpu_estimator_spec=True),
        config=run_config,
        train_batch_size=train_batch_size,
        eval_batch_size=eval_batch_size,
        predict_batch_size=predict_batch_size,
        use_tpu=False)

    # TRAIN
    # learn y = x
    # Note: Gradients are all zero. Just testing execution.
    train_input_fn = self._make_input_fn(mode=_TRAIN, repeat=True)
    est.train(train_input_fn, steps=7)

    # EVALUATE: global step reflects the 7 training steps above.
    eval_input_fn = self._make_input_fn(mode=_EVAL)
    scores = est.evaluate(eval_input_fn, steps=2)
    self.assertEqual(7, scores['global_step'])
    self.assertGreater(0.1, scores['absolute_error'])

    # PREDICT: the identity model should reproduce the input data closely.
    predict_input_fn = self._make_input_fn(mode=_PREDICT)
    predictions = [x['predictions'] for x in est.predict(predict_input_fn)]
    self.assertAllClose(self._data, predictions, atol=0.01)

    # Verify all input_fn invoke recorded metadata.
    self.assertInputFnCalledCountAndBatch(
        expected_called_count_for_input_fn, expected_batch_size_for_input_fn)

    # EXPORT a SavedModel and check that it round-trips as an identity model.
    feature_spec = {'x': tf.io.FixedLenFeature([1], tf.float32)}
    serving_input_receiver_fn = (
        export.build_parsing_serving_input_receiver_fn(feature_spec))
    with self.export_mode():
      export_dir = est.export_saved_model(
          tempfile.mkdtemp(dir=self.get_temp_dir()), serving_input_receiver_fn)
    self.assertTrue(tf.gfile.Exists(export_dir))
    self._test_identity_savedmodel(export_dir)
Code example #4
0
    def test_build_parsing_serving_input_receiver_fn(self):
        """Checks the receiver parses serialized tf.Example into sparse features."""
        feature_spec = {
            "int_feature": parsing_ops.VarLenFeature(dtypes.int64),
            "float_feature": parsing_ops.VarLenFeature(dtypes.float32)
        }
        receiver_fn = export.build_parsing_serving_input_receiver_fn(
            feature_spec)
        with ops.Graph().as_default():
            receiver = receiver_fn()
            # One feature tensor per spec entry; a single receiver tensor.
            self.assertEqual(set(["int_feature", "float_feature"]),
                             set(receiver.features.keys()))
            self.assertEqual(set(["examples"]),
                             set(receiver.receiver_tensors.keys()))

            example = example_pb2.Example()
            text_format.Parse(
                "features: { "
                "  feature: { "
                "    key: 'int_feature' "
                "    value: { "
                "      int64_list: { "
                "        value: [ 21, 2, 5 ] "
                "      } "
                "    } "
                "  } "
                "  feature: { "
                "    key: 'float_feature' "
                "    value: { "
                "      float_list: { "
                "        value: [ 525.25 ] "
                "      } "
                "    } "
                "  } "
                "} ", example)

            with self.cached_session() as sess:
                feed = {
                    receiver.receiver_tensors["examples"].name:
                    [example.SerializeToString()]
                }
                sparse_result = sess.run(receiver.features, feed_dict=feed)
                int_result = sparse_result["int_feature"]
                float_result = sparse_result["float_feature"]
                # The three int values land in row 0, columns 0..2.
                self.assertAllEqual([[0, 0], [0, 1], [0, 2]],
                                    int_result.indices)
                self.assertAllEqual([21, 2, 5], int_result.values)
                # The single float value lands at (0, 0).
                self.assertAllEqual([[0, 0]], float_result.indices)
                self.assertAllEqual([525.25], float_result.values)
    def _test_complete_flow(self,
                            train_input_fn,
                            eval_input_fn,
                            predict_input_fn,
                            input_dimension,
                            label_dimension,
                            batch_size,
                            dnn_optimizer='Adagrad',
                            linear_optimizer='Ftrl'):
        """Runs train/evaluate/predict/export on DNNLinearCombinedEstimatorV2."""
        # Both the linear and the DNN part read the same numeric input 'x'.
        linear_feature_columns = [
            feature_column_v2.numeric_column('x', shape=(input_dimension, ))
        ]
        dnn_feature_columns = [
            feature_column_v2.numeric_column('x', shape=(input_dimension, ))
        ]
        all_columns = linear_feature_columns + dnn_feature_columns
        est = dnn_linear_combined.DNNLinearCombinedEstimatorV2(
            head=regression_head.RegressionHead(
                label_dimension=label_dimension),
            linear_feature_columns=linear_feature_columns,
            dnn_feature_columns=dnn_feature_columns,
            dnn_hidden_units=(2, 2),
            model_dir=self._model_dir,
            dnn_optimizer=dnn_optimizer,
            linear_optimizer=linear_optimizer)

        # Train for a fixed number of steps.
        step_count = 10
        est.train(train_input_fn, steps=step_count)

        # Evaluate: global step matches training steps; a loss is reported.
        metrics = est.evaluate(eval_input_fn)
        self.assertEqual(step_count, metrics[ops.GraphKeys.GLOBAL_STEP])
        self.assertIn('loss', six.iterkeys(metrics))

        # Predict: one label_dimension-sized vector per example.
        pred_key = prediction_keys.PredictionKeys.PREDICTIONS
        predictions = np.array(
            [batch[pred_key] for batch in est.predict(predict_input_fn)])
        self.assertAllEqual((batch_size, label_dimension), predictions.shape)

        # Export a SavedModel and verify it exists on disk.
        feature_spec = feature_column_v2.make_parse_example_spec_v2(
            all_columns)
        serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
            feature_spec)
        export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                            serving_input_receiver_fn)
        self.assertTrue(gfile.Exists(export_dir))
Code example #6
0
    def _test_complete_flow(self, train_input_fn, eval_input_fn,
                            predict_input_fn, input_dimension, label_dimension,
                            batch_size):
        """Runs train/evaluate/predict/export end-to-end on a DNNRegressor."""
        columns = [
            feature_column_v2.numeric_column('x', shape=(input_dimension, ))
        ]

        est = dnn.DNNRegressor(hidden_units=(2, 2),
                               feature_columns=columns,
                               label_dimension=label_dimension,
                               model_dir=self._model_dir)

        # TRAIN for a fixed number of steps.
        step_count = 10
        est.train(train_input_fn, steps=step_count)

        # EVALUATE: global step matches training steps; a loss is present.
        metrics = est.evaluate(eval_input_fn)
        self.assertEqual(step_count, metrics[ops.GraphKeys.GLOBAL_STEP])
        self.assertIn('loss', six.iterkeys(metrics))

        # PREDICT: one label_dimension-sized vector per example.
        pred_key = prediction_keys.PredictionKeys.PREDICTIONS
        predictions = np.array(
            [batch[pred_key] for batch in est.predict(predict_input_fn)])
        self.assertAllEqual((batch_size, label_dimension), predictions.shape)

        # EXPORT via the legacy endpoint, preserving original behavior.
        # NOTE(review): sibling tests here call export_saved_model; confirm
        # whether export_savedmodel (deprecated name) is intentional.
        feature_spec = feature_column_v2.make_parse_example_spec_v2(columns)
        serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
            feature_spec)
        export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                           serving_input_receiver_fn)
        self.assertTrue(gfile.Exists(export_dir))
Code example #7
0
 def _get_exporter(self, name, fc):
     """Builds a LatestExporter whose serving fn parses tf.Example protos."""
     spec = tf.compat.v1.feature_column.make_parse_example_spec(fc)
     receiver_fn = export_lib.build_parsing_serving_input_receiver_fn(spec)
     return exporter_lib.LatestExporter(
         name, serving_input_receiver_fn=receiver_fn)
Code example #8
0
File: export_test.py  Project: yupbank/estimator
def _get_serving_input_receiver_fn():
  """Returns a parsing serving-input fn for int64 sparse features 'x' and 'y'."""
  spec = {
      'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
      'y': parsing_ops.VarLenFeature(dtype=dtypes.int64),
  }
  return export.build_parsing_serving_input_receiver_fn(spec)