Example #1
    def test_one_dim(self):
        """Asserts predictions for one-dimensional input and logits."""
        dnn_testing_utils.create_checkpoint(
            (
                ([[.6, .5]], [.1, -.1]),
                ([[1., .8], [-.8, -1.]], [.2, -.2]),
                ([[-1.], [1.]], [.3]),
            ),
            global_step=0,
            model_dir=self._model_dir)

        dnn_classifier = dnn.DNNClassifier(
            hidden_units=(2, 2),
            feature_columns=(feature_column.numeric_column('x'), ),
            model_dir=self._model_dir)
        input_fn = numpy_io.numpy_input_fn(x={'x': np.array([[10.]])},
                                           batch_size=1,
                                           shuffle=False)
        # Uses identical numbers as DNNModelTest.test_one_dim_logits.
        # See that test for calculation of logits.
        # logits = [-2.08] =>
        # logistic = exp(-2.08)/(1 + exp(-2.08)) = 0.11105597
        # probabilities = [1-logistic, logistic] = [0.88894403, 0.11105597]
        # class_ids = argmax(probabilities) = [0]
        predictions = next(dnn_classifier.predict(input_fn=input_fn))
        self.assertAllClose([-2.08],
                            predictions[prediction_keys.PredictionKeys.LOGITS])
        self.assertAllClose(
            [0.11105597], predictions[prediction_keys.PredictionKeys.LOGISTIC])
        self.assertAllClose(
            [0.88894403, 0.11105597],
            predictions[prediction_keys.PredictionKeys.PROBABILITIES])
        self.assertAllClose(
            [0], predictions[prediction_keys.PredictionKeys.CLASS_IDS])
        self.assertAllEqual(
            [b'0'], predictions[prediction_keys.PredictionKeys.CLASSES])
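The arithmetic in the comments above can be reproduced with a few lines of NumPy; a minimal sketch, assuming only the -2.08 logit that the checkpoint weights produce (derived in DNNModelTest.test_one_dim_logits):

import numpy as np

logits = np.array([-2.08])
logistic = 1. / (1. + np.exp(-logits))                     # ~[0.11105597]
probabilities = np.concatenate([1. - logistic, logistic])  # ~[0.88894403, 0.11105597]
class_id = int(np.argmax(probabilities))                   # 0, reported as b'0'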
Example #2
    def test_from_scratch_validate_summary(self):
        hidden_units = (2, 2)
        mock_optimizer = _mock_optimizer(self, hidden_units=hidden_units)
        dnn_classifier = dnn.DNNClassifier(
            hidden_units=hidden_units,
            feature_columns=(feature_column.numeric_column('age'), ),
            optimizer=mock_optimizer,
            model_dir=self._model_dir)
        self.assertEqual(0, mock_optimizer.minimize.call_count)

        # Train for a few steps, then validate optimizer, summaries, and
        # checkpoint.
        num_steps = 5
        summary_hook = _SummaryHook()
        dnn_classifier.train(
            input_fn=lambda: ({'age': [[10.]]}, [[1]]),
            steps=num_steps,
            hooks=(summary_hook,))
        self.assertEqual(1, mock_optimizer.minimize.call_count)
        _assert_checkpoint(self,
                           num_steps,
                           input_units=1,
                           hidden_units=hidden_units,
                           output_units=1,
                           model_dir=self._model_dir)
        summaries = summary_hook.summaries()
        self.assertEqual(num_steps, len(summaries))
        for summary in summaries:
            summary_keys = [v.tag for v in summary.value]
            self.assertIn(metric_keys.MetricKeys.LOSS, summary_keys)
            self.assertIn(metric_keys.MetricKeys.LOSS_MEAN, summary_keys)
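_SummaryHook is a test-local helper whose implementation is not shown here. Outside the test, the same loss summaries can be read back from the events files the estimator writes under model_dir; a minimal sketch, assuming a TF 1.x API and that MetricKeys.LOSS and MetricKeys.LOSS_MEAN correspond to the 'loss' and 'average_loss' tags:

import glob
import os

import tensorflow as tf

model_dir = '/tmp/dnn_model_dir'  # placeholder for self._model_dir
for events_path in glob.glob(os.path.join(model_dir, 'events.out.tfevents.*')):
    for event in tf.train.summary_iterator(events_path):
        for value in event.summary.value:
            if value.tag in ('loss', 'average_loss'):
                print(event.step, value.tag, value.simple_value)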
Example #3
  def _test_complete_flow(
      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
      n_classes, batch_size):
    feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))]
    est = dnn.DNNClassifier(
        hidden_units=(2, 2),
        feature_columns=feature_columns,
        n_classes=n_classes,
        model_dir=self._model_dir)

    # TRAIN
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))

    # PREDICT
    predicted_proba = np.array([
        x[prediction_keys.PredictionKeys.PROBABILITIES]
        for x in est.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)

    # EXPORT
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))
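The exported directory can be loaded back to confirm the SavedModel is servable; a minimal sketch using the TF 1.x loader (the parsing receiver built above expects serialized tf.Example protos as serving input):

import tensorflow as tf

# export_dir is the path returned by est.export_savedmodel(...) above.
with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    # Each signature lists the exact input/output tensor names for serving.
    print(list(meta_graph.signature_def.keys()))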
Example #4
    def test_multi_class(self):
        n_classes = 3
        base_global_step = 100
        hidden_units = (2, 2)
        _create_checkpoint((
            ([[.6, .5]], [.1, -.1]),
            ([[1., .8], [-.8, -1.]], [.2, -.2]),
            ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),
        ), base_global_step, self._model_dir)

        # Uses identical numbers as DNNModelFnTest.test_multi_dim_logits.
        # See that test for calculation of logits.
        # logits = [-2.08, 2.08, 1.19] => probabilities = [0.0109, 0.7011, 0.2879]
        # loss = -1. * log(0.7011) = 0.35505795
        expected_loss = 0.35505795
        mock_optimizer = _mock_optimizer(self,
                                         hidden_units=hidden_units,
                                         expected_loss=expected_loss)
        dnn_classifier = dnn.DNNClassifier(
            n_classes=n_classes,
            hidden_units=hidden_units,
            feature_columns=(feature_column.numeric_column('age'), ),
            optimizer=mock_optimizer,
            model_dir=self._model_dir)
        self.assertEqual(0, mock_optimizer.minimize.call_count)

        # Train for a few steps, then validate optimizer, summaries, and
        # checkpoint.
        num_steps = 5
        summary_hook = _SummaryHook()
        dnn_classifier.train(
            input_fn=lambda: ({'age': [[10.]]}, [[1]]),
            steps=num_steps,
            hooks=(summary_hook,))
        self.assertEqual(1, mock_optimizer.minimize.call_count)
        summaries = summary_hook.summaries()
        self.assertEqual(num_steps, len(summaries))
        for summary in summaries:
            _assert_simple_summary(
                self, {
                    metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
                    'dnn/dnn/hiddenlayer_0_fraction_of_zero_values': 0.,
                    'dnn/dnn/hiddenlayer_1_fraction_of_zero_values': .5,
                    'dnn/dnn/logits_fraction_of_zero_values': 0.,
                    metric_keys.MetricKeys.LOSS: expected_loss,
                }, summary)
        _assert_checkpoint(self,
                           base_global_step + num_steps,
                           input_units=1,
                           hidden_units=hidden_units,
                           output_units=n_classes,
                           model_dir=self._model_dir)
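The expected_loss used above is the softmax cross-entropy for the single training example with label 1; a quick NumPy check of the numbers quoted in the comments:

import numpy as np

logits = np.array([-2.08, 2.08, 1.19])
probabilities = np.exp(logits) / np.sum(np.exp(logits))  # ~[0.0109, 0.7011, 0.2879]
expected_loss = -np.log(probabilities[1])                 # ~0.35505795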
Example #5
File: dnn_test.py  Project: ylch/tensorflow
  def test_from_scratch_with_default_optimizer_binary(self):
    hidden_units = (2, 2)
    dnn_classifier = dnn.DNNClassifier(
        hidden_units=hidden_units,
        feature_columns=(feature_column.numeric_column('age'),),
        model_dir=self._model_dir)

    # Train for a few steps, then validate final checkpoint.
    num_steps = 5
    dnn_classifier.train(
        input_fn=lambda: ({'age': [[10.]]}, [[1]]), steps=num_steps)
    _assert_checkpoint(
        self, num_steps, input_units=1, hidden_units=hidden_units,
        output_units=1, model_dir=self._model_dir)
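_assert_checkpoint (not shown here) verifies the global step and the variable shapes written to model_dir. The same information can be listed directly from the checkpoint; a minimal sketch, assuming a TF 1.x API (the exact variable names depend on the DNN implementation):

import tensorflow as tf

model_dir = '/tmp/dnn_model_dir'  # placeholder for self._model_dir
for name, shape in tf.train.list_variables(model_dir):
    print(name, shape)
# For input_units=1, hidden_units=(2, 2) and output_units=1 the kernels should
# come out as [1, 2], [2, 2] and [2, 1], with matching bias shapes.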
Example #6
    def test_one_dim(self):
        """Asserts evaluation metrics for one-dimensional input and logits."""
        global_step = 100
        dnn_testing_utils.create_checkpoint((
            ([[.6, .5]], [.1, -.1]),
            ([[1., .8], [-.8, -1.]], [.2, -.2]),
            ([[-1.], [1.]], [.3]),
        ), global_step, self._model_dir)

        dnn_classifier = dnn.DNNClassifier(
            hidden_units=(2, 2),
            feature_columns=[feature_column.numeric_column('age')],
            model_dir=self._model_dir)

        def _input_fn():
            # batch_size = 2, one false label, and one true.
            return {'age': [[10.], [10.]]}, [[1], [0]]

        # Uses identical numbers as DNNModelTest.test_one_dim_logits.
        # See that test for calculation of logits.
        # logits = [[-2.08], [-2.08]] =>
        # logistic = 1/(1 + exp(-logits)) = [[0.11105597], [0.11105597]]
        # loss = -1. * log(0.111) -1. * log(0.889) = 2.31544200
        expected_loss = 2.31544200
        self.assertAllClose(
            {
                metric_keys.MetricKeys.LOSS: expected_loss,
                metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2.,
                metric_keys.MetricKeys.ACCURACY: 0.5,
                metric_keys.MetricKeys.PREDICTION_MEAN: 0.11105597,
                metric_keys.MetricKeys.LABEL_MEAN: 0.5,
                metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
                # There is no good way to calculate AUC for only two data
                # points, but that is what the algorithm returns.
                metric_keys.MetricKeys.AUC: 0.5,
                metric_keys.MetricKeys.AUC_PR: 0.75,
                ops.GraphKeys.GLOBAL_STEP: global_step,
            },
            dnn_classifier.evaluate(input_fn=_input_fn, steps=1))
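The metric values asserted above follow directly from the two identical -2.08 logits; a short NumPy check of expected_loss and the derived metrics:

import numpy as np

logistic = np.array([0.11105597, 0.11105597])  # sigmoid of the two -2.08 logits
labels = np.array([1., 0.])
loss = -np.sum(labels * np.log(logistic) +
               (1. - labels) * np.log(1. - logistic))      # ~2.31544200
# LOSS_MEAN = loss / 2, PREDICTION_MEAN = 0.11105597, and ACCURACY is 0.5
# because both examples are predicted as class 0 while only one label is 0.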
Example #7
    def test_multi_dim(self):
        """Asserts predictions for multi-dimensional input and logits."""
        dnn_testing_utils.create_checkpoint(
            (
                ([[.6, .5], [-.6, -.5]], [.1, -.1]),
                ([[1., .8], [-.8, -1.]], [.2, -.2]),
                ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),
            ),
            global_step=0,
            model_dir=self._model_dir)

        dnn_classifier = dnn.DNNClassifier(
            hidden_units=(2, 2),
            feature_columns=(feature_column.numeric_column('x', shape=(2,)),),
            n_classes=3,
            model_dir=self._model_dir)
        input_fn = numpy_io.numpy_input_fn(
            # Inputs shape is (batch_size, num_inputs).
            x={'x': np.array([[10., 8.]])},
            batch_size=1,
            shuffle=False)
        # Uses identical numbers as
        # DNNModelFnTest.test_multi_dim_input_multi_dim_logits.
        # See that test for calculation of logits.
        # logits = [-0.48, 0.48, 0.39] =>
        # probabilities[i] = exp(logits[i]) / sum_j exp(logits[j]) =>
        # probabilities = [0.16670536, 0.43538380, 0.39791084]
        # class_ids = argmax(probabilities) = [1]
        predictions = next(dnn_classifier.predict(input_fn=input_fn))
        self.assertItemsEqual([
            prediction_keys.PredictionKeys.LOGITS,
            prediction_keys.PredictionKeys.PROBABILITIES,
            prediction_keys.PredictionKeys.CLASS_IDS,
            prediction_keys.PredictionKeys.CLASSES
        ], six.iterkeys(predictions))
        self.assertAllClose([-0.48, 0.48, 0.39],
                            predictions[prediction_keys.PredictionKeys.LOGITS])
        self.assertAllClose(
            [0.16670536, 0.43538380, 0.39791084],
            predictions[prediction_keys.PredictionKeys.PROBABILITIES])
        self.assertAllEqual(
            [1], predictions[prediction_keys.PredictionKeys.CLASS_IDS])
        self.assertAllEqual(
            [b'1'], predictions[prediction_keys.PredictionKeys.CLASSES])
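A quick NumPy check of the softmax and argmax quoted in the comments:

import numpy as np

logits = np.array([-0.48, 0.48, 0.39])
probabilities = np.exp(logits) / np.sum(np.exp(logits))
# probabilities ~ [0.16670536, 0.43538380, 0.39791084]
class_id = int(np.argmax(probabilities))  # 1, reported as CLASSES value b'1'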
Example #8
    def test_multi_dim(self):
        """Asserts evaluation metrics for multi-dimensional input and logits."""
        global_step = 100
        dnn_testing_utils.create_checkpoint((
            ([[.6, .5], [-.6, -.5]], [.1, -.1]),
            ([[1., .8], [-.8, -1.]], [.2, -.2]),
            ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),
        ), global_step, self._model_dir)
        n_classes = 3

        dnn_classifier = dnn.DNNClassifier(
            hidden_units=(2, 2),
            feature_columns=[feature_column.numeric_column('age', shape=[2])],
            n_classes=n_classes,
            model_dir=self._model_dir)

        def _input_fn():
            # batch_size = 2, one false label, and one true.
            return {'age': [[10., 8.], [10., 8.]]}, [[1], [0]]

        # Uses identical numbers as
        # DNNModelFnTest.test_multi_dim_input_multi_dim_logits.
        # See that test for calculation of logits.
        # logits = [[-0.48, 0.48, 0.39], [-0.48, 0.48, 0.39]]
        # probabilities = exp(logits)/sum(exp(logits))
        #               = [[0.16670536, 0.43538380, 0.39791084],
        #                  [0.16670536, 0.43538380, 0.39791084]]
        # loss = -log(0.43538380) - log(0.16670536)
        expected_loss = 2.62305466
        self.assertAllClose(
            {
                metric_keys.MetricKeys.LOSS: expected_loss,
                metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
                metric_keys.MetricKeys.ACCURACY: 0.5,
                ops.GraphKeys.GLOBAL_STEP: global_step
            }, dnn_classifier.evaluate(input_fn=_input_fn, steps=1))
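As in the prediction test above, the loss can be recomputed from the two identical logit rows and the labels [1, 0]; a short NumPy check:

import numpy as np

logits = np.array([[-0.48, 0.48, 0.39], [-0.48, 0.48, 0.39]])
probabilities = np.exp(logits) / np.sum(np.exp(logits), axis=1, keepdims=True)
loss = -np.log(probabilities[0, 1]) - np.log(probabilities[1, 0])  # ~2.62305466
# LOSS_MEAN = loss / 2; ACCURACY is 0.5 because both rows predict class 1.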
Example #9
def DNNClassifierWithLayerAnnotations(  # pylint: disable=invalid-name
    hidden_units,
    feature_columns,
    model_dir=None,
    n_classes=2,
    weight_column=None,
    label_vocabulary=None,
    optimizer='Adagrad',
    activation_fn=nn.relu,
    dropout=None,
    input_layer_partitioner=None,
    config=None,
    warm_start_from=None,
    loss_reduction=losses.Reduction.SUM):
  """A classifier for TensorFlow DNN models with layer annotations.

  This classifier is functionally identical to estimator.DNNClassifier as far as
  training and evaluating models is concerned. The key difference is that this
  classifier adds additional layer annotations, which can be used for computing
  Integrated Gradients.

  Integrated Gradients is a method for attributing a classifier's predictions
  to its input features (https://arxiv.org/pdf/1703.01365.pdf). Given an input
  instance, the method assigns attribution scores to individual features in
  proportion to the feature's importance to the classifier's prediction.

  See estimator.DNNClassifier for example code on training and evaluating
  models using this classifier.

  This classifier is checkpoint-compatible with estimator.DNNClassifier and
  therefore the following should work seamlessly:

  # Instantiate ordinary estimator as usual.
  estimator = tf.estimator.DNNClassifier(
    config, feature_columns, hidden_units, ...)

  # Train estimator, export checkpoint.
  tf.estimator.train_and_evaluate(estimator, ...)

  # Instantiate estimator with annotations with the same configuration as the
  # ordinary estimator.
  estimator_with_annotations = (
    tf.contrib.estimator.DNNClassifierWithLayerAnnotations(
      config, feature_columns, hidden_units, ...))

  # Call export_savedmodel with the same arguments as the ordinary estimator,
  # using the checkpoint produced for the ordinary estimator.
  estimator_with_annotations.export_saved_model(
    export_dir_base, serving_input_receiver, ...
    checkpoint_path='/path/to/ordinary/estimator/checkpoint/model.ckpt-1234')

  Args:
    hidden_units: Iterable of the number of hidden units per layer. All layers
      are fully connected. Ex. `[64, 32]` means the first layer has 64 nodes
      and the second one has 32.
    feature_columns: An iterable containing all the feature columns used by the
      model. All items in the set should be instances of classes derived from
      `_FeatureColumn`.
    model_dir: Directory to save model parameters, graph, etc. This can also be
      used to load checkpoints from the directory into an estimator to continue
      training a previously saved model.
    n_classes: Number of label classes. Defaults to 2, namely binary
      classification. Must be > 1.
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example. If it is a string, it is
      used as a key to fetch weight tensor from the `features`. If it is a
      `_NumericColumn`, raw tensor is fetched by key `weight_column.key`, then
      weight_column.normalizer_fn is applied on it to get weight tensor.
    label_vocabulary: A list of strings representing possible label values. If
      given, labels must be of string type and take values in
      `label_vocabulary`. If it is not given, labels are assumed to be already
      encoded as integers or floats within [0, 1] for `n_classes=2`, or as
      integer values in {0, 1, ..., n_classes-1} for `n_classes > 2`. An error
      is raised if the vocabulary is not provided and the labels are strings.
    optimizer: An instance of `tf.Optimizer` used to train the model. Defaults
      to Adagrad optimizer.
    activation_fn: Activation function applied to each layer. If `None`, will
      use `tf.nn.relu`.
    dropout: When not `None`, the probability we will drop out a given
      coordinate.
    input_layer_partitioner: Optional. Partitioner for input layer. Defaults to
      `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    config: `RunConfig` object to configure the runtime settings.
    warm_start_from: A string filepath to a checkpoint to warm-start from, or a
      `WarmStartSettings` object to fully configure warm-starting.  If the
      string filepath is provided instead of a `WarmStartSettings`, then all
      weights are warm-started, and it is assumed that vocabularies and Tensor
      names are unchanged.
    loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
      reduce training loss over batch. Defaults to `SUM`.

  Returns:
    DNNClassifier with layer annotations.
  """

  original = dnn.DNNClassifier(
      hidden_units=hidden_units,
      feature_columns=feature_columns,
      model_dir=model_dir,
      n_classes=n_classes,
      weight_column=weight_column,
      label_vocabulary=label_vocabulary,
      optimizer=optimizer,
      activation_fn=activation_fn,
      dropout=dropout,
      input_layer_partitioner=input_layer_partitioner,
      config=config,
      warm_start_from=warm_start_from,
      loss_reduction=loss_reduction)

  def _model_fn(features, labels, mode, config):
    with _monkey_patch(
        feature_column_lib, '_internal_input_layer',
        make_input_layer_with_layer_annotations(
            feature_column_lib._internal_input_layer)):  # pylint: disable=protected-access
      return original.model_fn(features, labels, mode, config)

  return estimator.Estimator(
      model_fn=_model_fn,
      model_dir=model_dir,
      config=config,
      warm_start_from=warm_start_from)
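Integrated Gradients itself is not implemented in this snippet; conceptually it averages the model's gradients along the straight-line path from a baseline to the input and scales by the input difference. A minimal NumPy sketch of the usual Riemann-sum approximation (grad_fn, x and baseline are hypothetical placeholders, not part of this API):

import numpy as np

def integrated_gradients(grad_fn, x, baseline, steps=50):
    """Approximates IG attributions for a scalar-output model.

    grad_fn(z) must return dF/dz at input z; with layer annotations it can be
    built from the gradients the annotated estimator exposes.
    """
    alphas = np.linspace(0., 1., steps + 1)
    grads = [grad_fn(baseline + a * (x - baseline)) for a in alphas]
    return (x - baseline) * np.mean(grads, axis=0)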
Example #10
    def test_complete_flow(self):
        n_classes = 3
        input_dimension = 2
        batch_size = 12

        data = np.linspace(0.,
                           n_classes - 1.,
                           batch_size * input_dimension,
                           dtype=np.float32)
        x_data = data.reshape(batch_size, input_dimension)
        categorical_data = np.random.random_integers(0,
                                                     len(x_data),
                                                     size=len(x_data))
        y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
        train_input_fn = numpy_io.numpy_input_fn(
            x={'x': x_data, 'categories': categorical_data},
            y=y_data,
            batch_size=batch_size,
            num_epochs=None,
            shuffle=True)
        eval_input_fn = numpy_io.numpy_input_fn(
            x={'x': x_data, 'categories': categorical_data},
            y=y_data,
            batch_size=batch_size,
            shuffle=False)
        predict_input_fn = numpy_io.numpy_input_fn(
            x={'x': x_data, 'categories': categorical_data},
            batch_size=batch_size,
            shuffle=False)

        feature_columns = [
            feature_column.numeric_column('x', shape=(input_dimension, )),
            feature_column.embedding_column(
                feature_column.categorical_column_with_vocabulary_list(
                    'categories',
                    vocabulary_list=np.linspace(0.,
                                                len(x_data),
                                                len(x_data),
                                                dtype=np.int64)), 1)
        ]

        estimator = dnn.DNNClassifier(hidden_units=(2, 2),
                                      feature_columns=feature_columns,
                                      n_classes=n_classes,
                                      model_dir=self._model_dir)

        def optimizer_fn():
            return optimizers.get_optimizer_instance('Adagrad',
                                                     learning_rate=0.05)

        estimator = estimator_lib.Estimator(
            model_fn=replicate_model_fn.replicate_model_fn(
                estimator.model_fn,
                optimizer_fn,
                devices=['/gpu:0', '/gpu:1', '/gpu:2']),
            model_dir=estimator.model_dir,
            config=estimator.config,
            params=estimator.params)

        num_steps = 10
        estimator.train(train_input_fn, steps=num_steps)

        scores = estimator.evaluate(eval_input_fn)
        self.assertEqual(num_steps, scores[ops_lib.GraphKeys.GLOBAL_STEP])
        self.assertIn('loss', six.iterkeys(scores))

        predicted_proba = np.array([
            x[prediction_keys.PredictionKeys.PROBABILITIES]
            for x in estimator.predict(predict_input_fn)
        ])
        self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)

        feature_spec = feature_column.make_parse_example_spec(feature_columns)
        serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
            feature_spec)
        export_dir = estimator.export_savedmodel(tempfile.mkdtemp(),
                                                 serving_input_receiver_fn)
        self.assertTrue(gfile.Exists(export_dir))
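The synthetic inputs above are small enough to trace by hand; a sketch of the shapes involved (_as_label is a test-local helper, assumed here to map the float values onto integer class ids in {0, 1, 2}):

import numpy as np

n_classes, input_dimension, batch_size = 3, 2, 12
data = np.linspace(0., n_classes - 1., batch_size * input_dimension,
                   dtype=np.float32)                 # 24 values in [0, 2]
x_data = data.reshape(batch_size, input_dimension)   # features, shape (12, 2)
# y_data is built from the first 12 values and reshaped to (12, 1) labels;
# categorical_data supplies one random vocabulary id per row.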
Example #11
def _dnn_classifier_fn(*args, **kwargs):
  return dnn.DNNClassifier(*args, **kwargs)
Example #12
    def test_complete_flow(self):
        n_classes = 3
        input_dimension = 2
        batch_size = 12

        data = np.linspace(0.,
                           n_classes - 1.,
                           batch_size * input_dimension,
                           dtype=np.float32)
        x_data = data.reshape(batch_size, input_dimension)
        y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
        train_input_fn = numpy_io.numpy_input_fn(
            x={'x': x_data},
            y=y_data,
            batch_size=batch_size,
            num_epochs=None,
            shuffle=True)
        eval_input_fn = numpy_io.numpy_input_fn(
            x={'x': x_data},
            y=y_data,
            batch_size=batch_size,
            shuffle=False)
        predict_input_fn = numpy_io.numpy_input_fn(
            x={'x': x_data}, batch_size=batch_size, shuffle=False)

        feature_columns = [
            feature_column.numeric_column('x', shape=(input_dimension, ))
        ]

        estimator = dnn.DNNClassifier(hidden_units=(2, 2),
                                      feature_columns=feature_columns,
                                      n_classes=n_classes,
                                      model_dir=self._model_dir)

        def optimizer_fn():
            return optimizers.get_optimizer_instance('Adagrad',
                                                     learning_rate=0.05)

        # TODO(isaprykin):  Switch Estimator to use allow_soft_placement=True
        # during export_savedmodel and then switch this test to replicate over
        # GPUs instead of CPUs.
        estimator = estimator_lib.Estimator(
            model_fn=replicate_model_fn.replicate_model_fn(
                estimator.model_fn,
                optimizer_fn,
                devices=['/cpu:0', '/cpu:0', '/cpu:0']),
            model_dir=estimator.model_dir,
            config=estimator.config,
            params=estimator.params)

        num_steps = 10
        estimator.train(train_input_fn, steps=num_steps)

        scores = estimator.evaluate(eval_input_fn)
        self.assertEqual(num_steps, scores[ops_lib.GraphKeys.GLOBAL_STEP])
        self.assertIn('loss', six.iterkeys(scores))

        predicted_proba = np.array([
            x[prediction_keys.PredictionKeys.PROBABILITIES]
            for x in estimator.predict(predict_input_fn)
        ])
        self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)

        feature_spec = feature_column.make_parse_example_spec(feature_columns)
        serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
            feature_spec)
        export_dir = estimator.export_savedmodel(tempfile.mkdtemp(),
                                                 serving_input_receiver_fn)
        self.assertTrue(gfile.Exists(export_dir))