def test_multi_dim(self):
  """Asserts evaluation metrics for multi-dimensional input and logits."""
  # Create checkpoint: num_inputs=2, hidden_units=(2, 2), num_outputs=3.
  global_step = 100
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5], [-.6, -.5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
      global_step, self._model_dir)
  label_dimension = 3

  # Create DNNRegressor and evaluate.
  dnn_regressor = dnn.DNNRegressor(
      hidden_units=(2, 2),
      feature_columns=[feature_column.numeric_column('age', shape=[2])],
      label_dimension=label_dimension,
      model_dir=self._model_dir)
  def _input_fn():
    return {'age': [[10., 8.]]}, [[1., -1., 0.5]]
  # Uses identical numbers as
  # DNNModelFnTest.test_multi_dim_input_multi_dim_logits.
  # See that test for calculation of logits.
  # logits = [[-0.48, 0.48, 0.39]]
  # loss = (1+0.48)^2 + (-1-0.48)^2 + (0.5-0.39)^2 = 4.3929
  expected_loss = 4.3929
  self.assertAllClose({
      metric_keys.MetricKeys.LOSS: expected_loss,
      metric_keys.MetricKeys.LOSS_MEAN: expected_loss / label_dimension,
      ops.GraphKeys.GLOBAL_STEP: global_step
  }, dnn_regressor.evaluate(input_fn=_input_fn, steps=1))
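
# Hypothetical sanity-check helper (not part of the original suite): re-derives
# the hand-computed sum-of-squares loss above with numpy. The loss form
# (unreduced SSE over all label dimensions) is assumed from the comment.
def _check_multi_dim_regressor_loss():
  import numpy as np
  logits = np.array([-0.48, 0.48, 0.39])
  labels = np.array([1., -1., 0.5])
  # (1+0.48)^2 + (-1-0.48)^2 + (0.5-0.39)^2 = 4.3929
  assert abs(np.sum((labels - logits) ** 2) - 4.3929) < 1e-6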
def test_one_dim(self):
  """Asserts evaluation metrics for one-dimensional input and logits."""
  global_step = 100
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1.], [1.]], [.3]),),
      global_step, self._model_dir)

  dnn_classifier = dnn.DNNClassifier(
      hidden_units=(2, 2),
      feature_columns=[feature_column.numeric_column('age')],
      model_dir=self._model_dir)
  def _input_fn():
    # batch_size = 2, one false label, and one true.
    return {'age': [[10.], [10.]]}, [[1], [0]]
  # Uses identical numbers as DNNModelTest.test_one_dim_logits.
  # See that test for calculation of logits.
  # logits = [[-2.08], [-2.08]] =>
  # logistic = 1/(1 + exp(-logits)) = [[0.11105597], [0.11105597]]
  # loss = -1. * log(0.111) -1. * log(0.889) = 2.31544200
  expected_loss = 2.31544200
  self.assertAllClose({
      metric_keys.MetricKeys.LOSS: expected_loss,
      metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2.,
      metric_keys.MetricKeys.ACCURACY: 0.5,
      metric_keys.MetricKeys.PREDICTION_MEAN: 0.11105597,
      metric_keys.MetricKeys.LABEL_MEAN: 0.5,
      metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
      # There is no good way to calculate AUC for only two data points. But
      # that is what the algorithm returns.
      metric_keys.MetricKeys.AUC: 0.5,
      metric_keys.MetricKeys.AUC_PR: 0.75,
      ops.GraphKeys.GLOBAL_STEP: global_step
  }, dnn_classifier.evaluate(input_fn=_input_fn, steps=1))
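
# Hypothetical sanity-check helper (not part of the original suite): re-derives
# the binary log loss above, assuming an unreduced sum of per-example sigmoid
# cross-entropies, as the comment's arithmetic implies.
def _check_one_dim_classifier_loss():
  import numpy as np
  logit = -2.08
  logistic = 1. / (1. + np.exp(-logit))  # 0.11105597
  # One positive label contributes -log(p); one negative, -log(1 - p).
  loss = -np.log(logistic) - np.log(1. - logistic)
  assert abs(logistic - 0.11105597) < 1e-6
  assert abs(loss - 2.31544200) < 1e-6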
def test_multi_dim(self):
  """Asserts evaluation metrics for multi-dimensional input and logits."""
  global_step = 100
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5], [-.6, -.5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
      global_step, self._model_dir)
  n_classes = 3

  dnn_classifier = dnn.DNNClassifier(
      hidden_units=(2, 2),
      feature_columns=[feature_column.numeric_column('age', shape=[2])],
      n_classes=n_classes,
      model_dir=self._model_dir)
  def _input_fn():
    # batch_size = 2, one false label, and one true.
    return {'age': [[10., 8.], [10., 8.]]}, [[1], [0]]
  # Uses identical numbers as
  # DNNModelFnTest.test_multi_dim_input_multi_dim_logits.
  # See that test for calculation of logits.
  # logits = [[-0.48, 0.48, 0.39], [-0.48, 0.48, 0.39]]
  # probabilities = exp(logits)/sum(exp(logits))
  #               = [[0.16670536, 0.43538380, 0.39791084],
  #                  [0.16670536, 0.43538380, 0.39791084]]
  # loss = -log(0.43538380) - log(0.16670536)
  expected_loss = 2.62305466
  self.assertAllClose({
      metric_keys.MetricKeys.LOSS: expected_loss,
      metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
      metric_keys.MetricKeys.ACCURACY: 0.5,
      ops.GraphKeys.GLOBAL_STEP: global_step
  }, dnn_classifier.evaluate(input_fn=_input_fn, steps=1))
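
# Hypothetical sanity-check helper (not part of the original suite): re-derives
# the softmax cross-entropy loss above, assuming an unreduced sum over the two
# examples with class labels 1 and 0, per the comment.
def _check_multi_dim_classifier_loss():
  import numpy as np
  logits = np.array([-0.48, 0.48, 0.39])
  probs = np.exp(logits) / np.sum(np.exp(logits))
  assert np.allclose(probs, [0.16670536, 0.43538380, 0.39791084])
  assert abs((-np.log(probs[1]) - np.log(probs[0])) - 2.62305466) < 1e-6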
def test_one_dim(self):
  """Asserts evaluation metrics for one-dimensional input and logits."""
  # Create checkpoint: num_inputs=1, hidden_units=(2, 2), num_outputs=1.
  global_step = 100
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1.], [1.]], [.3]),),
      global_step, self._model_dir)

  # Create DNNRegressor and evaluate.
  dnn_regressor = dnn.DNNRegressor(
      hidden_units=(2, 2),
      feature_columns=[feature_column.numeric_column('age')],
      model_dir=self._model_dir)
  def _input_fn():
    return {'age': [[10.]]}, [[1.]]
  # Uses identical numbers as DNNModelTest.test_one_dim_logits.
  # See that test for calculation of logits.
  # logits = [[-2.08]] => predictions = [-2.08].
  # loss = (1+2.08)^2 = 9.4864
  expected_loss = 9.4864
  self.assertAllClose({
      metric_keys.MetricKeys.LOSS: expected_loss,
      metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
      ops.GraphKeys.GLOBAL_STEP: global_step
  }, dnn_regressor.evaluate(input_fn=_input_fn, steps=1))
def test_one_dim(self):
  """Asserts predictions for one-dimensional input and logits."""
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1.], [1.]], [.3]),),
      global_step=0, model_dir=self._model_dir)

  dnn_classifier = dnn.DNNClassifier(
      hidden_units=(2, 2),
      feature_columns=(feature_column.numeric_column('x'),),
      model_dir=self._model_dir)
  input_fn = numpy_io.numpy_input_fn(
      x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
  # Uses identical numbers as DNNModelTest.test_one_dim_logits.
  # See that test for calculation of logits.
  # logits = [-2.08] =>
  # logistic = exp(-2.08)/(1 + exp(-2.08)) = 0.11105597
  # probabilities = [1-logistic, logistic] = [0.88894403, 0.11105597]
  # class_ids = argmax(probabilities) = [0]
  predictions = next(dnn_classifier.predict(input_fn=input_fn))
  self.assertAllClose([-2.08],
                      predictions[prediction_keys.PredictionKeys.LOGITS])
  self.assertAllClose([0.11105597],
                      predictions[prediction_keys.PredictionKeys.LOGISTIC])
  self.assertAllClose(
      [0.88894403, 0.11105597],
      predictions[prediction_keys.PredictionKeys.PROBABILITIES])
  self.assertAllClose([0],
                      predictions[prediction_keys.PredictionKeys.CLASS_IDS])
  self.assertAllEqual([b'0'],
                      predictions[prediction_keys.PredictionKeys.CLASSES])
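
# Hypothetical sanity-check helper (not part of the original suite): re-derives
# each binary prediction field asserted above from the single logit, following
# the formulas in the comment.
def _check_one_dim_prediction_fields():
  import numpy as np
  logit = -2.08
  logistic = np.exp(logit) / (1. + np.exp(logit))      # 0.11105597
  probabilities = np.array([1. - logistic, logistic])  # [0.88894403, 0.11105597]
  assert abs(logistic - 0.11105597) < 1e-6
  assert int(np.argmax(probabilities)) == 0            # class_ids = [0]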
def test_multi_dim(self):
  """Asserts predictions for multi-dimensional input and logits."""
  # Create checkpoint: num_inputs=2, hidden_units=(2, 2), num_outputs=3.
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5], [-.6, -.5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
      100, self._model_dir)

  # Create DNNRegressor and predict.
  dnn_regressor = dnn.DNNRegressor(
      hidden_units=(2, 2),
      feature_columns=(feature_column.numeric_column('x', shape=(2,)),),
      label_dimension=3,
      model_dir=self._model_dir)
  input_fn = numpy_io.numpy_input_fn(
      # Inputs shape is (batch_size, num_inputs).
      x={'x': np.array([[10., 8.]])},
      batch_size=1,
      shuffle=False)
  # Uses identical numbers as
  # DNNModelFnTest.test_multi_dim_input_multi_dim_logits.
  # See that test for calculation of logits.
  # logits = [[-0.48, 0.48, 0.39]] => predictions = [-0.48, 0.48, 0.39]
  self.assertAllClose({
      prediction_keys.PredictionKeys.PREDICTIONS: [-0.48, 0.48, 0.39],
  }, next(dnn_regressor.predict(input_fn=input_fn)))
def test_one_dim(self):
  """Asserts predictions for one-dimensional input and logits."""
  # Create checkpoint: num_inputs=1, hidden_units=(2, 2), num_outputs=1.
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1.], [1.]], [.3]),),
      global_step=0, model_dir=self._model_dir)

  # Create DNNRegressor and predict.
  dnn_regressor = dnn.DNNRegressor(
      hidden_units=(2, 2),
      feature_columns=(feature_column.numeric_column('x'),),
      model_dir=self._model_dir)
  input_fn = numpy_io.numpy_input_fn(
      x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
  # Uses identical numbers as DNNModelTest.test_one_dim_logits.
  # See that test for calculation of logits.
  # logits = [[-2.08]] => predictions = [-2.08].
  self.assertAllClose({
      prediction_keys.PredictionKeys.PREDICTIONS: [-2.08],
  }, next(dnn_regressor.predict(input_fn=input_fn)))
def test_multi_dim(self):
  """Asserts train loss for multi-dimensional input and logits."""
  base_global_step = 100
  hidden_units = (2, 2)
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5], [-.6, -.5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
      base_global_step, self._model_dir)
  input_dimension = 2
  label_dimension = 3

  # Uses identical numbers as
  # DNNModelFnTest.test_multi_dim_input_multi_dim_logits.
  # See that test for calculation of logits.
  # logits = [[-0.48, 0.48, 0.39]]
  # loss = (1+0.48)^2 + (-1-0.48)^2 + (0.5-0.39)^2 = 4.3929
  expected_loss = 4.3929
  mock_optimizer = dnn_testing_utils.mock_optimizer(
      self, hidden_units=hidden_units, expected_loss=expected_loss)
  dnn_regressor = dnn.DNNRegressor(
      hidden_units=hidden_units,
      feature_columns=[
          feature_column.numeric_column('age', shape=[input_dimension])],
      label_dimension=label_dimension,
      optimizer=mock_optimizer,
      model_dir=self._model_dir)
  self.assertEqual(0, mock_optimizer.minimize.call_count)

  # Train for a few steps, then validate optimizer, summaries, and
  # checkpoint.
  num_steps = 5
  summary_hook = _SummaryHook()
  dnn_regressor.train(
      input_fn=lambda: ({'age': [[10., 8.]]}, [[1., -1., 0.5]]),
      steps=num_steps,
      hooks=(summary_hook,))
  self.assertEqual(1, mock_optimizer.minimize.call_count)
  summaries = summary_hook.summaries()
  self.assertEqual(num_steps, len(summaries))
  for summary in summaries:
    _assert_simple_summary(
        self,
        {
            metric_keys.MetricKeys.LOSS_MEAN:
                expected_loss / label_dimension,
            'dnn/dnn/hiddenlayer_0_fraction_of_zero_values': 0.,
            'dnn/dnn/hiddenlayer_1_fraction_of_zero_values': 0.5,
            'dnn/dnn/logits_fraction_of_zero_values': 0.,
            metric_keys.MetricKeys.LOSS: expected_loss,
        },
        summary)
  _assert_checkpoint(
      self, base_global_step + num_steps, input_units=input_dimension,
      hidden_units=hidden_units, output_units=label_dimension,
      model_dir=self._model_dir)
def test_multi_class(self):
  n_classes = 3
  base_global_step = 100
  hidden_units = (2, 2)
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
      base_global_step, self._model_dir)

  # Uses identical numbers as DNNModelFnTest.test_multi_dim_logits.
  # See that test for calculation of logits.
  # logits = [-2.08, 2.08, 1.19] => probabilities = [0.0109, 0.7011, 0.2879]
  # loss = -1. * log(0.7011) = 0.35505795
  expected_loss = 0.35505795
  mock_optimizer = dnn_testing_utils.mock_optimizer(
      self, hidden_units=hidden_units, expected_loss=expected_loss)
  dnn_classifier = dnn.DNNClassifier(
      n_classes=n_classes,
      hidden_units=hidden_units,
      feature_columns=(feature_column.numeric_column('age'),),
      optimizer=mock_optimizer,
      model_dir=self._model_dir)
  self.assertEqual(0, mock_optimizer.minimize.call_count)

  # Train for a few steps, then validate optimizer, summaries, and
  # checkpoint.
  num_steps = 5
  summary_hook = _SummaryHook()
  dnn_classifier.train(
      input_fn=lambda: ({'age': [[10.]]}, [[1]]),
      steps=num_steps,
      hooks=(summary_hook,))
  self.assertEqual(1, mock_optimizer.minimize.call_count)
  summaries = summary_hook.summaries()
  self.assertEqual(num_steps, len(summaries))
  for summary in summaries:
    _assert_simple_summary(
        self,
        {
            metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
            'dnn/dnn/hiddenlayer_0_fraction_of_zero_values': 0.,
            'dnn/dnn/hiddenlayer_1_fraction_of_zero_values': .5,
            'dnn/dnn/logits_fraction_of_zero_values': 0.,
            metric_keys.MetricKeys.LOSS: expected_loss,
        },
        summary)
  _assert_checkpoint(
      self, base_global_step + num_steps, input_units=1,
      hidden_units=hidden_units, output_units=n_classes,
      model_dir=self._model_dir)
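
# Hypothetical sanity-check helper (not part of the original suite): re-derives
# the single-example multi-class loss above, assuming softmax cross-entropy
# with true class 1, per the comment.
def _check_multi_class_loss():
  import numpy as np
  logits = np.array([-2.08, 2.08, 1.19])
  probs = np.exp(logits) / np.sum(np.exp(logits))
  # probabilities ~= [0.0109, 0.7011, 0.2879]
  assert abs(-np.log(probs[1]) - 0.35505795) < 1e-6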
def test_one_dim(self):
  """Asserts train loss for one-dimensional input and logits."""
  base_global_step = 100
  hidden_units = (2, 2)
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1.], [1.]], [.3]),),
      base_global_step, self._model_dir)

  # Uses identical numbers as DNNModelFnTest.test_one_dim_logits.
  # See that test for calculation of logits.
  # logits = [-2.08] => predictions = [-2.08]
  # loss = (1 + 2.08)^2 = 9.4864
  expected_loss = 9.4864
  mock_optimizer = dnn_testing_utils.mock_optimizer(
      self, hidden_units=hidden_units, expected_loss=expected_loss)
  dnn_regressor = dnn.DNNRegressor(
      hidden_units=hidden_units,
      feature_columns=(feature_column.numeric_column('age'),),
      optimizer=mock_optimizer,
      model_dir=self._model_dir)
  self.assertEqual(0, mock_optimizer.minimize.call_count)

  # Train for a few steps, then validate optimizer, summaries, and
  # checkpoint.
  num_steps = 5
  summary_hook = _SummaryHook()
  dnn_regressor.train(
      input_fn=lambda: ({'age': [[10.]]}, [[1.]]),
      steps=num_steps,
      hooks=(summary_hook,))
  self.assertEqual(1, mock_optimizer.minimize.call_count)
  summaries = summary_hook.summaries()
  self.assertEqual(num_steps, len(summaries))
  for summary in summaries:
    _assert_simple_summary(
        self,
        {
            metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
            'dnn/dnn/hiddenlayer_0_fraction_of_zero_values': 0.,
            'dnn/dnn/hiddenlayer_1_fraction_of_zero_values': 0.5,
            'dnn/dnn/logits_fraction_of_zero_values': 0.,
            metric_keys.MetricKeys.LOSS: expected_loss,
        },
        summary)
  _assert_checkpoint(
      self, base_global_step + num_steps, input_units=1,
      hidden_units=hidden_units, output_units=1,
      model_dir=self._model_dir)
def test_multi_dim(self):
  """Asserts predictions for multi-dimensional input and logits."""
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5], [-.6, -.5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
      global_step=0, model_dir=self._model_dir)

  dnn_classifier = dnn.DNNClassifier(
      hidden_units=(2, 2),
      feature_columns=(feature_column.numeric_column('x', shape=(2,)),),
      n_classes=3,
      model_dir=self._model_dir)
  input_fn = numpy_io.numpy_input_fn(
      # Inputs shape is (batch_size, num_inputs).
      x={'x': np.array([[10., 8.]])},
      batch_size=1,
      shuffle=False)
  # Uses identical numbers as
  # DNNModelFnTest.test_multi_dim_input_multi_dim_logits.
  # See that test for calculation of logits.
  # logits = [-0.48, 0.48, 0.39] =>
  # probabilities[i] = exp(logits[i]) / sum_j exp(logits[j]) =>
  # probabilities = [0.16670536, 0.43538380, 0.39791084]
  # class_ids = argmax(probabilities) = [1]
  predictions = next(dnn_classifier.predict(input_fn=input_fn))
  self.assertItemsEqual(
      [prediction_keys.PredictionKeys.LOGITS,
       prediction_keys.PredictionKeys.PROBABILITIES,
       prediction_keys.PredictionKeys.CLASS_IDS,
       prediction_keys.PredictionKeys.CLASSES],
      six.iterkeys(predictions))
  self.assertAllClose(
      [-0.48, 0.48, 0.39],
      predictions[prediction_keys.PredictionKeys.LOGITS])
  self.assertAllClose(
      [0.16670536, 0.43538380, 0.39791084],
      predictions[prediction_keys.PredictionKeys.PROBABILITIES])
  self.assertAllEqual(
      [1], predictions[prediction_keys.PredictionKeys.CLASS_IDS])
  self.assertAllEqual(
      [b'1'], predictions[prediction_keys.PredictionKeys.CLASSES])