Example #1
    def __init__(self,
                 n_classes,
                 feature_columns=None,
                 model_dir=None,
                 quantiles_dir=None,
                 keypoints_initializers_fn=None,
                 lattice_initializers_fn=None,
                 optimizer=None,
                 config=None,
                 hparams=None,
                 head=None,
                 weight_column=None):
        """Construct CalibrateLatticeClassifier/Regressor."""
        if not hparams:
            hparams = tfl_hparams.CalibratedLatticeHParams([])
        self.check_hparams(hparams)
        hparams = self._set_calibration_params(hparams)

        self.lattice_initializers_fn_ = lattice_initializers_fn

        super(_CalibratedLattice,
              self).__init__(n_classes, feature_columns, model_dir,
                             quantiles_dir, keypoints_initializers_fn,
                             optimizer, config, hparams, head, weight_column,
                             'lattice')
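
For context, a minimal sketch (not from the original source) of how the hparams handed to this constructor are typically assembled; the feature names and values below are purely illustrative:

hparams = tfl_hparams.CalibratedLatticeHParams(
    feature_names=['x0', 'x1'],  # features the model calibrates
    num_keypoints=10)            # calibration keypoints per feature
hparams.set_param('lattice_size', 2)                 # vertices per lattice dimension
hparams.set_feature_param('x0', 'monotonicity', +1)  # per-feature override for x0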
Example #2
    def testCalibratedLatticeMonotonicClassifierTraining(self):
        # Construct the following training/testing pair.
        #
        # Training: (x, y)
        # ([0., 0.], 0.0)
        # ([0., 1.], 1.0)
        # ([1., 0.], 1.0)
        # ([1., 1.], 0.0)
        #
        # Test: (x, y)
        # ([0., 0.], 0.0)
        # ([0., 1.], 1.0)
        # ([1., 0.], 1.0)
        # ([1., 1.], 1.0)
        #
        # Note that the training data contains one noisy sample, ([1., 1.], 0.0),
        # while the test examples are generated by the logical-OR function. By
        # enforcing increasing monotonicity on all features, the model should
        # therefore generalize well to the test examples.
        x0 = np.array([0.0, 0.0, 1.0, 1.0])
        x1 = np.array([0.0, 1.0, 0.0, 1.0])
        x_samples = {'x0': x0, 'x1': x1}
        training_y = np.array([[False], [True], [True], [False]])
        test_y = np.array([[False], [True], [True], [True]])

        train_input_fn = numpy_io.numpy_input_fn(x=x_samples,
                                                 y=training_y,
                                                 batch_size=4,
                                                 num_epochs=1000,
                                                 shuffle=False)
        test_input_fn = numpy_io.numpy_input_fn(x=x_samples,
                                                y=test_y,
                                                shuffle=False)

        # Define monotonic lattice classifier.
        feature_columns = [
            feature_column_lib.numeric_column('x0'),
            feature_column_lib.numeric_column('x1'),
        ]

        def init_fn():
            return keypoints_initialization.uniform_keypoints_for_signal(
                2, 0., 1., 0., 1.)

        hparams = tfl_hparams.CalibratedLatticeHParams(num_keypoints=2)

        # Monotonic calibrated lattice.
        hparams.set_param('monotonicity', +1)
        hparams.set_param('learning_rate', 0.1)
        hparams.set_param('interpolation_type', 'hypercube')

        estimator = calibrated_lattice.calibrated_lattice_classifier(
            feature_columns=feature_columns,
            hparams=hparams,
            keypoints_initializers_fn=init_fn)

        estimator.train(input_fn=train_input_fn)
        results = estimator.evaluate(input_fn=test_input_fn)
        # We should expect 1.0 accuracy.
        self.assertGreater(results['accuracy'], 0.999)
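
If only some inputs needed the constraint, the global 'monotonicity' setting above could presumably be replaced by per-feature overrides through set_feature_param (a sketch; it reuses only parameter names that appear elsewhere in these examples):

hparams = tfl_hparams.CalibratedLatticeHParams(['x0', 'x1'], num_keypoints=2)
hparams.set_feature_param('x0', 'monotonicity', +1)  # constrain x0 only
hparams.set_feature_param('x1', 'monotonicity', 0)   # leave x1 unconstrained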
Example #3
    def setUp(self):
        self.empty_estimator = calibrated_lattice.calibrated_lattice_classifier()
        self.hparams = tfl_hparams.CalibratedLatticeHParams(
            feature_names=['x'])
        self.hparams.set_param('lattice_size', 2)
        self.hparams.set_param('calibrator_output_min', 0)
        self.hparams.set_param('calibrator_output_max', 1)
        self.hparams.set_param('calibration_bound', True)
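
A hypothetical test that could follow this setUp, merely checking that the bounded hparams are accepted by the classifier factory (illustrative, not from the original test file):

    def testBoundedCalibratorHParamsAccepted(self):
        # Hypothetical check: the hparams prepared in setUp should construct a
        # classifier the same way as in the other examples.
        estimator = calibrated_lattice.calibrated_lattice_classifier(
            feature_columns=[feature_column_lib.numeric_column('x')],
            hparams=self.hparams)
        self.assertIsNotNone(estimator)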
Example #4
  def testConstructorsAllTypes(self):
    _ = hparams.CalibratedHParams(['x0', 'x1'])
    _ = hparams.CalibratedLinearHParams(['x0', 'x1'], learning_rate=0.1)
    _ = hparams.CalibratedLatticeHParams(['x0', 'x1'], learning_rate=0.1)
    _ = hparams.CalibratedRtlHParams(['x0', 'x1'], learning_rate=0.1)
    etl = hparams.CalibratedEtlHParams(['x0', 'x1'], learning_rate=0.1)

    etl.parse('calibration_bound=yes')
    self.assertTrue(etl.calibration_bound)
    etl.parse('calibration_bound=off')
    self.assertFalse(etl.calibration_bound)
    with self.assertRaises(ValueError):
      etl.parse('calibration_bound=foobar')
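
A small extension sketch, based on the assumption that parse() handles numeric values the same way it handles the boolean above, e.g. to override the learning_rate passed to the constructor:

    etl.parse('learning_rate=0.05')
    self.assertAlmostEqual(etl.learning_rate, 0.05)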
Example #5
    def _CalibratedLatticeRegressor(self, feature_names, feature_columns,
                                    **hparams_args):
        def init_fn():
            return keypoints_initialization.uniform_keypoints_for_signal(
                _NUM_KEYPOINTS, -1., 1., 0., 1.)

        hparams = tfl_hparams.CalibratedLatticeHParams(
            feature_names, num_keypoints=_NUM_KEYPOINTS, **hparams_args)
        # Turn off monotonic calibrator.
        hparams.set_param('calibration_monotonic', None)
        hparams.set_param('learning_rate', 0.1)
        return calibrated_lattice.calibrated_lattice_regressor(
            feature_columns=feature_columns,
            hparams=hparams,
            keypoints_initializers_fn=init_fn)
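
A sketch of how this helper might be invoked from a test; the feature names, columns, and the lattice_size override are illustrative:

estimator = self._CalibratedLatticeRegressor(
    ['x0', 'x1'],
    [feature_column_lib.numeric_column('x0'),
     feature_column_lib.numeric_column('x1')],
    lattice_size=2)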
Example #6
    def testCalibratedLatticeWithMissingTraining(self):
        # Missing values of x0 get their own lattice vertex, so they can map to
        # very different values, while missing values of x1 are calibrated, in
        # this case to the middle of the lattice.
        x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
        x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
        training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
        x_samples = {'x0': x0, 'x1': x1}

        train_input_fn = numpy_io.numpy_input_fn(x=x_samples,
                                                 y=training_y,
                                                 batch_size=x0.shape[0],
                                                 num_epochs=2000,
                                                 shuffle=False)
        test_input_fn = numpy_io.numpy_input_fn(x=x_samples,
                                                y=training_y,
                                                shuffle=False)
        feature_columns = [
            feature_column_lib.numeric_column('x0'),
            feature_column_lib.numeric_column('x1'),
        ]

        def init_fn():
            return keypoints_initialization.uniform_keypoints_for_signal(
                2, 0., 1., 0., 1.)

        hparams = tfl_hparams.CalibratedLatticeHParams(['x0', 'x1'],
                                                       num_keypoints=2,
                                                       learning_rate=0.1,
                                                       missing_input_value=-1.)
        hparams.set_feature_param('x0', 'missing_vertex', True)

        estimator = calibrated_lattice.calibrated_lattice_regressor(
            feature_columns=feature_columns,
            hparams=hparams,
            keypoints_initializers_fn=init_fn)

        estimator.train(input_fn=train_input_fn)
        results = estimator.evaluate(input_fn=test_input_fn)
        self.assertLess(results['average_loss'], 0.1)
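
If the two inputs used different missing-value markers, the global missing_input_value above could presumably be set per feature instead, using the same set_feature_param mechanism (a sketch; the -9. marker for x1 is hypothetical):

hparams = tfl_hparams.CalibratedLatticeHParams(['x0', 'x1'],
                                               num_keypoints=2,
                                               learning_rate=0.1)
hparams.set_feature_param('x0', 'missing_input_value', -1.)
hparams.set_feature_param('x0', 'missing_vertex', True)
hparams.set_feature_param('x1', 'missing_input_value', -9.)  # hypothetical marker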