Code example #1
    def __init__(self,
                 n_classes,
                 feature_columns=None,
                 model_dir=None,
                 quantiles_dir=None,
                 keypoints_initializers_fn=None,
                 optimizer=None,
                 config=None,
                 hparams=None,
                 feature_engineering_fn=None,
                 head=None,
                 weight_column=None):
        """Construct CalibrateEtlClassifier/Regressor."""
        if not hparams:
            hparams = tfl_hparams.CalibratedEtlHParams([])
        self.check_hparams(hparams)
        hparams = self._adjust_calibration_params(hparams)

        super(_CalibratedEtl,
              self).__init__(n_classes, feature_columns, model_dir,
                             quantiles_dir, keypoints_initializers_fn,
                             optimizer, config, hparams, head, weight_column,
                             'etl')
        # After initialization, we expect model_dir to exist.
        if self._model_dir is None:
            raise ValueError('model_dir is not created')
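
This private constructor is normally reached through the public factory functions rather than called directly. A minimal sketch (not from the source, mirroring the patterns in the later examples here):

# Sketch: constructing the estimator via the public factory, which routes
# through the __init__ above. Names follow the imports used in the other
# examples (tfl_hparams, keypoints_initialization, calibrated_etl).
def init_fn():
    return keypoints_initialization.uniform_keypoints_for_signal(
        10, 0., 1., 0., 1.)

hparams = tfl_hparams.CalibratedEtlHParams(
    ['x0'],
    num_keypoints=10,
    non_monotonic_num_lattices=1,
    non_monotonic_lattice_rank=1,
    non_monotonic_lattice_size=2)
estimator = calibrated_etl.calibrated_etl_classifier(
    feature_columns=[tf.feature_column.numeric_column('x0')],
    hparams=hparams,
    keypoints_initializers_fn=init_fn)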
Code example #2
    def _CalibratedEtlRegressor(self,
                                feature_names,
                                feature_columns,
                                weight_column=None,
                                **hparams_args):
        def init_fn():
            return keypoints_initialization.uniform_keypoints_for_signal(
                _NUM_KEYPOINTS, -1., 1., 0., 1.)

        hparams = tfl_hparams.CalibratedEtlHParams(
            feature_names,
            num_keypoints=_NUM_KEYPOINTS,
            monotonic_num_lattices=1,
            monotonic_lattice_rank=1,
            monotonic_lattice_size=2,
            non_monotonic_num_lattices=1,
            non_monotonic_lattice_rank=1,
            non_monotonic_lattice_size=2,
            **hparams_args)
        # Turn off monotonic calibrator.
        hparams.set_param('calibration_monotonic', None)
        hparams.set_param('learning_rate', 0.1)

        return calibrated_etl.calibrated_etl_regressor(
            feature_columns=feature_columns,
            weight_column=weight_column,
            hparams=hparams,
            keypoints_initializers_fn=init_fn)
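
A sketch of how a test case might invoke this helper (the feature names and columns here are hypothetical):

# Hypothetical call site for the helper above (not from the source):
feature_columns = [
    feature_column_lib.numeric_column('x0'),
    feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlRegressor(['x0', 'x1'], feature_columns)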
Code example #3
    def testEmptyNonMonotonicLatticeSizeExpectsError(self):
        hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
        hparams.set_param('non_monotonic_num_lattices', 2)
        hparams.set_param('non_monotonic_lattice_rank', 2)
        with self.assertRaisesRegexp(
                ValueError,
                'Hyperparameter configuration cannot be used in the calibrated etl '
                'estimator.'):
            calibrated_etl.calibrated_etl_classifier(hparams=hparams)
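
The error fires because non_monotonic_lattice_size is left unset while the other two non-monotonic parameters are given. Presumably (an assumption, by analogy with example #2, which sets all three) adding the missing size would make the configuration valid:

# Presumed fix (an assumption, not from the source): also give the lattice size.
hparams.set_param('non_monotonic_lattice_size', 2)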
Code example #4
  def testConstructorsAllTypes(self):
    _ = hparams.CalibratedHParams(['x0', 'x1'])
    _ = hparams.CalibratedLinearHParams(['x0', 'x1'], learning_rate=0.1)
    _ = hparams.CalibratedLatticeHParams(['x0', 'x1'], learning_rate=0.1)
    _ = hparams.CalibratedRtlHParams(['x0', 'x1'], learning_rate=0.1)
    etl = hparams.CalibratedEtlHParams(['x0', 'x1'], learning_rate=0.1)

    etl.parse('calibration_bound=yes')
    self.assertTrue(etl.calibration_bound)
    etl.parse('calibration_bound=off')
    self.assertFalse(etl.calibration_bound)
    with self.assertRaises(ValueError):
      etl.parse('calibration_bound=foobar')
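
As the test shows, parse accepts several boolean spellings ('yes', 'off') and rejects unknown ones. A small sketch of the same call on multiple parameters at once (the comma-separated name=value syntax is an assumption, borrowed from HParams-style parsers):

# Sketch (assumption: comma-separated name=value pairs are accepted):
etl.parse('learning_rate=0.05,calibration_bound=yes')
assert etl.calibration_bound and etl.learning_rate == 0.05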
Code example #5
    def testWrongLatticeRegularization(self):
        hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
        hparams.set_param('non_monotonic_num_lattices', 2)
        hparams.set_param('non_monotonic_lattice_size', 2)
        hparams.set_param('non_monotonic_lattice_rank', 2)
        hparams.set_feature_param('x', 'lattice_l1_reg', 0.1)
        hparams.set_feature_param('x', 'lattice_l2_reg', 0.1)
        hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1)
        hparams.set_feature_param('x', 'lattice_l2_torsion_reg', 0.1)

        with self.assertRaisesRegexp(
                ValueError,
                'Hyperparameter configuration cannot be used in the calibrated etl '
                'estimator.'):
            calibrated_etl.calibrated_etl_classifier(hparams=hparams)
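
The snippet suggests that per-feature lattice regularizers are rejected by the ETL estimator; a sketch of the presumed alternative (an assumption, not from the source):

# Presumed alternative (assumption): in the ETL model the lattices consume
# learned linear embeddings rather than raw features, so a lattice
# regularizer would be set globally rather than per feature.
hparams.set_param('lattice_l2_reg', 0.1)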
Code example #6
    def testCalibratedEtlWithMissingTraining(self):
        # x0's missing value gets its own vertex, so it can take very different
        # values, while x1's missing value is calibrated, in this case to the
        # middle of the lattice.
        x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
        x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
        training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
        x_samples = {'x0': x0, 'x1': x1}

        train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
            x=x_samples,
            y=training_y,
            batch_size=x0.shape[0],
            num_epochs=2000,
            shuffle=False)
        test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
            x=x_samples, y=training_y, shuffle=False)
        feature_columns = [
            tf.feature_column.numeric_column('x0'),
            tf.feature_column.numeric_column('x1'),
        ]

        def init_fn():
            return keypoints_initialization.uniform_keypoints_for_signal(
                2, 0., 1., 0., 1.)

        hparams = tfl_hparams.CalibratedEtlHParams(
            ['x0', 'x1'],
            num_keypoints=2,
            non_monotonic_num_lattices=5,
            non_monotonic_lattice_rank=2,
            non_monotonic_lattice_size=2,
            learning_rate=0.1,
            missing_input_value=-1.)

        estimator = calibrated_etl.calibrated_etl_regressor(
            feature_columns=feature_columns,
            hparams=hparams,
            keypoints_initializers_fn=init_fn)

        estimator.train(input_fn=train_input_fn)
        # Here we only check the successful evaluation.
        # Checking the actual number, accuracy, etc, makes the test too flaky.
        _ = estimator.evaluate(input_fn=test_input_fn)
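
A sketch of pulling point predictions afterwards ('predictions' as the output key is an assumption, following the standard estimator regressor convention):

# Sketch (assumption: 'predictions' is the regressor's output key):
predictions = [r['predictions'][0]
               for r in estimator.predict(input_fn=test_input_fn)]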
Code example #7
    def testCalibratedEtlWithMissingTraining(self):
        # x0's missing value gets its own vertex, so it can take very different
        # values, while x1's missing value is calibrated, in this case to the
        # middle of the lattice.
        x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
        x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
        training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
        x_samples = {'x0': x0, 'x1': x1}

        train_input_fn = numpy_io.numpy_input_fn(x=x_samples,
                                                 y=training_y,
                                                 batch_size=x0.shape[0],
                                                 num_epochs=2000,
                                                 shuffle=False)
        test_input_fn = numpy_io.numpy_input_fn(x=x_samples,
                                                y=training_y,
                                                shuffle=False)
        feature_columns = [
            feature_column_lib.numeric_column('x0'),
            feature_column_lib.numeric_column('x1'),
        ]

        def init_fn():
            return keypoints_initialization.uniform_keypoints_for_signal(
                2, 0., 1., 0., 1.)

        hparams = tfl_hparams.CalibratedEtlHParams(
            ['x0', 'x1'],
            num_keypoints=2,
            non_monotonic_num_lattices=5,
            non_monotonic_lattice_rank=2,
            non_monotonic_lattice_size=2,
            learning_rate=0.1,
            missing_input_value=-1.)

        estimator = calibrated_etl.calibrated_etl_regressor(
            feature_columns=feature_columns,
            hparams=hparams,
            keypoints_initializers_fn=init_fn)

        estimator.train(input_fn=train_input_fn)
        results = estimator.evaluate(input_fn=test_input_fn)
        self.assertLess(results['average_loss'], 0.1)
Code example #8
    def testCalibratedEtlMonotonicClassifierTraining(self):
        # Construct the following training pairs:
        #
        # Training: (x, y)
        # ([0., 0.], 0.0)
        # ([0., 1.], 1.0)
        # ([1., 0.], 1.0)
        # ([1., 1.], 0.0)
        #
        # which is not a monotonic function. Then check whether forcing
        # monotonicity yields the following orderings:
        # f(0, 0) <= f(0, 1), f(0, 0) <= f(1, 0), f(0, 1) <= f(1, 1),
        # f(1, 0) <= f(1, 1).
        x0 = np.array([0.0, 0.0, 1.0, 1.0])
        x1 = np.array([0.0, 1.0, 0.0, 1.0])
        x_samples = {'x0': x0, 'x1': x1}
        training_y = np.array([[False], [True], [True], [False]])

        train_input_fn = numpy_io.numpy_input_fn(x=x_samples,
                                                 y=training_y,
                                                 batch_size=4,
                                                 num_epochs=1000,
                                                 shuffle=False)
        test_input_fn = numpy_io.numpy_input_fn(x=x_samples,
                                                y=None,
                                                shuffle=False)

        # Define monotonic lattice classifier.
        feature_columns = [
            feature_column_lib.numeric_column('x0'),
            feature_column_lib.numeric_column('x1'),
        ]

        def init_fn():
            return keypoints_initialization.uniform_keypoints_for_signal(
                2, 0., 1., 0., 1.)

        hparams = tfl_hparams.CalibratedEtlHParams(num_keypoints=2,
                                                   monotonic_num_lattices=2,
                                                   monotonic_lattice_rank=2,
                                                   monotonic_lattice_size=2)
        hparams.set_param('calibration_monotonic', +1)
        hparams.set_param('lattice_monotonic', True)
        hparams.set_param('learning_rate', 0.1)

        estimator = calibrated_etl.calibrated_etl_classifier(
            feature_columns=feature_columns,
            hparams=hparams,
            keypoints_initializers_fn=init_fn)
        estimator.train(input_fn=train_input_fn)
        predictions = [
            results['logits'][0]
            for results in estimator.predict(input_fn=test_input_fn)
        ]

        self.assertEqual(len(predictions), 4)
        # Check monotonicity. Note that projection has its own precision, so we
        # add a small number.
        self.assertLess(predictions[0], predictions[1] + 1e-6)
        self.assertLess(predictions[0], predictions[2] + 1e-6)
        self.assertLess(predictions[1], predictions[3] + 1e-6)
        self.assertLess(predictions[2], predictions[3] + 1e-6)
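
Since the sigmoid is monotonic, the same ordering carries over to predicted probabilities; a sketch, assuming the standard 'logistic' output key of binary-classification heads:

# Sketch (assumption: 'logistic' is the binary head's probability key).
probabilities = [r['logistic'][0]
                 for r in estimator.predict(input_fn=test_input_fn)]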