# Example 1
    def _CalibratedEtlRegressor(self,
                                feature_names,
                                feature_columns,
                                weight_column=None,
                                **hparams_args):
        """Builds a calibrated ETL regressor configured for these tests.

        Args:
            feature_names: Feature names passed to the hparams constructor.
            feature_columns: Feature columns forwarded to the estimator.
            weight_column: Optional weight column forwarded to the estimator.
            **hparams_args: Extra keyword overrides for the hparams.

        Returns:
            A calibrated ETL regressor estimator.
        """

        def keypoints_init():
            # Uniform keypoints: inputs over [-1, 1], outputs over [0, 1].
            return keypoints_initialization.uniform_keypoints_for_signal(
                _NUM_KEYPOINTS, -1., 1., 0., 1.)

        etl_hparams = tfl_hparams.CalibratedEtlHParams(
            feature_names,
            num_keypoints=_NUM_KEYPOINTS,
            monotonic_num_lattices=1,
            monotonic_lattice_rank=1,
            monotonic_lattice_size=2,
            non_monotonic_num_lattices=1,
            non_monotonic_lattice_rank=1,
            non_monotonic_lattice_size=2,
            **hparams_args)
        # Disable the monotonic calibrator and fix the learning rate.
        etl_hparams.set_param('calibration_monotonic', None)
        etl_hparams.set_param('learning_rate', 0.1)

        return calibrated_etl.calibrated_etl_regressor(
            feature_columns=feature_columns,
            weight_column=weight_column,
            hparams=etl_hparams,
            keypoints_initializers_fn=keypoints_init)
# Example 2
    def testCalibratedEtlWithMissingTraining(self):
        """Trains with a missing-input value and checks evaluation succeeds.

        x0 is missing with its own vertex, so the missing value can map to
        very different outputs; a missing x1 is calibrated, in this case to
        the middle of the lattice.
        """
        x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
        x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
        training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
        examples = {'x0': x0, 'x1': x1}

        train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
            x=examples,
            y=training_y,
            batch_size=x0.shape[0],
            num_epochs=2000,
            shuffle=False)
        test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
            x=examples, y=training_y, shuffle=False)
        feature_columns = [
            tf.feature_column.numeric_column('x0'),
            tf.feature_column.numeric_column('x1'),
        ]

        def keypoints_init():
            # Two uniform keypoints over [0, 1] for inputs and outputs.
            return keypoints_initialization.uniform_keypoints_for_signal(
                2, 0., 1., 0., 1.)

        etl_hparams = tfl_hparams.CalibratedEtlHParams(
            ['x0', 'x1'],
            num_keypoints=2,
            non_monotonic_num_lattices=5,
            non_monotonic_lattice_rank=2,
            non_monotonic_lattice_size=2,
            learning_rate=0.1,
            missing_input_value=-1.)

        estimator = calibrated_etl.calibrated_etl_regressor(
            feature_columns=feature_columns,
            hparams=etl_hparams,
            keypoints_initializers_fn=keypoints_init)

        estimator.train(input_fn=train_input_fn)
        # Only check that evaluation succeeds; asserting on the actual
        # numbers (accuracy, loss, ...) makes the test too flaky.
        _ = estimator.evaluate(input_fn=test_input_fn)
# Example 3
    def testCalibratedEtlWithMissingTraining(self):
        """Trains with a missing-input value and checks the evaluation loss.

        x0 is missing with its own vertex, so the missing value can map to
        very different outputs; a missing x1 is calibrated, in this case to
        the middle of the lattice.
        """
        x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
        x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
        training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
        examples = {'x0': x0, 'x1': x1}

        train_input_fn = numpy_io.numpy_input_fn(x=examples,
                                                 y=training_y,
                                                 batch_size=x0.shape[0],
                                                 num_epochs=2000,
                                                 shuffle=False)
        test_input_fn = numpy_io.numpy_input_fn(x=examples,
                                                y=training_y,
                                                shuffle=False)
        feature_columns = [
            feature_column_lib.numeric_column('x0'),
            feature_column_lib.numeric_column('x1'),
        ]

        def keypoints_init():
            # Two uniform keypoints over [0, 1] for inputs and outputs.
            return keypoints_initialization.uniform_keypoints_for_signal(
                2, 0., 1., 0., 1.)

        etl_hparams = tfl_hparams.CalibratedEtlHParams(
            ['x0', 'x1'],
            num_keypoints=2,
            non_monotonic_num_lattices=5,
            non_monotonic_lattice_rank=2,
            non_monotonic_lattice_size=2,
            learning_rate=0.1,
            missing_input_value=-1.)

        estimator = calibrated_etl.calibrated_etl_regressor(
            feature_columns=feature_columns,
            hparams=etl_hparams,
            keypoints_initializers_fn=keypoints_init)

        estimator.train(input_fn=train_input_fn)
        metrics = estimator.evaluate(input_fn=test_input_fn)
        self.assertLess(metrics['average_loss'], 0.1)