def _CalibratedRtlRegressor(self,
                            feature_names,
                            feature_columns,
                            num_lattices=1,
                            lattice_rank=1,
                            num_keypoints=_NUM_KEYPOINTS,
                            weight_column=None,
                            **hparams_args):
  """Builds a calibrated RTL regressor for tests.

  Creates CalibratedRtlHParams from the given arguments, disables the
  monotonic calibrator, fixes the learning rate, and returns a
  calibrated_rtl_regressor whose calibration keypoints are initialized
  uniformly over the signal range [-1, 1] mapped to outputs in [0, 1].
  """

  def init_fn():
    # Uniform keypoints: input range [-1, 1] -> output range [0, 1].
    return keypoints_initialization.uniform_keypoints_for_signal(
        num_keypoints, -1., 1., 0., 1.)

  hparams = tfl_hparams.CalibratedRtlHParams(
      feature_names,
      num_keypoints=num_keypoints,
      num_lattices=num_lattices,
      lattice_rank=lattice_rank,
      **hparams_args)
  # Turn off monotonic calibrator.
  hparams.set_param('calibration_monotonic', None)
  hparams.set_param('learning_rate', 0.1)

  return calibrated_rtl.calibrated_rtl_regressor(
      feature_columns=feature_columns,
      weight_column=weight_column,
      hparams=hparams,
      keypoints_initializers_fn=init_fn)
def testCalibratedRtlWithMissingTraining(self):
  """Trains a calibrated RTL regressor on data with missing values."""
  # x0 is missing with its own vertex: so it can take very different values,
  # while x1 is missing and calibrated, in this case to the middle of the
  # lattice.
  x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
  x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
  training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
  x_samples = {'x0': x0, 'x1': x1}

  train_input_fn = numpy_io.numpy_input_fn(
      x=x_samples,
      y=training_y,
      batch_size=x0.shape[0],
      num_epochs=2000,
      shuffle=False)
  # Evaluate on the training data itself: we only check that the model fit.
  test_input_fn = numpy_io.numpy_input_fn(
      x=x_samples, y=training_y, shuffle=False)

  feature_columns = [
      feature_column_lib.numeric_column('x0'),
      feature_column_lib.numeric_column('x1'),
  ]

  def init_fn():
    # Two uniform keypoints over inputs [0, 1] mapped to outputs [0, 1].
    return keypoints_initialization.uniform_keypoints_for_signal(
        2, 0., 1., 0., 1.)

  hparams = tfl_hparams.CalibratedRtlHParams(
      ['x0', 'x1'],
      num_keypoints=2,
      num_lattices=3,
      lattice_rank=2,
      learning_rate=0.1,
      missing_input_value=-1.)
  # Give x0's missing value (-1.) a dedicated lattice vertex.
  hparams.set_feature_param('x0', 'missing_vertex', True)

  estimator = calibrated_rtl.calibrated_rtl_regressor(
      feature_columns=feature_columns,
      hparams=hparams,
      keypoints_initializers_fn=init_fn)
  estimator.train(input_fn=train_input_fn)
  results = estimator.evaluate(input_fn=test_input_fn)
  self.assertLess(results['average_loss'], 0.1)