Example #1
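Trains a tfl CannedClassifier with a CalibratedLinearConfig on a subset of the heart-dataset features, then checks that the evaluation AUC clears a parameterized threshold.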
 def testCalibratedLinearClassifier(self, feature_names, output_calibration,
                                    use_bias, auc):
   self._ResetAllBackends()
   feature_columns = [
       feature_column for feature_column in self.heart_feature_columns
       if feature_column.name in feature_names
   ]
   feature_configs = [
       feature_config for feature_config in self.heart_feature_configs
       if feature_config.name in feature_names
   ]
   model_config = configs.CalibratedLinearConfig(
       use_bias=use_bias,
       regularizer_configs=[
           configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
       ],
       output_calibration=output_calibration,
       feature_configs=feature_configs)
   estimator = estimators.CannedClassifier(
       feature_columns=feature_columns,
       model_config=model_config,
       feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1),
       optimizer=tf.keras.optimizers.Adam(0.01))
   estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=200))
   results = estimator.evaluate(input_fn=self._GetHeartTestInputFn())
   logging.info('Calibrated linear classifier results:')
   logging.info(results)
   self.assertGreater(results['auc'], auc)
Example #2
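The regression counterpart: a CannedEstimator with an explicit RegressionHead on the Boston-housing features, asserting an upper bound on average_loss instead of a lower bound on AUC.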
 def testCalibratedLinearEstimator(self, feature_names, output_calibration,
                                   use_bias, average_loss):
   self._ResetAllBackends()
   feature_columns = [
       feature_column for feature_column in self.boston_feature_columns
       if feature_column.name in feature_names
   ]
   feature_configs = [
       feature_config for feature_config in self.boston_feature_configs
       if feature_config.name in feature_names
   ]
   model_config = configs.CalibratedLinearConfig(
       use_bias=use_bias,
       regularizer_configs=[
           configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
       ],
       output_calibration=output_calibration,
       feature_configs=feature_configs)
   estimator = estimators.CannedEstimator(
       head=regression_head.RegressionHead(),
       feature_columns=feature_columns,
       model_config=model_config,
       feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1),
       optimizer=tf.keras.optimizers.Adam(0.01))
   estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200))
   results = estimator.evaluate(input_fn=self._GetBostonTestInputFn())
   logging.info('Calibrated linear regressor results:')
   logging.info(results)
   self.assertLess(results['average_loss'], average_loss)
Example #3
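Builds a premade CalibratedLinear model with output calibration, saves it in the Keras H5 format, and verifies that the reloaded model predicts identically. Note that loading requires passing premade.get_custom_objects() so Keras can deserialize the custom TFL layers.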
 def testLinearH5FormatSaveLoad(self):
   model_config = configs.CalibratedLinearConfig(
       feature_configs=copy.deepcopy(feature_configs),
       regularizer_configs=[
           configs.RegularizerConfig('calib_hessian', l2=1e-4),
           configs.RegularizerConfig('torsion', l2=1e-3),
       ],
       use_bias=True,
       output_min=0.0,
       output_max=1.0,
       output_calibration=True,
       output_calibration_num_keypoints=6,
       output_initialization=[0.0, 1.0])
   model = premade.CalibratedLinear(model_config)
   # Compile and fit model.
   model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.1))
   model.fit(fake_data['train_xs'], fake_data['train_ys'])
   # Save model using H5 format.
   with tempfile.NamedTemporaryFile(suffix='.h5') as f:
     tf.keras.models.save_model(model, f.name)
     loaded_model = tf.keras.models.load_model(
         f.name, custom_objects=premade.get_custom_objects())
     self.assertAllClose(
         model.predict(fake_data['eval_xs']),
         loaded_model.predict(fake_data['eval_xs']))
Example #4
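Trains a canned classifier (linear or lattice), exports it as a SavedModel, and inspects the parsed model graph. The expected node count is two per feature (input plus calibration), one for the linear or lattice layer, and one more if output calibration is enabled. For the 'kronecker_factored' parameterization, unsupported constraints are stripped from the feature configs first, since the RTL layer only supports monotonicity and bound constraints.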
    def testCalibratedModelInfo(self, model_type, parameterization,
                                output_calibration):
        self._ResetAllBackends()
        if model_type == 'linear':
            model_config = configs.CalibratedLinearConfig(
                feature_configs=self.heart_feature_configs,
                output_calibration=output_calibration,
            )
        else:
            feature_configs = copy.deepcopy(self.heart_feature_configs)
            if parameterization == 'kronecker_factored':
                # RTL Layer only supports monotonicity and bound constraints.
                for feature_config in feature_configs:
                    feature_config.lattice_size = 2
                    feature_config.unimodality = 'none'
                    feature_config.reflects_trust_in = None
                    feature_config.dominates = None
                    feature_config.regularizer_configs = None
            model_config = configs.CalibratedLatticeConfig(
                feature_configs=feature_configs,
                parameterization=parameterization,
                output_calibration=output_calibration,
            )
        estimator = estimators.CannedClassifier(
            feature_columns=self.heart_feature_columns,
            model_config=model_config,
            feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1),
            prefitting_input_fn=self._GetHeartTrainInputFn(num_epochs=5),
            optimizer=tf.keras.optimizers.Adam(0.01),
            prefitting_optimizer=tf.keras.optimizers.Adam(0.01))
        estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=20))

        # Serving input fn is used to create saved models.
        serving_input_fn = (
            tf.estimator.export.build_parsing_serving_input_receiver_fn(
                feature_spec=fc.make_parse_example_spec(
                    self.heart_feature_columns)))
        saved_model_path = estimator.export_saved_model(
            estimator.model_dir, serving_input_fn)
        logging.info('Model exported to %s', saved_model_path)
        model = estimators.get_model_graph(saved_model_path)

        expected_num_nodes = (
            2 * len(self.heart_feature_columns) +  # Input features and calibration
            1 +  # Linear or lattice layer
            int(output_calibration))  # Output calibration

        self.assertLen(model.nodes, expected_num_nodes)
Example #5
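Round-trips a premade CalibratedLinear model through get_config()/from_config() and compares the JSON-serialized configs for equality.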
 def testLinearFromConfig(self):
   model_config = configs.CalibratedLinearConfig(
       feature_configs=copy.deepcopy(feature_configs),
       regularizer_configs=[
           configs.RegularizerConfig('calib_hessian', l2=1e-4),
           configs.RegularizerConfig('torsion', l2=1e-3),
       ],
       use_bias=True,
       output_min=0.0,
       output_max=1.0,
       output_calibration=True,
       output_calibration_num_keypoints=6,
       output_initialization=[0.0, 1.0])
   model = premade.CalibratedLinear(model_config)
   loaded_model = premade.CalibratedLinear.from_config(model.get_config())
   self.assertEqual(
       json.dumps(model.get_config(), sort_keys=True, cls=self.Encoder),
       json.dumps(loaded_model.get_config(), sort_keys=True, cls=self.Encoder))
Example #6
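A simpler variant of the model-graph test from Example #4, without the lattice parameterization branch.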
    def testCalibratedModelInfo(self, model_type, output_calibration):
        self._ResetAllBackends()
        if model_type == 'linear':
            model_config = configs.CalibratedLinearConfig(
                feature_configs=self.heart_feature_configs,
                output_calibration=output_calibration,
            )
        else:
            model_config = configs.CalibratedLatticeConfig(
                feature_configs=self.heart_feature_configs,
                output_calibration=output_calibration,
            )
        estimator = estimators.CannedClassifier(
            feature_columns=self.heart_feature_columns,
            model_config=model_config,
            feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1),
            prefitting_input_fn=self._GetHeartTrainInputFn(num_epochs=5),
            optimizer=tf.keras.optimizers.Adam(0.01),
            prefitting_optimizer=tf.keras.optimizers.Adam(0.01))
        estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=20))

        # Serving input fn is used to create saved models.
        serving_input_fn = (
            tf.estimator.export.build_parsing_serving_input_receiver_fn(
                feature_spec=fc.make_parse_example_spec(
                    self.heart_feature_columns)))
        saved_model_path = estimator.export_saved_model(
            estimator.model_dir, serving_input_fn)
        logging.info('Model exported to %s', saved_model_path)
        model = estimators.get_model_graph(saved_model_path)

        expected_num_nodes = (
            2 * len(self.heart_feature_columns) +  # Input features and calibration
            1 +  # Linear or lattice layer
            int(output_calibration))  # Output calibration

        self.assertLen(model.nodes, expected_num_nodes)
Example #7
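Round-trips all three model config classes (CalibratedLatticeEnsembleConfig, CalibratedLatticeConfig, CalibratedLinearConfig) through get_config()/from_config(), using feature configs that exercise calibration keypoints, unimodality, per-feature regularizers, trust, and dominance options.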
 def test_from_config(self):
   feature_configs = [
       configs.FeatureConfig(
           name='feature_a',
           pwl_calibration_input_keypoints='quantiles',
           pwl_calibration_num_keypoints=8,
           monotonicity=1,
           pwl_calibration_clip_max=100,
       ),
       configs.FeatureConfig(
           name='feature_b',
           lattice_size=3,
           unimodality='valley',
           pwl_calibration_input_keypoints='uniform',
           pwl_calibration_num_keypoints=5,
           pwl_calibration_clip_min=130,
           pwl_calibration_convexity='convex',
           regularizer_configs=[
                configs.RegularizerConfig(name='calib_hessian', l2=3e-3),
           ],
       ),
       configs.FeatureConfig(
           name='feature_c',
           pwl_calibration_input_keypoints=[0.0, 0.5, 1.0],
           reflects_trust_in=[
               configs.TrustConfig(feature_name='feature_a'),
               configs.TrustConfig(feature_name='feature_b', direction=-1),
           ],
           dominates=[
               configs.DominanceConfig(
                   feature_name='feature_d', dominance_type='monotonic'),
           ],
       ),
       configs.FeatureConfig(
           name='feature_d',
           num_buckets=3,
           vocabulary_list=['a', 'b', 'c'],
           default_value=-1,
       ),
   ]
   # First we test CalibratedLatticeEnsembleConfig
   model_config = configs.CalibratedLatticeEnsembleConfig(
       feature_configs=feature_configs,
       lattices=[['feature_a', 'feature_b'], ['feature_c', 'feature_d']],
       separate_calibrators=True,
       regularizer_configs=[
           configs.RegularizerConfig('torsion', l2=1e-4),
       ],
       output_min=0.0,
       output_max=1.0,
       output_calibration=True,
       output_calibration_num_keypoints=5,
       output_initialization=[0.0, 1.0])
   model_config_copy = configs.CalibratedLatticeEnsembleConfig.from_config(
       model_config.get_config(), tfl_custom_objects)
   self.assertDictEqual(model_config.get_config(),
                        model_config_copy.get_config())
   # Next we test CalibratedLatticeConfig
   model_config = configs.CalibratedLatticeConfig(
       feature_configs=feature_configs,
       regularizer_configs=[
           configs.RegularizerConfig('torsion', l2=1e-4),
       ],
       output_min=0.0,
       output_max=1.0,
       output_calibration=True,
       output_calibration_num_keypoints=8,
       output_initialization='quantiles')
   model_config_copy = configs.CalibratedLatticeConfig.from_config(
       model_config.get_config(), tfl_custom_objects)
   self.assertDictEqual(model_config.get_config(),
                        model_config_copy.get_config())
   # Last we test CalibratedLinearConfig
   model_config = configs.CalibratedLinearConfig(
       feature_configs=feature_configs,
       regularizer_configs=[
           configs.RegularizerConfig('calib_hessian', l2=1e-4),
       ],
       use_bias=True,
       output_min=0.0,
       output_max=None,
       output_calibration=True,
       output_initialization='uniform')
   model_config_copy = configs.CalibratedLinearConfig.from_config(
       model_config.get_config(), tfl_custom_objects)
   self.assertDictEqual(model_config.get_config(),
                        model_config_copy.get_config())
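All seven examples follow the same recipe: describe each input with a FeatureConfig, wrap the list in a CalibratedLinearConfig (or a lattice variant), and hand the config to either a premade Keras model or a canned estimator. For reference, a minimal standalone sketch of the premade path; the feature name, keypoints, and toy data are illustrative and not taken from the tests above:

import numpy as np
import tensorflow as tf
import tensorflow_lattice as tfl

# One calibrated, monotonic feature. Premade models need explicit numeric
# calibration keypoints (the 'quantiles' shortcut is only resolved by the
# estimator / feature-analysis path).
feature_configs = [
    tfl.configs.FeatureConfig(
        name='age',  # illustrative feature name
        monotonicity='increasing',
        pwl_calibration_input_keypoints=np.linspace(20.0, 80.0, num=5),
    ),
]

# Same config shape as Examples #3 and #5 above.
model_config = tfl.configs.CalibratedLinearConfig(
    feature_configs=feature_configs,
    use_bias=True,
    output_min=0.0,
    output_max=1.0,
    output_calibration=True,
    output_calibration_num_keypoints=6,
    output_initialization=[0.0, 1.0])

model = tfl.premade.CalibratedLinear(model_config)
model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.01))

# Premade models take one input array per feature, in feature_configs order.
train_xs = [np.random.uniform(20.0, 80.0, size=(100, 1))]
train_ys = (train_xs[0] > 50.0).astype(np.float32)
model.fit(train_xs, train_ys, epochs=10, verbose=0)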