Example #1
  def testAverageMoreThanNumTreesExist(self):
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      adjusted_tree_ensemble_config = (
          tree_config_pb2.DecisionTreeEnsembleConfig())
      # When we ask to average over more trees than exist, the averaging is
      # done across all trees.
      total_num = 100
      for i in range(0, total_num):
        tree = tree_ensemble_config.trees.add()
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)

        tree_ensemble_config.tree_metadata.add().is_finalized = True
        tree_ensemble_config.tree_weights.append(1.0)
        # This is how the weights will look after averaging.
        copy_tree = adjusted_tree_ensemble_config.trees.add()
        _append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)

        adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True
        adjusted_tree_ensemble_config.tree_weights.append(
            1.0 * (total_num - i) / total_num)

      # Prepare learner config WITH AVERAGING.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2
      # We have only 100 trees but we ask to average over 250.
      learner_config.averaging_config.average_last_n_trees = 250

      # No averaging config.
      learner_config_no_averaging = learner_pb2.LearnerConfig()
      learner_config_no_averaging.num_classes = 2

      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="existing")

      # This is how our ensemble will "look" during averaging
      adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(
          ),
          name="adjusted")

      resources.initialize_resources(resources.shared_resources()).run()

      result, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config.SerializeToString(),
          apply_averaging=True,
          reduce_dim=True)

      pattern_result, pattern_dropout_info = self._get_predictions(
          adjusted_tree_ensemble_handle,
          learner_config_no_averaging.SerializeToString(),
          apply_averaging=False,
          reduce_dim=True)

      self.assertAllEqual(result.eval(), pattern_result.eval())
      self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval())
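The adjusted weights built above follow a simple rule: when averaging is requested over more trees than exist, it falls back to all total_num trees, and tree i keeps a linearly decaying weight of (total_num - i) / total_num. A minimal standalone sketch of that rule (plain Python, no TensorFlow; the helper name is hypothetical):

def adjusted_average_weights(total_num, average_last_n):
    # When asked to average over more trees than exist, averaging falls back
    # to all trees, so the effective window is min(average_last_n, total_num).
    n = min(average_last_n, total_num)
    # Tree i keeps a linearly decaying weight; with n == total_num this is
    # exactly the (total_num - i) / total_num used in the test above.
    return [1.0 * (n - i) / n for i in range(total_num)]

weights = adjusted_average_weights(total_num=100, average_last_n=250)
assert weights[0] == 1.0
assert abs(weights[-1] - 0.01) < 1e-9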
Example #2
    def testWithExistingEnsembleAndShrinkage(self):
        with self.test_session():
            # Add shrinkage config.
            learning_rate = 0.0001
            tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            # Add 5 trees with some weights.
            for i in range(0, 5):
                tree = tree_ensemble.trees.add()
                _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
                tree_ensemble.tree_weights.append(i + 1)
                meta = tree_ensemble.tree_metadata.add()
                meta.num_tree_weight_updates = 1
            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0,
                tree_ensemble_config=tree_ensemble.SerializeToString(),
                name="existing")

            # Create non-zero feature importance.
            feature_usage_counts = variables.Variable(
                initial_value=np.array([4, 7], np.int64),
                name="feature_usage_counts",
                trainable=False)
            feature_gains = variables.Variable(
                initial_value=np.array([0.2, 0.8], np.float32),
                name="feature_gains",
                trainable=False)

            resources.initialize_resources(resources.shared_resources()).run()
            variables.initialize_all_variables().run()

            output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            with ops.control_dependencies([
                    ensemble_optimizer_ops.add_trees_to_ensemble(
                        tree_ensemble_handle,
                        self._ensemble_to_add.SerializeToString(),
                        feature_usage_counts, [1, 2],
                        feature_gains, [0.5, 0.3], [[], []],
                        learning_rate=learning_rate)
            ]):
                output_ensemble.ParseFromString(
                    model_ops.tree_ensemble_serialize(tree_ensemble_handle)
                    [1].eval())

            # The weights of the previous trees stay the same; the new (last)
            # tree is added with the shrinkage weight.
            self.assertAllClose([1.0, 2.0, 3.0, 4.0, 5.0, learning_rate],
                                output_ensemble.tree_weights)

            # Check that the number of updates is 1 for every tree (i.e., no
            # old tree weight got adjusted).
            for i in range(0, 6):
                self.assertEqual(
                    1,
                    output_ensemble.tree_metadata[i].num_tree_weight_updates)

            # Ensure feature importance was aggregated correctly.
            self.assertAllEqual([5, 9], feature_usage_counts.eval())
            self.assertArrayNear(
                [0.2 + 0.5 * learning_rate, 0.8 + 0.3 * learning_rate],
                feature_gains.eval(), 1e-6)
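The feature-importance assertions above reduce to a simple accumulation rule: usage counts of the newly added trees are summed in directly, while their gains are scaled by the shrinkage (learning rate) first. A minimal sketch of that rule, assuming this is all the op does to these two variables:

def accumulate_feature_importance(counts, gains, new_counts, new_gains, learning_rate):
    # Counts add up as-is; gains of the added trees are shrunk by the
    # learning rate, matching the assertions in the test above.
    counts = [c + n for c, n in zip(counts, new_counts)]
    gains = [g + learning_rate * n for g, n in zip(gains, new_gains)]
    return counts, gains

counts, gains = accumulate_feature_importance(
    [4, 7], [0.2, 0.8], [1, 2], [0.5, 0.3], learning_rate=0.0001)
assert counts == [5, 9]
assert all(abs(g - e) < 1e-6
           for g, e in zip(gains, [0.2 + 0.5 * 0.0001, 0.8 + 0.3 * 0.0001]))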
Example #3
  def testCreate(self):
    with self.cached_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree = tree_ensemble_config.trees.add()
      _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
      tree_ensemble_config.tree_weights.append(1.0)

      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2

      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=3,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="create_tree")
      resources.initialize_resources(resources.shared_resources()).run()

      result, _ = prediction_ops.gradient_trees_prediction(
          tree_ensemble_handle,
          self._seed, [self._dense_float_tensor], [
              self._sparse_float_indices1, self._sparse_float_indices2
          ], [self._sparse_float_values1, self._sparse_float_values2],
          [self._sparse_float_shape1,
           self._sparse_float_shape2], [self._sparse_int_indices1],
          [self._sparse_int_values1], [self._sparse_int_shape1],
          learner_config=learner_config.SerializeToString(),
          apply_dropout=False,
          apply_averaging=False,
          center_bias=False,
          reduce_dim=True)
      self.assertAllClose(result.eval(), [[-0.4], [-0.4]])
      stamp_token = model_ops.tree_ensemble_stamp_token(tree_ensemble_handle)
      self.assertEqual(stamp_token.eval(), 3)
Example #4
  def testBiasEnsembleMultiClass(self):
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree = tree_ensemble_config.trees.add()
      tree_ensemble_config.tree_metadata.add().is_finalized = True
      leaf = tree.nodes.add().leaf
      _append_to_leaf(leaf, 0, -0.4)
      _append_to_leaf(leaf, 1, 0.9)

      tree_ensemble_config.tree_weights.append(1.0)

      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="multiclass")
      resources.initialize_resources(resources.shared_resources()).run()

      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 3

      result, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config.SerializeToString(),
          reduce_dim=True)
      self.assertAllClose([[-0.4, 0.9], [-0.4, 0.9]], result.eval())

      # Empty dropout.
      self.assertAllEqual([[], []], dropout_info.eval())
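Since the ensemble above holds a single bias tree, every example gets the same leaf vector; with num_classes = 3 and reduce_dim=True the assertion expects num_classes - 1 = 2 output columns. A minimal sketch of that expectation (plain Python; the replication rule is inferred from the assertion above):

num_examples, num_classes = 2, 3
leaf_payload = [-0.4, 0.9]  # values appended for dimensions 0 and 1
# Every example receives the same bias-tree payload; reduce_dim keeps
# num_classes - 1 columns, matching the asserted [[-0.4, 0.9], [-0.4, 0.9]].
predictions = [list(leaf_payload) for _ in range(num_examples)]
assert len(predictions) == num_examples
assert all(len(row) == num_classes - 1 for row in predictions)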
Example #5
    def testCreate(self):
        with self.test_session():
            tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
            tree = tree_ensemble_config.trees.add()
            _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
            tree_ensemble_config.tree_weights.append(1.0)

            # Prepare learner config.
            learner_config = learner_pb2.LearnerConfig()
            learner_config.num_classes = 2

            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=3,
                tree_ensemble_config=tree_ensemble_config.SerializeToString(),
                name="create_tree")
            resources.initialize_resources(resources.shared_resources()).run()

            result, _, _ = prediction_ops.gradient_trees_prediction(
                tree_ensemble_handle,
                self._seed, [self._dense_float_tensor],
                [self._sparse_float_indices1, self._sparse_float_indices2],
                [self._sparse_float_values1, self._sparse_float_values2],
                [self._sparse_float_shape1, self._sparse_float_shape2],
                [self._sparse_int_indices1], [self._sparse_int_values1],
                [self._sparse_int_shape1],
                learner_config=learner_config.SerializeToString(),
                apply_dropout=False,
                apply_averaging=False,
                center_bias=False,
                reduce_dim=True)
            self.assertAllClose(result.eval(), [[-0.4], [-0.4]])
            stamp_token = model_ops.tree_ensemble_stamp_token(
                tree_ensemble_handle)
            self.assertEqual(stamp_token.eval(), 3)
Example #6
  def testTreeFinalized(self):
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Depth 3 tree.
      tree1 = tree_ensemble_config.trees.add()
      _set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
      _set_float_split(tree1.nodes.add()
                       .sparse_float_binary_split_default_left.split, 0, -20.0,
                       3, 4)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.2)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.3)
      _set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split,
                                0, 9, 5, 6)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.5)
      _append_to_leaf(tree1.nodes.add().leaf, 0, 0.6)

      tree_ensemble_config.tree_weights.append(1.0)
      tree_ensemble_config.tree_metadata.add().is_finalized = True

      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="full_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()

      result = prediction_ops.gradient_trees_partition_examples(
          tree_ensemble_handle, [self._dense_float_tensor], [
              self._sparse_float_indices1, self._sparse_float_indices2
          ], [self._sparse_float_values1, self._sparse_float_values2],
          [self._sparse_float_shape1,
           self._sparse_float_shape2], [self._sparse_int_indices1],
          [self._sparse_int_values1], [self._sparse_int_shape1])

      self.assertAllEqual([0, 0], result.eval())
Example #7
    def testTrainFnNonChiefWithCentering(self):
        """Tests the train function running on worker with bias centering."""
        with self.test_session():
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
            learner_config.num_classes = 2
            learner_config.regularization.l1 = 0
            learner_config.regularization.l2 = 0
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            features = {}
            features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=False,
                num_ps_replicas=0,
                center_bias=True,
                ensemble_handle=ensemble_handle,
                examples_per_layer=1,
                learner_config=learner_config,
                features=features)

            predictions = array_ops.constant([[0.0], [1.0], [0.0], [2.0]],
                                             dtype=dtypes.float32)
            partition_ids = array_ops.zeros([4], dtypes.int32)
            ensemble_stamp = variables.Variable(initial_value=0,
                                                name="ensemble_stamp",
                                                trainable=False,
                                                dtype=dtypes.int64)

            predictions_dict = {
                "predictions": predictions,
                "predictions_no_dropout": predictions,
                "partition_ids": partition_ids,
                "ensemble_stamp": ensemble_stamp
            }

            labels = array_ops.ones([4, 1], dtypes.float32)
            weights = array_ops.ones([4, 1], dtypes.float32)
            # Create train op.
            train_op = gbdt_model.train(loss=math_ops.reduce_mean(
                _squared_loss(labels, weights, predictions)),
                                        predictions_dict=predictions_dict,
                                        labels=labels)
            variables.global_variables_initializer().run()
            resources.initialize_resources(resources.shared_resources()).run()

            # Regardless of how many times the train op is run, a non-chief worker
            # can only accumulate stats, so the tree ensemble never changes.
            for _ in range(5):
                train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEquals(len(output.trees), 0)
            self.assertEquals(len(output.tree_weights), 0)
            self.assertEquals(stamp_token.eval(), 0)
Example #8
  def testDropout(self):
    with self.test_session():
      # Empty tree ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 999 trees with some weights.
      for i in range(0, 999):
        tree = tree_ensemble_config.trees.add()
        tree_ensemble_config.tree_metadata.add().is_finalized = True
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble_config.tree_weights.append(i + 1)

      # Prepare learner/dropout config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5
      learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
      learner_config.num_classes = 2

      # Apply dropout.
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="existing")
      resources.initialize_resources(resources.shared_resources()).run()

      result, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config.SerializeToString(),
          apply_dropout=True,
          apply_averaging=False,
          center_bias=False,
          reduce_dim=True)

      # We expect approx 500 trees were dropped.
      dropout_info = dropout_info.eval()
      self.assertIn(dropout_info[0].size, range(400, 601))
      self.assertEqual(dropout_info[0].size, dropout_info[1].size)

      for i in range(dropout_info[0].size):
        dropped_index = dropout_info[0][i]
        dropped_weight = dropout_info[1][i]
        # We constructed the trees so that tree index + 1 equals the tree weight,
        # so we can check the weights for the dropped trees here.
        self.assertEqual(dropped_index + 1, dropped_weight)

      # Don't apply dropout.
      result_no_dropout, no_dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config.SerializeToString(),
          apply_dropout=False,
          apply_averaging=False,
          center_bias=False,
          reduce_dim=True)

      self.assertEqual(result.eval().size, result_no_dropout.eval().size)
      for i in range(result.eval().size):
        self.assertNotEqual(result.eval()[i], result_no_dropout.eval()[i])

      # We expect none of the trees were dropped.
      self.assertAllEqual([[], []], no_dropout_info.eval())
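The "approx 500 trees dropped" expectation above is just the mean of a binomial draw: 999 finalized trees, each kept or dropped independently with probability 0.5, gives about 500 drops, and the test tolerates anything in [400, 600]. A small simulation sketch of that reasoning (plain Python, assuming independent per-tree drops):

import random

random.seed(0)
num_trees, dropout_probability = 999, 0.5
dropped = [i for i in range(num_trees) if random.random() < dropout_probability]
# Expected number of drops is num_trees * dropout_probability ~= 500.
assert 400 <= len(dropped) <= 600
# Tree i was given weight i + 1 above, which is why the test can check
# dropped_weight == dropped_index + 1 for every dropped tree.
weights = [i + 1 for i in range(num_trees)]
assert all(weights[i] == i + 1 for i in dropped)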
Example #9
  def testWithExistingEnsembleAndShrinkage(self):
    with self.test_session():
      # Add shrinkage config.
      learning_rate = 0.0001
      tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 5 trees with some weights.
      for i in range(0, 5):
        tree = tree_ensemble.trees.add()
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble.tree_weights.append(i + 1)
        meta = tree_ensemble.tree_metadata.add()
        meta.num_tree_weight_updates = 1
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble.SerializeToString(),
          name="existing")

      # Create non-zero feature importance.
      feature_usage_counts = variables.Variable(
          initial_value=np.array([4, 7], np.int64),
          name="feature_usage_counts",
          trainable=False)
      feature_gains = variables.Variable(
          initial_value=np.array([0.2, 0.8], np.float32),
          name="feature_gains",
          trainable=False)

      resources.initialize_resources(resources.shared_resources()).run()
      variables.initialize_all_variables().run()

      output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      with ops.control_dependencies([
          ensemble_optimizer_ops.add_trees_to_ensemble(
              tree_ensemble_handle,
              self._ensemble_to_add.SerializeToString(),
              feature_usage_counts, [1, 2],
              feature_gains, [0.5, 0.3], [[], []],
              learning_rate=learning_rate)
      ]):
        output_ensemble.ParseFromString(
            model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())

      # The weights of the previous trees stay the same; the new (last) tree is
      # added with the shrinkage weight.
      self.assertAllClose([1.0, 2.0, 3.0, 4.0, 5.0, learning_rate],
                          output_ensemble.tree_weights)

      # Check that the number of updates is 1 for every tree (i.e., no old tree
      # weight got adjusted).
      for i in range(0, 6):
        self.assertEqual(
            1, output_ensemble.tree_metadata[i].num_tree_weight_updates)

      # Ensure feature importance was aggregated correctly.
      self.assertAllEqual([5, 9], feature_usage_counts.eval())
      self.assertArrayNear(
          [0.2 + 0.5 * learning_rate, 0.8 + 0.3 * learning_rate],
          feature_gains.eval(), 1e-6)
Example #10
    def testPredictFn(self):
        """Tests the predict function."""
        with self.test_session() as sess:
            # Create ensemble with one bias node.
            ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
            text_format.Merge(
                """
          trees {
            nodes {
              leaf {
                vector {
                  value: 0.25
                }
              }
            }
          }
          tree_weights: 1.0
          tree_metadata {
            num_tree_weight_updates: 1
            num_layers_grown: 1
            is_finalized: true
          }""", ensemble_config)
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=3,
                tree_ensemble_config=ensemble_config.SerializeToString(),
                name="tree_ensemble")
            resources.initialize_resources(resources.shared_resources()).run()
            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
            learner_config.num_classes = 2
            learner_config.regularization.l1 = 0
            learner_config.regularization.l2 = 0
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            features = {}
            features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=False,
                num_ps_replicas=0,
                center_bias=True,
                ensemble_handle=ensemble_handle,
                examples_per_layer=1,
                learner_config=learner_config,
                features=features)

            # Create predict op.
            mode = model_fn.ModeKeys.EVAL
            predictions_dict = sess.run(gbdt_model.predict(mode))
            self.assertEquals(predictions_dict["ensemble_stamp"], 3)
            self.assertAllClose(predictions_dict["predictions"],
                                [[0.25], [0.25], [0.25], [0.25]])
            self.assertAllClose(predictions_dict["partition_ids"],
                                [0, 0, 0, 0])
Example #11
  def testPredictFn(self):
    """Tests the predict function."""
    with self.test_session() as sess:
      # Create ensemble with one bias node.
      ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      text_format.Merge("""
          trees {
            nodes {
              leaf {
                vector {
                  value: 0.25
                }
              }
            }
          }
          tree_weights: 1.0
          tree_metadata {
            num_tree_weight_updates: 1
            num_layers_grown: 1
            is_finalized: true
          }""", ensemble_config)
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=3,
          tree_ensemble_config=ensemble_config.SerializeToString(),
          name="tree_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=False,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          features=features)

      # Create predict op.
      mode = model_fn.ModeKeys.EVAL
      predictions_dict = sess.run(gbdt_model.predict(mode))
      self.assertEquals(predictions_dict["ensemble_stamp"], 3)
      self.assertAllClose(predictions_dict["predictions"], [[0.25], [0.25],
                                                            [0.25], [0.25]])
      self.assertAllClose(predictions_dict["partition_ids"], [0, 0, 0, 0])
Example #12
  def testMetadataMissing(self):
    # Sometimes we want to do prediction on trees that are not added to ensemble
    # (for example in
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Bias tree.
      tree1 = tree_ensemble_config.trees.add()
      _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)

      # Depth 3 tree.
      tree2 = tree_ensemble_config.trees.add()
      # We are not setting the tree_ensemble_config.tree_metadata in this test.
      _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
      _set_float_split(tree2.nodes.add()
                       .sparse_float_binary_split_default_left.split, 0, -20.0,
                       3, 4)
      _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
      _append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
      _set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
                                0, 9, 5, 6)
      _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
      _append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)

      tree_ensemble_config.tree_weights.append(1.0)
      tree_ensemble_config.tree_weights.append(1.0)

      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="full_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()

      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2

      result, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config.SerializeToString(),
          reduce_dim=True)

      # The first example will get bias -0.4 from first tree and
      # leaf 4 payload of -0.9 hence -1.3, the second example will
      # get the same bias -0.4 and leaf 3 payload (sparse feature missing)
      # of 1.2 hence 0.8.
      self.assertAllClose([[-1.3], [0.8]], result.eval())

      # Empty dropout.
      self.assertAllEqual([[], []], dropout_info.eval())
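The expected values above come from plain additive scoring: each example gets the bias tree's -0.4 plus the payload of the leaf it reaches in the second tree, with both tree weights equal to 1.0. A quick check of that arithmetic in plain Python:

bias = -0.4
# Example 1 reaches the leaf with payload -0.9; example 2 (sparse feature
# missing) reaches the leaf with payload 1.2.
leaf_payloads = [-0.9, 1.2]
predictions = [bias + p for p in leaf_payloads]
assert all(abs(p - e) < 1e-9 for p, e in zip(predictions, [-1.3, 0.8]))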
Example #13
 def testUsedHandlers(self):
   with self.cached_session():
     tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
     tree_ensemble_config.growing_metadata.used_handler_ids.append(1)
     tree_ensemble_config.growing_metadata.used_handler_ids.append(5)
     stamp_token = 3
     tree_ensemble_handle = model_ops.tree_ensemble_variable(
         stamp_token=stamp_token,
         tree_ensemble_config=tree_ensemble_config.SerializeToString(),
         name="create_tree")
     resources.initialize_resources(resources.shared_resources()).run()
     result = model_ops.tree_ensemble_used_handlers(
         tree_ensemble_handle, stamp_token, num_all_handlers=6)
     self.assertAllEqual([0, 1, 0, 0, 0, 1], result.used_handlers_mask.eval())
     self.assertEqual(2, result.num_used_handlers.eval())
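The mask asserted above simply marks which handler ids out of num_all_handlers appear in growing_metadata.used_handler_ids. A minimal sketch of that mapping (plain Python; the helper name is hypothetical):

def used_handlers_mask(used_handler_ids, num_all_handlers):
    # One slot per handler, set to 1 if that handler id was recorded as used.
    used = set(used_handler_ids)
    return [1 if i in used else 0 for i in range(num_all_handlers)]

mask = used_handlers_mask([1, 5], num_all_handlers=6)
assert mask == [0, 1, 0, 0, 0, 1]
assert sum(mask) == 2  # num_used_handlers in the test above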
Example #14
  def testExcludeNonFinalTree(self):
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Bias tree.
      tree1 = tree_ensemble_config.trees.add()
      tree_ensemble_config.tree_metadata.add().is_finalized = True
      _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)

      # Depth 3 tree.
      tree2 = tree_ensemble_config.trees.add()
      tree_ensemble_config.tree_metadata.add().is_finalized = False
      _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
      _set_float_split(tree2.nodes.add()
                       .sparse_float_binary_split_default_left.split, 0, -20.0,
                       3, 4)
      _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
      _append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
      _set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
                                0, 9, 5, 6)
      _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
      _append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)

      tree_ensemble_config.tree_weights.append(1.0)
      tree_ensemble_config.tree_weights.append(1.0)

      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="full_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()

      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2
      learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE

      result, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config.SerializeToString(),
          reduce_dim=True)

      # All the examples should get only the bias since the second tree is
      # non-finalized.
      self.assertAllClose([[-0.4], [-0.4]], result.eval())

      # Empty dropout.
      self.assertAllEqual([[], []], dropout_info.eval())
Example #15
    def testWithExistingEnsemble(self):
        with self.test_session():
            # Create existing tree ensemble.
            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0,
                tree_ensemble_config=self._tree_ensemble.SerializeToString(),
                name="existing")
            # Create non-zero feature importance.
            feature_usage_counts = variables.Variable(
                initial_value=np.array([0, 4, 1], np.int64),
                name="feature_usage_counts",
                trainable=False)
            feature_gains = variables.Variable(
                initial_value=np.array([0.0, 0.3, 0.05], np.float32),
                name="feature_gains",
                trainable=False)

            resources.initialize_resources(resources.shared_resources()).run()
            variables.initialize_all_variables().run()
            output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            with ops.control_dependencies([
                    ensemble_optimizer_ops.add_trees_to_ensemble(
                        tree_ensemble_handle,
                        self._ensemble_to_add.SerializeToString(),
                        feature_usage_counts, [1, 2, 0],
                        feature_gains, [0.02, 0.1, 0.0], [[], []],
                        learning_rate=1)
            ]):
                output_ensemble.ParseFromString(
                    model_ops.tree_ensemble_serialize(tree_ensemble_handle)
                    [1].eval())

            # Output.
            self.assertEqual(3, len(output_ensemble.trees))
            self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[2])

            self.assertAllEqual([1.0, 1.0, 1.0], output_ensemble.tree_weights)

            self.assertEqual(
                2, output_ensemble.tree_metadata[0].num_tree_weight_updates)
            self.assertEqual(
                3, output_ensemble.tree_metadata[1].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[2].num_tree_weight_updates)
            self.assertAllEqual([1, 6, 1], feature_usage_counts.eval())
            self.assertArrayNear([0.02, 0.4, 0.05], feature_gains.eval(), 1e-6)
Example #16
  def testFullEnsembleMultiNotClassTreePerClassStrategyDenseVector(self):
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Bias tree with payloads only for the second and third classes.
      tree1 = tree_ensemble_config.trees.add()
      tree_ensemble_config.tree_metadata.add().is_finalized = True
      _append_multi_values_to_dense_leaf(tree1.nodes.add().leaf, [0, -0.2, -2])

      # Depth 2 tree.
      tree2 = tree_ensemble_config.trees.add()
      tree_ensemble_config.tree_metadata.add().is_finalized = True
      _set_float_split(tree2.nodes.add()
                       .sparse_float_binary_split_default_right.split, 1, 4.0,
                       1, 2)
      _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)
      _append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0.5, 0, 0])
      _append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0, 1.2, -0.7])
      _append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [-0.9, 0, 0])

      tree_ensemble_config.tree_weights.append(1.0)
      tree_ensemble_config.tree_weights.append(1.0)

      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="ensemble_multi_class")
      resources.initialize_resources(resources.shared_resources()).run()

      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 3
      learner_config.multi_class_strategy = (
          learner_pb2.LearnerConfig.FULL_HESSIAN)

      result, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config.SerializeToString(),
          reduce_dim=False)
      # The first example will get a bias of -0.2 for class 1 and -2 for class 2
      # from the first tree, plus the leaf 2 payload (sparse feature missing) of
      # 0.5, hence [0.5, -0.2, -2.0]. The second example will get the same bias
      # plus the leaf 3 payload of 1.2 for class 1 and -0.7 for class 2, hence
      # [0.0, 1.0, -2.7].
      self.assertAllClose([[0.5, -0.2, -2.0], [0, 1.0, -2.7]], result.eval())

      # Empty dropout.
      self.assertAllEqual([[], []], dropout_info.eval())
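The expected rows above are the element-wise sum of the bias tree's dense leaf and the leaf each example reaches in the second tree (both tree weights are 1.0). A quick check of that per-class addition in plain Python:

bias_leaf = [0.0, -0.2, -2.0]
# Example 1 (sparse feature missing) reaches the leaf [0.5, 0, 0];
# example 2 reaches the leaf [0, 1.2, -0.7].
reached_leaves = [[0.5, 0.0, 0.0], [0.0, 1.2, -0.7]]
predictions = [[b + v for b, v in zip(bias_leaf, leaf)] for leaf in reached_leaves]
expected = [[0.5, -0.2, -2.0], [0.0, 1.0, -2.7]]
assert all(abs(p - e) < 1e-9
           for row_p, row_e in zip(predictions, expected)
           for p, e in zip(row_p, row_e))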
Example #17
  def testWithExistingEnsemble(self):
    with self.test_session():
      # Create existing tree ensemble.
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=self._tree_ensemble.SerializeToString(),
          name="existing")
      # Create non-zero feature importance.
      feature_usage_counts = variables.Variable(
          initial_value=np.array([0, 4, 1], np.int64),
          name="feature_usage_counts",
          trainable=False)
      feature_gains = variables.Variable(
          initial_value=np.array([0.0, 0.3, 0.05], np.float32),
          name="feature_gains",
          trainable=False)

      resources.initialize_resources(resources.shared_resources()).run()
      variables.initialize_all_variables().run()
      output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      with ops.control_dependencies([
          ensemble_optimizer_ops.add_trees_to_ensemble(
              tree_ensemble_handle,
              self._ensemble_to_add.SerializeToString(),
              feature_usage_counts, [1, 2, 0],
              feature_gains, [0.02, 0.1, 0.0], [[], []],
              learning_rate=1)
      ]):
        output_ensemble.ParseFromString(
            model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())

      # Output.
      self.assertEqual(3, len(output_ensemble.trees))
      self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[2])

      self.assertAllEqual([1.0, 1.0, 1.0], output_ensemble.tree_weights)

      self.assertEqual(2,
                       output_ensemble.tree_metadata[0].num_tree_weight_updates)
      self.assertEqual(3,
                       output_ensemble.tree_metadata[1].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[2].num_tree_weight_updates)
      self.assertAllEqual([1, 6, 1], feature_usage_counts.eval())
      self.assertArrayNear([0.02, 0.4, 0.05], feature_gains.eval(), 1e-6)
Example #18
    def testWithEmptyEnsembleAndShrinkage(self):
        with self.test_session():
            # Add shrinkage config.
            learning_rate = 0.0001
            tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0,
                tree_ensemble_config=tree_ensemble.SerializeToString(),
                name="existing")

            # Create zero feature importance.
            feature_usage_counts = variables.Variable(
                initial_value=np.array([0, 0], np.int64),
                name="feature_usage_counts",
                trainable=False)
            feature_gains = variables.Variable(
                initial_value=np.array([0.0, 0.0], np.float32),
                name="feature_gains",
                trainable=False)

            resources.initialize_resources(resources.shared_resources()).run()
            variables.initialize_all_variables().run()

            output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            with ops.control_dependencies([
                    ensemble_optimizer_ops.add_trees_to_ensemble(
                        tree_ensemble_handle,
                        self._ensemble_to_add.SerializeToString(),
                        feature_usage_counts, [1, 2],
                        feature_gains, [0.5, 0.3], [[], []],
                        learning_rate=learning_rate)
            ]):
                output_ensemble.ParseFromString(
                    model_ops.tree_ensemble_serialize(tree_ensemble_handle)
                    [1].eval())

            # New tree is added with shrinkage weight.
            self.assertAllClose([learning_rate], output_ensemble.tree_weights)
            self.assertEqual(
                1, output_ensemble.tree_metadata[0].num_tree_weight_updates)
            self.assertAllEqual([1, 2], feature_usage_counts.eval())
            self.assertArrayNear([0.5 * learning_rate, 0.3 * learning_rate],
                                 feature_gains.eval(), 1e-6)
Example #19
    def testWithEmptyEnsemble(self):
        with self.test_session():
            # Create an empty ensemble.
            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="empty")

            # Create zero feature importance.
            feature_usage_counts = variables.Variable(
                initial_value=array_ops.zeros([1], dtypes.int64),
                name="feature_usage_counts",
                trainable=False)
            feature_gains = variables.Variable(
                initial_value=array_ops.zeros([1], dtypes.float32),
                name="feature_gains",
                trainable=False)

            resources.initialize_resources(resources.shared_resources()).run()
            variables.initialize_all_variables().run()

            with ops.control_dependencies([
                    ensemble_optimizer_ops.add_trees_to_ensemble(
                        tree_ensemble_handle,
                        self._ensemble_to_add.SerializeToString(),
                        feature_usage_counts, [2],
                        feature_gains, [0.4], [[]],
                        learning_rate=1.0)
            ]):
                result = model_ops.tree_ensemble_serialize(
                    tree_ensemble_handle)[1]

            # Output.
            output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            output_ensemble.ParseFromString(result.eval())
            self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[0])
            self.assertEqual(1, len(output_ensemble.trees))

            self.assertAllEqual([1.0], output_ensemble.tree_weights)

            self.assertEqual(
                1, output_ensemble.tree_metadata[0].num_tree_weight_updates)

            self.assertAllEqual([2], feature_usage_counts.eval())
            self.assertArrayNear([0.4], feature_gains.eval(), 1e-6)
Example #20
  def testEnsembleEmpty(self):
    with self.test_session():
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()

      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="full_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()

      result = prediction_ops.gradient_trees_partition_examples(
          tree_ensemble_handle, [self._dense_float_tensor], [
              self._sparse_float_indices1, self._sparse_float_indices2
          ], [self._sparse_float_values1, self._sparse_float_values2],
          [self._sparse_float_shape1,
           self._sparse_float_shape2], [self._sparse_int_indices1],
          [self._sparse_int_values1], [self._sparse_int_shape1])

      self.assertAllEqual([0, 0], result.eval())
Example #21
  def testWithEmptyEnsembleAndShrinkage(self):
    with self.test_session():
      # Add shrinkage config.
      learning_rate = 0.0001
      tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble.SerializeToString(),
          name="existing")

      # Create zero feature importance.
      feature_usage_counts = variables.Variable(
          initial_value=np.array([0, 0], np.int64),
          name="feature_usage_counts",
          trainable=False)
      feature_gains = variables.Variable(
          initial_value=np.array([0.0, 0.0], np.float32),
          name="feature_gains",
          trainable=False)

      resources.initialize_resources(resources.shared_resources()).run()
      variables.initialize_all_variables().run()

      output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      with ops.control_dependencies([
          ensemble_optimizer_ops.add_trees_to_ensemble(
              tree_ensemble_handle,
              self._ensemble_to_add.SerializeToString(),
              feature_usage_counts, [1, 2],
              feature_gains, [0.5, 0.3], [[], []],
              learning_rate=learning_rate)
      ]):
        output_ensemble.ParseFromString(
            model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())

      # New tree is added with shrinkage weight.
      self.assertAllClose([learning_rate], output_ensemble.tree_weights)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[0].num_tree_weight_updates)
      self.assertAllEqual([1, 2], feature_usage_counts.eval())
      self.assertArrayNear([0.5 * learning_rate, 0.3 * learning_rate],
                           feature_gains.eval(), 1e-6)
Example #22
  def testDropOutZeroProb(self):
    with self.test_session():
      # Empty tree ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 999 trees with some weights.
      for i in range(0, 999):
        tree = tree_ensemble_config.trees.add()
        tree_ensemble_config.tree_metadata.add().is_finalized = True
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble_config.tree_weights.append(i + 1)

      # Dropout with 0 probability.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.dropout.dropout_probability = 0.0
      learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
      learner_config.num_classes = 2

      # Apply dropout, but expect nothing dropped.
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="existing")
      resources.initialize_resources(resources.shared_resources()).run()

      result, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config.SerializeToString(),
          apply_dropout=True,
          apply_averaging=False,
          center_bias=False,
          reduce_dim=True)

      result_no_dropout, _ = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config.SerializeToString(),
          apply_dropout=False,
          apply_averaging=False,
          center_bias=False,
          reduce_dim=True)

      self.assertAllEqual([[], []], dropout_info.eval())
      self.assertAllClose(result.eval(), result_no_dropout.eval())
Example #23
  def testWithEmptyEnsemble(self):
    with self.test_session():
      # Create an empty ensemble.
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="empty")

      # Create zero feature importance.
      feature_usage_counts = variables.Variable(
          initial_value=array_ops.zeros([1], dtypes.int64),
          name="feature_usage_counts",
          trainable=False)
      feature_gains = variables.Variable(
          initial_value=array_ops.zeros([1], dtypes.float32),
          name="feature_gains",
          trainable=False)

      resources.initialize_resources(resources.shared_resources()).run()
      variables.initialize_all_variables().run()

      with ops.control_dependencies([
          ensemble_optimizer_ops.add_trees_to_ensemble(
              tree_ensemble_handle,
              self._ensemble_to_add.SerializeToString(),
              feature_usage_counts, [2],
              feature_gains, [0.4], [[]],
              learning_rate=1.0)
      ]):
        result = model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1]

      # Output.
      output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      output_ensemble.ParseFromString(result.eval())
      self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[0])
      self.assertEqual(1, len(output_ensemble.trees))

      self.assertAllEqual([1.0], output_ensemble.tree_weights)

      self.assertEqual(1,
                       output_ensemble.tree_metadata[0].num_tree_weight_updates)

      self.assertAllEqual([2], feature_usage_counts.eval())
      self.assertArrayNear([0.4], feature_gains.eval(), 1e-6)
Example #24
  def testEmptyEnsemble(self):
    with self.test_session():
      # Empty tree ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()

      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="empty")
      resources.initialize_resources(resources.shared_resources()).run()

      # Prepare learner config.
      learner_config = learner_pb2.LearnerConfig()
      learner_config.num_classes = 2

      result, dropout_info = self._get_predictions(
          tree_ensemble_handle,
          learner_config=learner_config.SerializeToString(),
          reduce_dim=True)
      self.assertAllEqual([[0], [0]], result.eval())
      # Empty dropout.
      self.assertAllEqual([[], []], dropout_info.eval())
Example #25
def model_builder(features, labels, mode, params, config):
    """Multi-machine batch gradient descent tree model.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * learner_config: A config for the learner.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * examples_per_layer: Number of examples to accumulate before growing a
          layer. It can also be a function that computes the number of examples
          based on the depth of the layer that's being built.
      * weight_column_name: The name of weight column.
      * center_bias: Whether a separate tree should be created for first fitting
          the bias.
      * num_trees: Number of trees to grow; when set, a training hook stops
          training after this many trees have been built.
    config: `RunConfig` of the estimator.

  Returns:
    A `ModelFnOps` object.
  Raises:
    ValueError: if inputs are not valid.
  """
    head = params["head"]
    learner_config = params["learner_config"]
    examples_per_layer = params["examples_per_layer"]
    feature_columns = params["feature_columns"]
    weight_column_name = params["weight_column_name"]
    num_trees = params["num_trees"]
    if features is None:
        raise ValueError("At least one feature must be specified.")

    if config is None:
        raise ValueError("Missing estimator RunConfig.")

    center_bias = params["center_bias"]

    # Make a shallow copy of features to ensure downstream usage
    # is unaffected by modifications in the model function.
    training_features = copy.copy(features)
    training_features.pop(weight_column_name, None)
    global_step = training_util.get_global_step()
    with ops.device(global_step.device):
        ensemble_handle = model_ops.tree_ensemble_variable(
            stamp_token=0,
            tree_ensemble_config="",  # Initialize an empty ensemble.
            name="ensemble_model")

    # Create GBDT model.
    gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
        is_chief=config.is_chief,
        num_ps_replicas=config.num_ps_replicas,
        ensemble_handle=ensemble_handle,
        center_bias=center_bias,
        examples_per_layer=examples_per_layer,
        learner_config=learner_config,
        feature_columns=feature_columns,
        features=features)
    with ops.name_scope("gbdt", "gbdt_optimizer"):
        predictions_dict = gbdt_model.predict(mode)
        logits = predictions_dict["predictions"]

        def _train_op_fn(loss):
            """Returns the op to optimize the loss."""
            update_op = gbdt_model.train(loss, predictions_dict, labels)
            with ops.control_dependencies(
                [update_op]), (ops.colocate_with(global_step)):
                update_op = state_ops.assign_add(global_step, 1).op
                return update_op

    model_fn_ops = head.create_model_fn_ops(features=features,
                                            mode=mode,
                                            labels=labels,
                                            train_op_fn=_train_op_fn,
                                            logits=logits)
    if num_trees:
        if center_bias:
            num_trees += 1
        finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor(
        )
        model_fn_ops.training_hooks.append(
            trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                          finalized_trees))
    return model_fn_ops
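The docstring above makes params the main contract of model_builder. Below is a hedged sketch of what a caller might put in it; all values are illustrative, the head and feature columns are left as placeholders because their construction depends on the surrounding contrib.learn setup, and the examples_per_layer schedule is hypothetical:

def examples_per_layer_fn(layer_depth):
    # Hypothetical schedule: require more examples before growing deeper layers.
    return 1000 * (2 ** layer_depth)

params = {
    "head": None,                 # placeholder for a `Head` instance
    "learner_config": None,       # placeholder for a learner_pb2.LearnerConfig
    "feature_columns": [],        # placeholder for the model's feature columns
    "examples_per_layer": examples_per_layer_fn,
    "weight_column_name": "example_weight",
    "num_trees": 100,
    "center_bias": True,
}
assert params["examples_per_layer"](2) == 4000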
Example #26
  def testSerialization(self):
    with ops.Graph().as_default() as graph:
      with self.session(graph):
        tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
        # Bias tree only for second class.
        tree1 = tree_ensemble_config.trees.add()
        _append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)

        tree_ensemble_config.tree_weights.append(1.0)

        # Depth 2 tree.
        tree2 = tree_ensemble_config.trees.add()
        tree_ensemble_config.tree_weights.append(1.0)
        _set_float_split(tree2.nodes.add()
                         .sparse_float_binary_split_default_right.split, 1, 4.0,
                         1, 2)
        _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3,
                         4)
        _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
        _append_to_leaf(tree2.nodes.add().leaf, 1, 1.2)
        _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)

        tree_ensemble_handle = model_ops.tree_ensemble_variable(
            stamp_token=7,
            tree_ensemble_config=tree_ensemble_config.SerializeToString(),
            name="saver_tree")
        stamp_token, serialized_config = model_ops.tree_ensemble_serialize(
            tree_ensemble_handle)
        resources.initialize_resources(resources.shared_resources()).run()
        self.assertEqual(stamp_token.eval(), 7)
        serialized_config = serialized_config.eval()

    with ops.Graph().as_default() as graph:
      with self.session(graph):
        tree_ensemble_handle2 = model_ops.tree_ensemble_variable(
            stamp_token=9,
            tree_ensemble_config=serialized_config,
            name="saver_tree2")
        resources.initialize_resources(resources.shared_resources()).run()

        # Prepare learner config.
        learner_config = learner_pb2.LearnerConfig()
        learner_config.num_classes = 3

        result, _ = prediction_ops.gradient_trees_prediction(
            tree_ensemble_handle2,
            self._seed, [self._dense_float_tensor], [
                self._sparse_float_indices1, self._sparse_float_indices2
            ], [self._sparse_float_values1, self._sparse_float_values2],
            [self._sparse_float_shape1,
             self._sparse_float_shape2], [self._sparse_int_indices1],
            [self._sparse_int_values1], [self._sparse_int_shape1],
            learner_config=learner_config.SerializeToString(),
            apply_dropout=False,
            apply_averaging=False,
            center_bias=False,
            reduce_dim=True)

        # Re-serialize tree.
        stamp_token2, serialized_config2 = model_ops.tree_ensemble_serialize(
            tree_ensemble_handle2)

        # The first example will get bias class 1 -0.2 from first tree and
        # leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2],
        # the second example will get the same bias class 1 -0.2 and leaf 3
        # payload of class 1 1.2 hence [0.0, 1.0].
        self.assertEqual(stamp_token2.eval(), 9)

        # Class 2 does have scores in the leaf => it gets score 0.
        self.assertEqual(serialized_config2.eval(), serialized_config)
        self.assertAllClose(result.eval(), [[0.5, -0.2], [0, 1.0]])
Example #27
  def testRestore(self):
    # Calling self.cached_session() without a graph specified results in
    # TensorFlowTestCase caching the session and returning the same one
    # every time. In this test, we need to create two different sessions,
    # which is why we create a graph and pass it to self.session() to
    # ensure no caching occurs under the hood.
    save_path = os.path.join(self.get_temp_dir(), "restore-test")
    with ops.Graph().as_default() as graph:
      with self.session(graph) as sess:
        # Prepare learner config.
        learner_config = learner_pb2.LearnerConfig()
        learner_config.num_classes = 2

        # Add the first tree and save.
        tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
        tree = tree_ensemble_config.trees.add()
        tree_ensemble_config.tree_metadata.add().is_finalized = True
        tree_ensemble_config.tree_weights.append(1.0)
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.1)
        tree_ensemble_handle = model_ops.tree_ensemble_variable(
            stamp_token=3,
            tree_ensemble_config=tree_ensemble_config.SerializeToString(),
            name="restore_tree")
        resources.initialize_resources(resources.shared_resources()).run()
        variables.global_variables_initializer().run()
        my_saver = saver.Saver()

        # Add the second tree and replace the ensemble of the handle.
        tree2 = tree_ensemble_config.trees.add()
        tree_ensemble_config.tree_weights.append(1.0)
        _append_to_leaf(tree2.nodes.add().leaf, 0, -1.0)
        # Predict to confirm.
        with ops.control_dependencies([
            model_ops.tree_ensemble_deserialize(
                tree_ensemble_handle,
                stamp_token=3,
                tree_ensemble_config=tree_ensemble_config.SerializeToString())
        ]):
          result, _ = prediction_ops.gradient_trees_prediction(
              tree_ensemble_handle,
              self._seed, [self._dense_float_tensor], [
                  self._sparse_float_indices1, self._sparse_float_indices2
              ], [self._sparse_float_values1, self._sparse_float_values2],
              [self._sparse_float_shape1,
               self._sparse_float_shape2], [self._sparse_int_indices1],
              [self._sparse_int_values1], [self._sparse_int_shape1],
              learner_config=learner_config.SerializeToString(),
              apply_dropout=False,
              apply_averaging=False,
              center_bias=False,
              reduce_dim=True)
        self.assertAllClose([[-1.1], [-1.1]], result.eval())
        # Save before adding other trees.
        val = my_saver.save(sess, save_path)
        self.assertEqual(save_path, val)

        # Add more trees after saving.
        tree3 = tree_ensemble_config.trees.add()
        tree_ensemble_config.tree_weights.append(1.0)
        _append_to_leaf(tree3.nodes.add().leaf, 0, -10.0)
        # Predict to confirm.
        with ops.control_dependencies([
            model_ops.tree_ensemble_deserialize(
                tree_ensemble_handle,
                stamp_token=3,
                tree_ensemble_config=tree_ensemble_config.SerializeToString())
        ]):
          result, _ = prediction_ops.gradient_trees_prediction(
              tree_ensemble_handle,
              self._seed, [self._dense_float_tensor], [
                  self._sparse_float_indices1, self._sparse_float_indices2
              ], [self._sparse_float_values1, self._sparse_float_values2],
              [self._sparse_float_shape1,
               self._sparse_float_shape2], [self._sparse_int_indices1],
              [self._sparse_int_values1], [self._sparse_int_shape1],
              learner_config=learner_config.SerializeToString(),
              apply_dropout=False,
              apply_averaging=False,
              center_bias=False,
              reduce_dim=True)
        self.assertAllClose(result.eval(), [[-11.1], [-11.1]])

    # Start a second session.  In that session the parameter nodes
    # have not been initialized either.
    with ops.Graph().as_default() as graph:
      with self.session(graph) as sess:
        tree_ensemble_handle = model_ops.tree_ensemble_variable(
            stamp_token=0, tree_ensemble_config="", name="restore_tree")
        my_saver = saver.Saver()
        my_saver.restore(sess, save_path)
        result, _ = prediction_ops.gradient_trees_prediction(
            tree_ensemble_handle,
            self._seed, [self._dense_float_tensor], [
                self._sparse_float_indices1, self._sparse_float_indices2
            ], [self._sparse_float_values1, self._sparse_float_values2],
            [self._sparse_float_shape1,
             self._sparse_float_shape2], [self._sparse_int_indices1],
            [self._sparse_int_values1], [self._sparse_int_shape1],
            learner_config=learner_config.SerializeToString(),
            apply_dropout=False,
            apply_averaging=False,
            center_bias=False,
            reduce_dim=True)
        # Make sure we only have the first and second tree.
        # The third tree was added after the save.
        self.assertAllClose(result.eval(), [[-1.1], [-1.1]])
Example #28
  def testTrainFnMulticlassTreePerClass(self):
    """Tests the GBDT train for multiclass tree per class strategy."""
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")

      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 1
      # Use the tree-per-class multiclass strategy.
      learner_config.multi_class_strategy = (
          learner_pb2.LearnerConfig.TREE_PER_CLASS)
      learner_config.num_classes = 5
      learner_config.regularization.l1 = 0
      # To make the matrix invertible.
      learner_config.regularization.l2 = 1e-5
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {
          "dense_float": array_ops.constant(
              [[1.0], [1.5], [2.0]], dtypes.float32),
      }

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          features=features)

      batch_size = 3
      predictions = array_ops.constant(
          [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
           [0.0, 0.0, 0.0, 2.0, 1.2]],
          dtype=dtypes.float32)

      labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
      weights = array_ops.ones([batch_size, 1], dtypes.float32)

      partition_ids = array_ops.zeros([batch_size], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          # This should result in a tree being built for class 2.
          "num_trees": 13,
      }

      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              losses.per_example_maxent_loss(
                  labels,
                  weights,
                  predictions,
                  num_classes=learner_config.num_classes)[0]),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 1)

      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])
      # On second run, expect a trivial split to be chosen to basically
      # predict the average.
      train_op.run()
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())

      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      self.assertAllClose(output.tree_weights, [1])
      self.assertEqual(stamp_token.eval(), 2)

      # One node for the split and two child nodes.
      self.assertEqual(3, len(output.trees[0].nodes))

      # Leaves will have a sparse vector for class 3.
      self.assertEqual(1,
                       len(output.trees[0].nodes[1].leaf.sparse_vector.index))
      self.assertEqual(3, output.trees[0].nodes[1].leaf.sparse_vector.index[0])
      self.assertAlmostEqual(
          -1.13134455681, output.trees[0].nodes[1].leaf.sparse_vector.value[0])

      self.assertEqual(1,
                       len(output.trees[0].nodes[2].leaf.sparse_vector.index))
      self.assertEqual(3, output.trees[0].nodes[2].leaf.sparse_vector.index[0])
      self.assertAlmostEqual(
          0.893284678459, output.trees[0].nodes[2].leaf.sparse_vector.value[0])
Example #29
def _dnn_tree_combined_model_fn(features,
                                labels,
                                mode,
                                head,
                                dnn_hidden_units,
                                dnn_feature_columns,
                                tree_learner_config,
                                num_trees,
                                tree_examples_per_layer,
                                config=None,
                                dnn_optimizer="Adagrad",
                                dnn_activation_fn=nn.relu,
                                dnn_dropout=None,
                                dnn_input_layer_partitioner=None,
                                dnn_input_layer_to_tree=True,
                                dnn_steps_to_train=10000,
                                tree_feature_columns=None,
                                tree_center_bias=False,
                                use_core_versions=False):
    """DNN and GBDT combined model_fn.

  Args:
    features: `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    head: A `Head` instance.
    dnn_hidden_units: List of hidden units per layer.
    dnn_feature_columns: An iterable containing all the feature columns
      used by the model's DNN.
    tree_learner_config: A config for the tree learner.
    num_trees: Number of trees to grow model to after training DNN.
    tree_examples_per_layer: Number of examples to accumulate before
      growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in
      training dataset if possible. It can also be a function that computes
      the number of examples based on the depth of the layer that's
      being built.
    config: `RunConfig` of the estimator.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN. If `None`, will use the Adagrad
      optimizer with default learning rate of 0.001.
    dnn_activation_fn: Activation function applied to each layer of the DNN.
      If `None`, will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability to drop out a given
      unit in the DNN.
    dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
      Defaults to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    dnn_input_layer_to_tree: Whether to provide the DNN's input layer
      as a feature to the tree.
    dnn_steps_to_train: Number of steps to train dnn for before switching
      to gbdt.
    tree_feature_columns: An iterable containing all the feature columns
      used by the model's boosted trees. If dnn_input_layer_to_tree is
      set to True, these features are in addition to dnn_feature_columns.
    tree_center_bias: Whether a separate tree should be created for
      first fitting the bias.
    use_core_versions: Whether feature columns and loss are from the core (as
      opposed to contrib) version of tensorflow.

  Returns:
    A `ModelFnOps` object.
  Raises:
    ValueError: if inputs are not valid.
  """
    if not isinstance(features, dict):
        raise ValueError("features should be a dictionary of `Tensor`s. "
                         "Given type: {}".format(type(features)))

    if not dnn_feature_columns:
        raise ValueError("dnn_feature_columns must be specified")

    # Build DNN Logits.
    dnn_parent_scope = "dnn"
    dnn_partitioner = dnn_input_layer_partitioner or (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=config.num_ps_replicas, min_slice_size=64 << 20))

    with variable_scope.variable_scope(dnn_parent_scope,
                                       values=tuple(six.itervalues(features)),
                                       partitioner=dnn_partitioner):

        with variable_scope.variable_scope(
                "input_from_feature_columns",
                values=tuple(six.itervalues(features)),
                partitioner=dnn_partitioner) as input_layer_scope:
            if use_core_versions:
                input_layer = feature_column_lib.input_layer(
                    features=features,
                    feature_columns=dnn_feature_columns,
                    weight_collections=[dnn_parent_scope])
            else:
                input_layer = layers.input_from_feature_columns(
                    columns_to_tensors=features,
                    feature_columns=dnn_feature_columns,
                    weight_collections=[dnn_parent_scope],
                    scope=input_layer_scope)
        previous_layer = input_layer
        for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
            with variable_scope.variable_scope(
                    "hiddenlayer_%d" % layer_id,
                    values=(previous_layer, )) as hidden_layer_scope:
                net = layers.fully_connected(
                    previous_layer,
                    num_hidden_units,
                    activation_fn=dnn_activation_fn,
                    variables_collections=[dnn_parent_scope],
                    scope=hidden_layer_scope)
                if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
                    net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
            _add_hidden_layer_summary(net, hidden_layer_scope.name)
            previous_layer = net
        with variable_scope.variable_scope(
                "logits", values=(previous_layer, )) as logits_scope:
            dnn_logits = layers.fully_connected(
                previous_layer,
                head.logits_dimension,
                activation_fn=None,
                variables_collections=[dnn_parent_scope],
                scope=logits_scope)
        _add_hidden_layer_summary(dnn_logits, logits_scope.name)

        def _dnn_train_op_fn(loss):
            """Returns the op to optimize the loss."""
            return optimizers.optimize_loss(
                loss=loss,
                global_step=training_util.get_global_step(),
                learning_rate=_DNN_LEARNING_RATE,
                optimizer=_get_optimizer(dnn_optimizer),
                name=dnn_parent_scope,
                variables=ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
                                             scope=dnn_parent_scope),
                # Empty summaries to prevent optimizers from logging training_loss.
                summaries=[])

    # Build Tree Logits.
    global_step = training_util.get_global_step()
    with ops.device(global_step.device):
        ensemble_handle = model_ops.tree_ensemble_variable(
            stamp_token=0,
            tree_ensemble_config="",  # Initialize an empty ensemble.
            name="ensemble_model")

    tree_features = features.copy()
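    # Optionally feed the DNN's input layer to the tree as an extra dense feature.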
    if dnn_input_layer_to_tree:
        tree_features["dnn_input_layer"] = input_layer
        tree_feature_columns.append(
            layers.real_valued_column("dnn_input_layer"))
    gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
        is_chief=config.is_chief,
        num_ps_replicas=config.num_ps_replicas,
        ensemble_handle=ensemble_handle,
        center_bias=tree_center_bias,
        examples_per_layer=tree_examples_per_layer,
        learner_config=tree_learner_config,
        feature_columns=tree_feature_columns,
        logits_dimension=head.logits_dimension,
        features=tree_features)

    with ops.name_scope("gbdt"):
        predictions_dict = gbdt_model.predict(mode)
        tree_logits = predictions_dict["predictions"]

        def _tree_train_op_fn(loss):
            """Returns the op to optimize the loss."""
            update_op = gbdt_model.train(loss, predictions_dict, labels)
            with ops.control_dependencies(
                [update_op]), (ops.colocate_with(global_step)):
                update_op = state_ops.assign_add(global_step, 1).op
                return update_op

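    # The combined model's logits are the sum of the DNN logits and the tree logits.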
    tree_train_logits = dnn_logits + tree_logits

    def _no_train_op_fn(loss):
        """Returns a no-op."""
        del loss
        return control_flow_ops.no_op()

    if use_core_versions:
        model_fn_ops = head.create_estimator_spec(features=features,
                                                  mode=mode,
                                                  labels=labels,
                                                  train_op_fn=_no_train_op_fn,
                                                  logits=tree_train_logits)
        dnn_train_op = head.create_estimator_spec(features=features,
                                                  mode=mode,
                                                  labels=labels,
                                                  train_op_fn=_dnn_train_op_fn,
                                                  logits=dnn_logits)
        dnn_train_op = estimator_utils.estimator_spec_to_model_fn_ops(
            dnn_train_op).train_op

        tree_train_op = head.create_estimator_spec(
            features=tree_features,
            mode=mode,
            labels=labels,
            train_op_fn=_tree_train_op_fn,
            logits=tree_train_logits)
        tree_train_op = estimator_utils.estimator_spec_to_model_fn_ops(
            tree_train_op).train_op

        model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
            model_fn_ops)
    else:
        model_fn_ops = head.create_model_fn_ops(features=features,
                                                mode=mode,
                                                labels=labels,
                                                train_op_fn=_no_train_op_fn,
                                                logits=tree_train_logits)
        dnn_train_op = head.create_model_fn_ops(features=features,
                                                mode=mode,
                                                labels=labels,
                                                train_op_fn=_dnn_train_op_fn,
                                                logits=dnn_logits).train_op
        tree_train_op = head.create_model_fn_ops(
            features=tree_features,
            mode=mode,
            labels=labels,
            train_op_fn=_tree_train_op_fn,
            logits=tree_train_logits).train_op

    if tree_center_bias:
        num_trees += 1
    finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()

    model_fn_ops.training_hooks.extend([
        trainer_hooks.SwitchTrainOp(dnn_train_op, dnn_steps_to_train,
                                    tree_train_op),
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees)
    ])

    return model_fn_ops
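A minimal wiring sketch (not part of the original example): the combined model_fn above can be bound into a tf.contrib.learn Estimator with functools.partial. The my_head, my_dnn_columns, my_tree_config, and my_run_config objects below are hypothetical placeholders assumed to be built elsewhere.

import functools

from tensorflow.contrib.learn.python.learn.estimators import estimator

combined_model_fn = functools.partial(
    _dnn_tree_combined_model_fn,
    head=my_head,                        # hypothetical Head instance
    dnn_hidden_units=[64, 32],
    dnn_feature_columns=my_dnn_columns,  # hypothetical DNN feature columns
    tree_learner_config=my_tree_config,  # hypothetical learner_pb2.LearnerConfig
    num_trees=10,
    tree_examples_per_layer=1000)

est = estimator.Estimator(model_fn=combined_model_fn, config=my_run_config)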
def model_fn(features, labels, mode, params, config):
    head = params["head"]
    learner_config = params["learner_config"]
    examples_per_layer = params["examples_per_layer"]
    feature_columns = params["feature_columns"]
    weight_column_name = params["weight_column_name"]
    num_trees = params["num_trees"]
    use_core_libs = params["use_core_libs"]
    logits_modifier_function = params["logits_modifier_function"]
    output_leaf_index = params["output_leaf_index"]

    if features is None:
        raise ValueError("At least one feature must be specified.")

    if config is None:
        raise ValueError("Missing estimator RunConfig.")

    center_bias = params["center_bias"]

    if isinstance(features, ops.Tensor):
        features = {features.name: features}

    # Make a shallow copy of features to ensure downstream usage
    # is unaffected by modifications in the model function.
    training_features = copy.copy(features)
    training_features.pop(weight_column_name, None)
    global_step = training_util.get_global_step()
    with ops.device(global_step.device):
        ensemble_handle = model_ops.tree_ensemble_variable(
            stamp_token=0,
            tree_ensemble_config="",  # Initialize an empty ensemble.
            name="ensemble_model")

    # Create GBDT model.
    gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
        is_chief=config.is_chief,
        num_ps_replicas=config.num_ps_replicas,
        ensemble_handle=ensemble_handle,
        center_bias=center_bias,
        examples_per_layer=examples_per_layer,
        learner_config=learner_config,
        feature_columns=feature_columns,
        logits_dimension=head.logits_dimension,
        features=training_features,
        use_core_columns=use_core_libs,
        output_leaf_index=output_leaf_index)
    with ops.name_scope("gbdt", "gbdt_optimizer"):
        predictions_dict = gbdt_model.predict(mode)
        logits = predictions_dict["predictions"]
        if logits_modifier_function:
            logits = logits_modifier_function(logits, features, mode)

        def _train_op_fn(loss):
            """Returns the op to optimize the loss."""
            update_op = gbdt_model.train(loss, predictions_dict, labels)
            with ops.control_dependencies(
                    [update_op]), (ops.colocate_with(global_step)):
                update_op = state_ops.assign_add(global_step, 1).op
                return update_op

    create_estimator_spec_op = getattr(head, "create_estimator_spec", None)

    training_hooks = []
    if num_trees:
        if center_bias:
            num_trees += 1

        finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()
        training_hooks.append(
            trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                          finalized_trees))

    if use_core_libs and callable(create_estimator_spec_op):
        model_fn_ops = head.create_estimator_spec(
            features=features,
            mode=mode,
            labels=labels,
            train_op_fn=_train_op_fn,
            logits=logits)
        model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
            model_fn_ops)
    else:
        model_fn_ops = head.create_model_fn_ops(
            features=features,
            mode=mode,
            labels=labels,
            train_op_fn=_train_op_fn,
            logits=logits)

    if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
        model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
            gbdt_batch.LEAF_INDEX]

    model_fn_ops.training_hooks.extend(training_hooks)
    return model_fn_ops
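For reference, a sketch of the params dict this model_fn reads (the keys mirror the lookups above; the values are hypothetical placeholders, not from the source):

params = {
    "head": my_head,                      # hypothetical Head instance
    "learner_config": my_learner_config,  # learner_pb2.LearnerConfig
    "examples_per_layer": 1000,
    "feature_columns": my_feature_columns,
    "weight_column_name": None,
    "num_trees": 100,
    "use_core_libs": False,
    "logits_modifier_function": None,
    "output_leaf_index": False,
    "center_bias": True,
}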
Example #31
  def testTrainFnMulticlassDiagonalHessian(self):
    """Tests the GBDT train for multiclass diagonal hessian."""
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")

      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 1
      # Use the diagonal hessian multiclass strategy.
      learner_config.multi_class_strategy = (
          learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
      learner_config.num_classes = 5
      learner_config.regularization.l1 = 0
      # To make the matrix invertible.
      learner_config.regularization.l2 = 1e-5
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      batch_size = 3
      features = {}
      features["dense_float"] = array_ops.constant(
          [0.3, 1.5, 1.1], dtype=dtypes.float32)

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          features=features)

      predictions = array_ops.constant(
          [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
           [0.0, 0.0, 0.0, 0.0, 1.2]],
          dtype=dtypes.float32)

      labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
      weights = array_ops.ones([batch_size, 1], dtypes.float32)

      partition_ids = array_ops.zeros([batch_size], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 0,
      }

      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              losses.per_example_maxent_loss(
                  labels,
                  weights,
                  predictions,
                  num_classes=learner_config.num_classes)[0]),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 1)

      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])
      # On second run, expect a trivial split to be chosen to basically
      # predict the average.
      train_op.run()
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())

      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      # We get 3 nodes: one parent and two leaves.
      self.assertEqual(len(output.trees[0].nodes), 3)
      self.assertAllClose(output.tree_weights, [1])
      self.assertEqual(stamp_token.eval(), 2)

      # Leaves should have a dense vector of size 5.
      expected_leaf_1 = [-1.0354, -1.0107, 17.2976, -1.1313, -4.5023]
      expected_leaf_2 = [-1.2924, -1.1376, 2.2042, 3.1052, -1.6269]
      self.assertArrayNear(expected_leaf_1,
                           output.trees[0].nodes[1].leaf.vector.value, 1e-3)
      self.assertArrayNear(expected_leaf_2,
                           output.trees[0].nodes[2].leaf.vector.value, 1e-3)
Example #32
def ranking_model_builder(features,
                          labels,
                          mode,
                          params,
                          config,
                          output_type=ModelBuilderOutputType.MODEL_FN_OPS):
  """Multi-machine batch gradient descent tree model for ranking.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * learner_config: A config for the learner.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * examples_per_layer: Number of examples to accumulate before growing a
          layer. It can also be a function that computes the number of examples
          based on the depth of the layer that's being built.
      * weight_column_name: The name of weight column.
      * center_bias: Whether a separate tree should be created for first fitting
          the bias.
      * ranking_model_pair_keys (Optional): Keys to distinguish between features
        for left and right part of the training pairs for ranking. For example,
        for an Example with features "a.f1" and "b.f1", the keys would be
        ("a", "b").
      * override_global_step_value: If after the training is done, global step
        value must be reset to this value. This is particularly useful for hyper
        parameter tuning, which can't recognize early stopping due to the number
        of trees. If None, no override of global step will happen.
    config: `RunConfig` of the estimator.
    output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
      (new interface).


  Returns:
    A `ModelFnOps` object.
  Raises:
    ValueError: if inputs are not valid.
  """
  head = params["head"]
  learner_config = params["learner_config"]
  examples_per_layer = params["examples_per_layer"]
  feature_columns = params["feature_columns"]
  weight_column_name = params["weight_column_name"]
  num_trees = params["num_trees"]
  use_core_libs = params["use_core_libs"]
  logits_modifier_function = params["logits_modifier_function"]
  output_leaf_index = params["output_leaf_index"]
  ranking_model_pair_keys = params["ranking_model_pair_keys"]
  override_global_step_value = params.get("override_global_step_value", None)
  num_quantiles = params["num_quantiles"]

  if features is None:
    raise ValueError("At least one feature must be specified.")

  if config is None:
    raise ValueError("Missing estimator RunConfig.")

  center_bias = params["center_bias"]

  if isinstance(features, ops.Tensor):
    features = {features.name: features}

  # Make a shallow copy of features to ensure downstream usage
  # is unaffected by modifications in the model function.
  training_features = copy.copy(features)
  training_features.pop(weight_column_name, None)
  global_step = training_util.get_global_step()
  with ops.device(global_step.device):
    ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config="",  # Initialize an empty ensemble.
        name="ensemble_model")

  # Extract the features.
  if mode == learn.ModeKeys.TRAIN or mode == learn.ModeKeys.EVAL:
    # For ranking pairwise training, we extract two sets of features.
    if len(ranking_model_pair_keys) != 2:
      raise ValueError("You must provide keys for ranking.")
    left_pair_key = ranking_model_pair_keys[0]
    right_pair_key = ranking_model_pair_keys[1]
    if left_pair_key is None or right_pair_key is None:
      raise ValueError("Both pair keys should be provided for ranking.")

    features_1 = {}
    features_2 = {}
    for name in training_features:
      feature = training_features[name]
      new_name = name[2:]
      if name.startswith(left_pair_key + "."):
        features_1[new_name] = feature
      else:
        assert name.startswith(right_pair_key + ".")
        features_2[new_name] = feature

    main_features = features_1
    supplementary_features = features_2
  else:
    # For non-ranking or inference ranking, we have only 1 set of features.
    main_features = training_features

  # Create GBDT model.
  gbdt_model_main = gbdt_batch.GradientBoostedDecisionTreeModel(
      is_chief=config.is_chief,
      num_ps_replicas=config.num_ps_replicas,
      ensemble_handle=ensemble_handle,
      center_bias=center_bias,
      examples_per_layer=examples_per_layer,
      learner_config=learner_config,
      feature_columns=feature_columns,
      logits_dimension=head.logits_dimension,
      features=main_features,
      use_core_columns=use_core_libs,
      output_leaf_index=output_leaf_index,
      num_quantiles=num_quantiles)

  with ops.name_scope("gbdt", "gbdt_optimizer"):
    # Logits for inference.
    if mode == learn.ModeKeys.INFER:
      predictions_dict = gbdt_model_main.predict(mode)
      logits = predictions_dict[gbdt_batch.PREDICTIONS]
      if logits_modifier_function:
        logits = logits_modifier_function(logits, features, mode)
    else:
      gbdt_model_supplementary = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=config.is_chief,
          num_ps_replicas=config.num_ps_replicas,
          ensemble_handle=ensemble_handle,
          center_bias=center_bias,
          examples_per_layer=examples_per_layer,
          learner_config=learner_config,
          feature_columns=feature_columns,
          logits_dimension=head.logits_dimension,
          features=supplementary_features,
          use_core_columns=use_core_libs,
          output_leaf_index=output_leaf_index)

      # Logits for train and eval.
      if not supplementary_features:
        raise ValueError("Features for ranking must be specified.")

      predictions_dict_1 = gbdt_model_main.predict(mode)
      predictions_1 = predictions_dict_1[gbdt_batch.PREDICTIONS]

      predictions_dict_2 = gbdt_model_supplementary.predict(mode)
      predictions_2 = predictions_dict_2[gbdt_batch.PREDICTIONS]

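      # Pairwise ranking logit: the left ("main") score minus the right
      # ("supplementary") score.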
      logits = predictions_1 - predictions_2
      if logits_modifier_function:
        logits = logits_modifier_function(logits, features, mode)

      predictions_dict = predictions_dict_1
      predictions_dict[gbdt_batch.PREDICTIONS] = logits

    def _train_op_fn(loss):
      """Returns the op to optimize the loss."""
      update_op = gbdt_model_main.train(loss, predictions_dict, labels)
      with ops.control_dependencies(
          [update_op]), (ops.colocate_with(global_step)):
        update_op = state_ops.assign_add(global_step, 1).op
        return update_op

  create_estimator_spec_op = getattr(head, "create_estimator_spec", None)

  training_hooks = []
  if num_trees:
    if center_bias:
      num_trees += 1

    finalized_trees, attempted_trees = (
        gbdt_model_main.get_number_of_trees_tensor())
    training_hooks.append(
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees,
                                      override_global_step_value))

  if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
    if use_core_libs and callable(create_estimator_spec_op):
      model_fn_ops = head.create_estimator_spec(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_train_op_fn,
          logits=logits)
      model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
          model_fn_ops)
    else:
      model_fn_ops = head.create_model_fn_ops(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_train_op_fn,
          logits=logits)

    if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
      model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
          gbdt_batch.LEAF_INDEX]

    model_fn_ops.training_hooks.extend(training_hooks)
    return model_fn_ops

  elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
    assert callable(create_estimator_spec_op)
    estimator_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)

    estimator_spec = estimator_spec._replace(
        training_hooks=training_hooks + list(estimator_spec.training_hooks))
    return estimator_spec

  return model_fn_ops
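An illustration (an assumption based on the docstring above) of how pairwise features are routed when ranking_model_pair_keys=("a", "b"); the tensors are hypothetical:

# features = {"a.query_len": t_left, "b.query_len": t_right}   # hypothetical tensors
# main_features          == {"query_len": t_left}   (the "a." side)
# supplementary_features == {"query_len": t_right}  (the "b." side)
# The train/eval logit is the main model's score minus the supplementary model's score.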
Example #33
    def testTrainFnChiefScalingNumberOfExamples(self):
        """Tests the train function running on chief without bias centering."""
        with self.test_session() as sess:
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
            learner_config.num_classes = 2
            learner_config.regularization.l1 = 0
            learner_config.regularization.l2 = 0
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            num_examples_fn = (lambda layer: math_ops.pow(
                math_ops.cast(2, dtypes.int64), layer) * 1)
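            # Require 2**layer examples before growing each layer (1, 2, 4, ...).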
            features = {}
            features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=True,
                num_ps_replicas=0,
                center_bias=False,
                ensemble_handle=ensemble_handle,
                examples_per_layer=num_examples_fn,
                learner_config=learner_config,
                features=features)

            predictions = array_ops.constant([[0.0], [1.0], [0.0], [2.0]],
                                             dtype=dtypes.float32)
            partition_ids = array_ops.zeros([4], dtypes.int32)
            ensemble_stamp = variables.Variable(initial_value=0,
                                                name="ensemble_stamp",
                                                trainable=False,
                                                dtype=dtypes.int64)

            predictions_dict = {
                "predictions": predictions,
                "predictions_no_dropout": predictions,
                "partition_ids": partition_ids,
                "ensemble_stamp": ensemble_stamp,
                "num_trees": 12,
            }

            labels = array_ops.ones([4, 1], dtypes.float32)
            weights = array_ops.ones([4, 1], dtypes.float32)
            # Create train op.
            train_op = gbdt_model.train(loss=math_ops.reduce_mean(
                _squared_loss(labels, weights, predictions)),
                                        predictions_dict=predictions_dict,
                                        labels=labels)
            variables.global_variables_initializer().run()
            resources.initialize_resources(resources.shared_resources()).run()

            # On first run, expect no splits to be chosen because the quantile
            # buckets will not be ready.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 0)
            self.assertEqual(len(output.tree_weights), 0)
            self.assertEqual(stamp_token.eval(), 1)

            # Update the stamp to be able to run a second time.
            sess.run([ensemble_stamp.assign_add(1)])

            # On second run, expect a trivial split to be chosen to basically
            # predict the average.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
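            # A single tree was added; its weight equals the fixed learning rate (0.1).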
            self.assertEqual(len(output.trees), 1)
            self.assertAllClose(output.tree_weights, [0.1])
            self.assertEqual(stamp_token.eval(), 2)
            expected_tree = """
          nodes {
            dense_float_binary_split {
              threshold: 1.0
              left_id: 1
              right_id: 2
            }
            node_metadata {
              gain: 0
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.0
              }
            }
          }"""
            self.assertProtoEquals(expected_tree, output.trees[0])
Example #34
def model_builder(features, labels, mode, params, config):
  """Multi-machine batch gradient descent tree model.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * learner_config: A config for the learner.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * examples_per_layer: Number of examples to accumulate before growing a
          layer. It can also be a function that computes the number of examples
          based on the depth of the layer that's being built.
      * weight_column_name: The name of weight column.
      * center_bias: Whether a separate tree should be created for first fitting
          the bias.
    config: `RunConfig` of the estimator.

  Returns:
    A `ModelFnOps` object.
  Raises:
    ValueError: if inputs are not valid.
  """
  head = params["head"]
  learner_config = params["learner_config"]
  examples_per_layer = params["examples_per_layer"]
  feature_columns = params["feature_columns"]
  weight_column_name = params["weight_column_name"]
  num_trees = params["num_trees"]
  if features is None:
    raise ValueError("At least one feature must be specified.")

  if config is None:
    raise ValueError("Missing estimator RunConfig.")

  center_bias = params["center_bias"]

  # Make a shallow copy of features to ensure downstream usage
  # is unaffected by modifications in the model function.
  training_features = copy.copy(features)
  training_features.pop(weight_column_name, None)
  global_step = training_util.get_global_step()
  with ops.device(global_step.device):
    ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config="",  # Initialize an empty ensemble.
        name="ensemble_model")

  # Create GBDT model.
  gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
      is_chief=config.is_chief,
      num_ps_replicas=config.num_ps_replicas,
      ensemble_handle=ensemble_handle,
      center_bias=center_bias,
      examples_per_layer=examples_per_layer,
      learner_config=learner_config,
      feature_columns=feature_columns,
      features=features)
  with ops.name_scope("gbdt", "gbdt_optimizer"):
    predictions_dict = gbdt_model.predict(mode)
    logits = predictions_dict["predictions"]

    def _train_op_fn(loss):
      """Returns the op to optimize the loss."""
      update_op = gbdt_model.train(loss, predictions_dict, labels)
      with ops.control_dependencies(
          [update_op]), (ops.colocate_with(global_step)):
        update_op = state_ops.assign_add(global_step, 1).op
        return update_op

  model_fn_ops = head.create_model_fn_ops(
      features=features,
      mode=mode,
      labels=labels,
      train_op_fn=_train_op_fn,
      logits=logits)
  if num_trees:
    if center_bias:
      num_trees += 1
    finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()
    model_fn_ops.training_hooks.append(
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees))
  return model_fn_ops
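A minimal wiring sketch, an assumption rather than part of the source; the placeholder objects are hypothetical:

from tensorflow.contrib.learn.python.learn.estimators import estimator

est = estimator.Estimator(
    model_fn=model_builder,
    params={
        "head": my_head,                      # hypothetical Head instance
        "learner_config": my_learner_config,  # learner_pb2.LearnerConfig
        "examples_per_layer": 1000,
        "feature_columns": my_feature_columns,
        "weight_column_name": None,
        "num_trees": 100,
        "center_bias": True,
    },
    config=my_run_config)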
Example #35
    def testSerialization(self):
        with ops.Graph().as_default() as graph:
            with self.test_session(graph):
                tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig(
                )
                # Bias tree only for second class.
                tree1 = tree_ensemble_config.trees.add()
                _append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)

                tree_ensemble_config.tree_weights.append(1.0)

                # Depth 2 tree.
                tree2 = tree_ensemble_config.trees.add()
                tree_ensemble_config.tree_weights.append(1.0)
                _set_float_split(
                    tree2.nodes.add().sparse_float_binary_split_default_right.
                    split, 1, 4.0, 1, 2)
                _set_float_split(tree2.nodes.add().dense_float_binary_split, 0,
                                 9.0, 3, 4)
                _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
                _append_to_leaf(tree2.nodes.add().leaf, 1, 1.2)
                _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)

                tree_ensemble_handle = model_ops.tree_ensemble_variable(
                    stamp_token=7,
                    tree_ensemble_config=tree_ensemble_config.
                    SerializeToString(),
                    name="saver_tree")
                stamp_token, serialized_config = model_ops.tree_ensemble_serialize(
                    tree_ensemble_handle)
                resources.initialize_resources(
                    resources.shared_resources()).run()
                self.assertEqual(stamp_token.eval(), 7)
                serialized_config = serialized_config.eval()

        with ops.Graph().as_default() as graph:
            with self.test_session(graph):
                tree_ensemble_handle2 = model_ops.tree_ensemble_variable(
                    stamp_token=9,
                    tree_ensemble_config=serialized_config,
                    name="saver_tree2")
                resources.initialize_resources(
                    resources.shared_resources()).run()

                # Prepare learner config.
                learner_config = learner_pb2.LearnerConfig()
                learner_config.num_classes = 3

                result, _, _ = prediction_ops.gradient_trees_prediction(
                    tree_ensemble_handle2,
                    self._seed, [self._dense_float_tensor],
                    [self._sparse_float_indices1, self._sparse_float_indices2],
                    [self._sparse_float_values1, self._sparse_float_values2],
                    [self._sparse_float_shape1, self._sparse_float_shape2],
                    [self._sparse_int_indices1], [self._sparse_int_values1],
                    [self._sparse_int_shape1],
                    learner_config=learner_config.SerializeToString(),
                    apply_dropout=False,
                    apply_averaging=False,
                    center_bias=False,
                    reduce_dim=True)

                # Re-serialize tree.
                stamp_token2, serialized_config2 = model_ops.tree_ensemble_serialize(
                    tree_ensemble_handle2)

                # The first example will get bias class 1 -0.2 from first tree and
                # leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2],
                # the second example will get the same bias class 1 -0.2 and leaf 3
                # payload of class 1 1.2 hence [0.0, 1.0].
                self.assertEqual(stamp_token2.eval(), 9)

                # Class 2 does not have a score in the leaf => it gets score 0.
                self.assertEqual(serialized_config2.eval(), serialized_config)
                self.assertAllClose(result.eval(), [[0.5, -0.2], [0, 1.0]])
Example #36
    def testTrainFnChiefWithBiasCentering(self):
        """Tests the train function running on chief with bias centering."""
        with self.test_session():
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
            learner_config.num_classes = 2
            learner_config.regularization.l1 = 0
            learner_config.regularization.l2 = 0
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            features = {}
            features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=True,
                num_ps_replicas=0,
                center_bias=True,
                ensemble_handle=ensemble_handle,
                examples_per_layer=1,
                learner_config=learner_config,
                features=features)

            predictions = array_ops.constant([[0.0], [1.0], [0.0], [2.0]],
                                             dtype=dtypes.float32)
            partition_ids = array_ops.zeros([4], dtypes.int32)
            ensemble_stamp = variables.Variable(initial_value=0,
                                                name="ensemble_stamp",
                                                trainable=False,
                                                dtype=dtypes.int64)

            predictions_dict = {
                "predictions": predictions,
                "predictions_no_dropout": predictions,
                "partition_ids": partition_ids,
                "ensemble_stamp": ensemble_stamp,
                "num_trees": 12,
            }

            labels = array_ops.ones([4, 1], dtypes.float32)
            weights = array_ops.ones([4, 1], dtypes.float32)
            # Create train op.
            train_op = gbdt_model.train(loss=math_ops.reduce_mean(
                _squared_loss(labels, weights, predictions)),
                                        predictions_dict=predictions_dict,
                                        labels=labels)
            variables.global_variables_initializer().run()
            resources.initialize_resources(resources.shared_resources()).run()

            # On first run, expect bias to be centered.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
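            # With squared loss, the centered bias is the mean residual:
            # mean(labels - predictions) = 1 - 0.75 = 0.25.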
            expected_tree = """
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }"""
            self.assertEqual(len(output.trees), 1)
            self.assertAllEqual(output.tree_weights, [1.0])
            self.assertProtoEquals(expected_tree, output.trees[0])
            self.assertEqual(stamp_token.eval(), 1)
    def testWithExistingEnsembleAndDropout(self):
        with self.test_session():
            tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            # Add 10 trees with some weights.
            for i in range(0, 10):
                tree = tree_ensemble.trees.add()
                _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
                tree_ensemble.tree_weights.append(i + 1)
                meta = tree_ensemble.tree_metadata.add()
                meta.num_tree_weight_updates = 1
            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0,
                tree_ensemble_config=tree_ensemble.SerializeToString(),
                name="existing")
            # Create non-zero feature importance.
            feature_usage_counts = variables.Variable(
                initial_value=np.array([2, 3], np.int64),
                name="feature_usage_counts",
                trainable=False)
            feature_gains = variables.Variable(initial_value=np.array(
                [0.0, 0.3], np.float32),
                                               name="feature_gains",
                                               trainable=False)

            resources.initialize_resources(resources.shared_resources()).run()
            variables.global_variables_initializer().run()

            dropped = [1, 6, 8]
            dropped_original_weights = [2.0, 7.0, 9.0]
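            # Trees 1, 6 and 8 were dropped by dropout; their pre-dropout weights are
            # passed so add_trees_to_ensemble can adjust them when the new tree is added.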

            output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            with ops.control_dependencies([
                    ensemble_optimizer_ops.add_trees_to_ensemble(
                        tree_ensemble_handle,
                        self._ensemble_to_add.SerializeToString(),
                        feature_usage_counts, [1, 2],
                        feature_gains, [0.5, 0.3],
                        [dropped, dropped_original_weights],
                        learning_rate=0.1)
            ]):
                output_ensemble.ParseFromString(
                    model_ops.tree_ensemble_serialize(tree_ensemble_handle)
                    [1].eval())

            # Output.
            self.assertEqual(11, len(output_ensemble.trees))
            self.assertProtoEquals(self._tree_to_add,
                                   output_ensemble.trees[10])
            self.assertAllClose(4.5, output_ensemble.tree_weights[10])

            self.assertAllClose(
                [1., 1.5, 3., 4., 5., 6., 5.25, 8., 6.75, 10., 4.5],
                output_ensemble.tree_weights)

            self.assertEqual(
                1, output_ensemble.tree_metadata[0].num_tree_weight_updates)
            self.assertEqual(
                2, output_ensemble.tree_metadata[1].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[2].num_tree_weight_updates)

            self.assertEqual(
                1, output_ensemble.tree_metadata[3].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[4].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[5].num_tree_weight_updates)
            self.assertEqual(
                2, output_ensemble.tree_metadata[6].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[7].num_tree_weight_updates)
            self.assertEqual(
                2, output_ensemble.tree_metadata[8].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[9].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[10].num_tree_weight_updates)
            self.assertAllEqual([3, 5], feature_usage_counts.eval())
            self.assertArrayNear([0.05, 0.33], feature_gains.eval(), 1e-6)
Example #38
    def testTrainFnMulticlassDiagonalHessian(self):
        """Tests the GBDT train for multiclass diagonal hessian."""
        with self.test_session() as sess:
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="tree_ensemble")

            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 1
            # Use the diagonal hessian multiclass strategy.
            learner_config.multi_class_strategy = (
                learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
            learner_config.num_classes = 5
            learner_config.regularization.l1 = 0
            # To make the matrix invertible.
            learner_config.regularization.l2 = 1e-5
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            batch_size = 3
            features = {}
            features["dense_float"] = array_ops.ones([batch_size, 1],
                                                     dtypes.float32)

            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=True,
                num_ps_replicas=0,
                center_bias=False,
                ensemble_handle=ensemble_handle,
                examples_per_layer=1,
                learner_config=learner_config,
                features=features)

            predictions = array_ops.constant(
                [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
                 [0.0, 0.0, 0.0, 0.0, 1.2]],
                dtype=dtypes.float32)

            labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
            weights = array_ops.ones([batch_size, 1], dtypes.float32)

            partition_ids = array_ops.zeros([batch_size], dtypes.int32)
            ensemble_stamp = variables.Variable(initial_value=0,
                                                name="ensemble_stamp",
                                                trainable=False,
                                                dtype=dtypes.int64)

            predictions_dict = {
                "predictions": predictions,
                "predictions_no_dropout": predictions,
                "partition_ids": partition_ids,
                "ensemble_stamp": ensemble_stamp,
                "num_trees": 0,
            }

            # Create train op.
            train_op = gbdt_model.train(loss=math_ops.reduce_mean(
                losses.per_example_maxent_loss(
                    labels,
                    weights,
                    predictions,
                    num_classes=learner_config.num_classes)[0]),
                                        predictions_dict=predictions_dict,
                                        labels=labels)
            variables.global_variables_initializer().run()
            resources.initialize_resources(resources.shared_resources()).run()

            # On first run, expect no splits to be chosen because the quantile
            # buckets will not be ready.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 0)
            self.assertEqual(len(output.tree_weights), 0)
            self.assertEqual(stamp_token.eval(), 1)

            # Update the stamp to be able to run a second time.
            sess.run([ensemble_stamp.assign_add(1)])
            # On second run, expect a trivial split to be chosen to basically
            # predict the average.
            train_op.run()
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())

            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 1)
            self.assertAllClose(output.tree_weights, [1])
            self.assertEqual(stamp_token.eval(), 2)

            # Leaf should have a dense vector of size 5.
            expected = [
                -1.26767396927, -1.13043296337, 4.58542203903, 1.81428349018,
                -2.43038392067
            ]
            for i in range(learner_config.num_classes):
                self.assertAlmostEqual(
                    expected[i], output.trees[0].nodes[1].leaf.vector.value[i])
Example #39
    def testTrainFnMulticlassTreePerClass(self):
        """Tests the GBDT train for multiclass tree per class strategy."""
        with self.test_session() as sess:
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="tree_ensemble")

            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 1
            # Use the tree-per-class multiclass strategy.
            learner_config.multi_class_strategy = (
                learner_pb2.LearnerConfig.TREE_PER_CLASS)
            learner_config.num_classes = 5
            learner_config.regularization.l1 = 0
            # To make the matrix invertible.
            learner_config.regularization.l2 = 1e-5
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            features = {
                "dense_float":
                array_ops.constant([[1.0], [1.5], [2.0]], dtypes.float32),
            }

            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=True,
                num_ps_replicas=0,
                center_bias=False,
                ensemble_handle=ensemble_handle,
                examples_per_layer=1,
                learner_config=learner_config,
                features=features)

            batch_size = 3
            predictions = array_ops.constant(
                [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
                 [0.0, 0.0, 0.0, 2.0, 1.2]],
                dtype=dtypes.float32)

            labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
            weights = array_ops.ones([batch_size, 1], dtypes.float32)

            partition_ids = array_ops.zeros([batch_size], dtypes.int32)
            ensemble_stamp = variables.Variable(initial_value=0,
                                                name="ensemble_stamp",
                                                trainable=False,
                                                dtype=dtypes.int64)

            predictions_dict = {
                "predictions": predictions,
                "predictions_no_dropout": predictions,
                "partition_ids": partition_ids,
                "ensemble_stamp": ensemble_stamp,
                # This should result in a tree being built for class 2.
                "num_trees": 13,
            }

            # Create train op.
            train_op = gbdt_model.train(loss=math_ops.reduce_mean(
                losses.per_example_maxent_loss(
                    labels,
                    weights,
                    predictions,
                    num_classes=learner_config.num_classes)[0]),
                                        predictions_dict=predictions_dict,
                                        labels=labels)
            variables.global_variables_initializer().run()
            resources.initialize_resources(resources.shared_resources()).run()

            # On first run, expect no splits to be chosen because the quantile
            # buckets will not be ready.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 0)
            self.assertEqual(len(output.tree_weights), 0)
            self.assertEqual(stamp_token.eval(), 1)

            # Update the stamp to be able to run a second time.
            sess.run([ensemble_stamp.assign_add(1)])
            # On second run, expect a trivial split to be chosen to basically
            # predict the average.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 1)
            self.assertAllClose(output.tree_weights, [1])
            self.assertEqual(stamp_token.eval(), 2)

            # One node for a split, two children nodes.
            self.assertEqual(3, len(output.trees[0].nodes))

            # Leaves will have a sparse vector for class 3.
            self.assertEqual(
                1, len(output.trees[0].nodes[1].leaf.sparse_vector.index))
            self.assertEqual(
                3, output.trees[0].nodes[1].leaf.sparse_vector.index[0])
            self.assertAlmostEqual(
                -1.13134455681,
                output.trees[0].nodes[1].leaf.sparse_vector.value[0])

            self.assertEqual(
                1, len(output.trees[0].nodes[2].leaf.sparse_vector.index))
            self.assertEqual(
                3, output.trees[0].nodes[2].leaf.sparse_vector.index[0])
            self.assertAlmostEqual(
                0.893284678459,
                output.trees[0].nodes[2].leaf.sparse_vector.value[0])
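The assertions above read the per-class contribution straight out of the leaf's sparse_vector. As a minimal sketch, assuming the contrib proto import path used by these tests, the same structure can be built and inspected without running a session:

from tensorflow.contrib.boosted_trees.proto import tree_config_pb2

# Build a one-tree ensemble whose single leaf carries a value for class 3 only.
ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
leaf = ensemble.trees.add().nodes.add().leaf
leaf.sparse_vector.index.append(3)      # class index
leaf.sparse_vector.value.append(-1.13)  # logit contribution for that class

# Reading it back mirrors the assertions in the test above.
assert list(leaf.sparse_vector.index) == [3]
assert abs(leaf.sparse_vector.value[0] - (-1.13)) < 1e-5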
Example #40
File: model.py  Project: zw18/tensorflow
def ranking_model_builder(features,
                          labels,
                          mode,
                          params,
                          config,
                          output_type=ModelBuilderOutputType.MODEL_FN_OPS):
    """Multi-machine batch gradient descent tree model for ranking.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * learner_config: A config for the learner.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * examples_per_layer: Number of examples to accumulate before growing a
          layer. It can also be a function that computes the number of examples
          based on the depth of the layer that's being built.
      * weight_column_name: The name of weight column.
      * center_bias: Whether a separate tree should be created for first fitting
          the bias.
      * ranking_model_pair_keys (Optional): Keys to distinguish between features
        for left and right part of the training pairs for ranking. For example,
        for an Example with features "a.f1" and "b.f1", the keys would be
        ("a", "b").
    config: `RunConfig` of the estimator.
    output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
      (new interface).


  Returns:
    A `ModelFnOps` object, or an `EstimatorSpec` when `output_type` is
      ESTIMATOR_SPEC.
  Raises:
    ValueError: if inputs are not valid.
  """
    head = params["head"]
    learner_config = params["learner_config"]
    examples_per_layer = params["examples_per_layer"]
    feature_columns = params["feature_columns"]
    weight_column_name = params["weight_column_name"]
    num_trees = params["num_trees"]
    use_core_libs = params["use_core_libs"]
    logits_modifier_function = params["logits_modifier_function"]
    output_leaf_index = params["output_leaf_index"]
    ranking_model_pair_keys = params["ranking_model_pair_keys"]

    if features is None:
        raise ValueError("At least one feature must be specified.")

    if config is None:
        raise ValueError("Missing estimator RunConfig.")

    center_bias = params["center_bias"]

    if isinstance(features, ops.Tensor):
        features = {features.name: features}

    # Make a shallow copy of features to ensure downstream usage
    # is unaffected by modifications in the model function.
    training_features = copy.copy(features)
    training_features.pop(weight_column_name, None)
    global_step = training_util.get_global_step()
    with ops.device(global_step.device):
        ensemble_handle = model_ops.tree_ensemble_variable(
            stamp_token=0,
            tree_ensemble_config="",  # Initialize an empty ensemble.
            name="ensemble_model")

    # Extract the features.
    if mode == learn.ModeKeys.TRAIN or mode == learn.ModeKeys.EVAL:
        # For ranking pairwise training, we extract two sets of features.
        if len(ranking_model_pair_keys) != 2:
            raise ValueError("You must provide keys for ranking.")
        left_pair_key = ranking_model_pair_keys[0]
        right_pair_key = ranking_model_pair_keys[1]
        if left_pair_key is None or right_pair_key is None:
            raise ValueError("Both pair keys should be provided for ranking.")

        features_1 = {}
        features_2 = {}
        for name in training_features:
            feature = training_features[name]
            new_name = name[2:]
            if name.startswith(left_pair_key + "."):
                features_1[new_name] = feature
            else:
                assert name.startswith(right_pair_key + ".")
                features_2[new_name] = feature

        main_features = features_1
        supplementary_features = features_2
    else:
        # For non-ranking or inference ranking, we have only 1 set of features.
        main_features = training_features

    # Create GBDT model.
    gbdt_model_main = gbdt_batch.GradientBoostedDecisionTreeModel(
        is_chief=config.is_chief,
        num_ps_replicas=config.num_ps_replicas,
        ensemble_handle=ensemble_handle,
        center_bias=center_bias,
        examples_per_layer=examples_per_layer,
        learner_config=learner_config,
        feature_columns=feature_columns,
        logits_dimension=head.logits_dimension,
        features=main_features,
        use_core_columns=use_core_libs,
        output_leaf_index=output_leaf_index)

    with ops.name_scope("gbdt", "gbdt_optimizer"):
        # Logits for inference.
        if mode == learn.ModeKeys.INFER:
            predictions_dict = gbdt_model_main.predict(mode)
            logits = predictions_dict[gbdt_batch.PREDICTIONS]
            if logits_modifier_function:
                logits = logits_modifier_function(logits, features, mode)
        else:
            gbdt_model_supplementary = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=config.is_chief,
                num_ps_replicas=config.num_ps_replicas,
                ensemble_handle=ensemble_handle,
                center_bias=center_bias,
                examples_per_layer=examples_per_layer,
                learner_config=learner_config,
                feature_columns=feature_columns,
                logits_dimension=head.logits_dimension,
                features=supplementary_features,
                use_core_columns=use_core_libs,
                output_leaf_index=output_leaf_index)

            # Logits for train and eval.
            if not supplementary_features:
                raise ValueError("Features for ranking must be specified.")

            predictions_dict_1 = gbdt_model_main.predict(mode)
            predictions_1 = predictions_dict_1[gbdt_batch.PREDICTIONS]

            predictions_dict_2 = gbdt_model_supplementary.predict(mode)
            predictions_2 = predictions_dict_2[gbdt_batch.PREDICTIONS]

            logits = predictions_1 - predictions_2
            if logits_modifier_function:
                logits = logits_modifier_function(logits, features, mode)

            predictions_dict = predictions_dict_1
            predictions_dict[gbdt_batch.PREDICTIONS] = logits

        def _train_op_fn(loss):
            """Returns the op to optimize the loss."""
            update_op = gbdt_model_main.train(loss, predictions_dict, labels)
            with ops.control_dependencies(
                [update_op]), (ops.colocate_with(global_step)):
                update_op = state_ops.assign_add(global_step, 1).op
                return update_op

    create_estimator_spec_op = getattr(head, "create_estimator_spec", None)

    training_hooks = []
    if num_trees:
        if center_bias:
            num_trees += 1

        finalized_trees, attempted_trees = (
            gbdt_model_main.get_number_of_trees_tensor())
        training_hooks.append(
            trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                          finalized_trees))

    if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
        if use_core_libs and callable(create_estimator_spec_op):
            model_fn_ops = head.create_estimator_spec(features=features,
                                                      mode=mode,
                                                      labels=labels,
                                                      train_op_fn=_train_op_fn,
                                                      logits=logits)
            model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
                model_fn_ops)
        else:
            model_fn_ops = head.create_model_fn_ops(features=features,
                                                    mode=mode,
                                                    labels=labels,
                                                    train_op_fn=_train_op_fn,
                                                    logits=logits)

        if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
            model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
                gbdt_batch.LEAF_INDEX]

        model_fn_ops.training_hooks.extend(training_hooks)
        return model_fn_ops

    elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
        assert callable(create_estimator_spec_op)
        estimator_spec = head.create_estimator_spec(features=features,
                                                    mode=mode,
                                                    labels=labels,
                                                    train_op_fn=_train_op_fn,
                                                    logits=logits)

        estimator_spec = estimator_spec._replace(
            training_hooks=training_hooks +
            list(estimator_spec.training_hooks))
        return estimator_spec

    return model_fn_ops
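Given pair keys ("a", "b"), the loop above routes each training feature by its prefix and strips that prefix with name[2:], which assumes single-character keys. A self-contained sketch of the same split, using hypothetical feature names:

# Hypothetical feature names; mirrors the prefix handling in ranking_model_builder.
ranking_model_pair_keys = ("a", "b")
training_features = {"a.f1": "left_f1", "b.f1": "right_f1",
                     "a.f2": "left_f2", "b.f2": "right_f2"}

left_pair_key, right_pair_key = ranking_model_pair_keys
features_1, features_2 = {}, {}
for name, feature in training_features.items():
    new_name = name[2:]  # drop the "a." / "b." prefix (single-character keys only)
    if name.startswith(left_pair_key + "."):
        features_1[new_name] = feature
    else:
        features_2[new_name] = feature

print(features_1)  # {'f1': 'left_f1', 'f2': 'left_f2'}
print(features_2)  # {'f1': 'right_f1', 'f2': 'right_f2'}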
def _dnn_tree_combined_model_fn(
    features,
    labels,
    mode,
    head,
    dnn_hidden_units,
    dnn_feature_columns,
    tree_learner_config,
    num_trees,
    tree_examples_per_layer,
    config=None,
    dnn_optimizer="Adagrad",
    dnn_activation_fn=nn.relu,
    dnn_dropout=None,
    dnn_input_layer_partitioner=None,
    dnn_input_layer_to_tree=True,
    dnn_steps_to_train=10000,
    predict_with_tree_only=False,
    tree_feature_columns=None,
    tree_center_bias=False,
    dnn_to_tree_distillation_param=None,
    use_core_versions=False,
    output_type=model.ModelBuilderOutputType.MODEL_FN_OPS):
  """DNN and GBDT combined model_fn.

  Args:
    features: `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    head: A `Head` instance.
    dnn_hidden_units: List of hidden units per layer.
    dnn_feature_columns: An iterable containing all the feature columns
      used by the model's DNN.
    tree_learner_config: A config for the tree learner.
    num_trees: Number of trees to grow model to after training DNN.
    tree_examples_per_layer: Number of examples to accumulate before
      growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in
      training dataset if possible. It can also be a function that computes
      the number of examples based on the depth of the layer that's
      being built.
    config: `RunConfig` of the estimator.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN. If `None`, will use the Adagrad
      optimizer with default learning rate of 0.001.
    dnn_activation_fn: Activation function applied to each layer of the DNN.
      If `None`, will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability to drop out a given
      unit in the DNN.
    dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
      Defaults to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    dnn_input_layer_to_tree: Whether to provide the DNN's input layer
      as a feature to the tree.
    dnn_steps_to_train: Number of steps to train dnn for before switching
      to gbdt.
    predict_with_tree_only: Whether to use only the tree model output as the
      final prediction.
    tree_feature_columns: An iterable containing all the feature columns
      used by the model's boosted trees. If dnn_input_layer_to_tree is
      set to True, these features are in addition to dnn_feature_columns.
    tree_center_bias: Whether a separate tree should be created for
      first fitting the bias.
    dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
      float defines the weight of the distillation loss, and the loss_fn, for
      computing distillation loss, takes dnn_logits, tree_logits and weight
      tensor. If the entire tuple is None, no distillation will be applied. If
      only the loss_fn is None, we will take the sigmoid/softmax cross entropy
      loss by default. When distillation is applied, `predict_with_tree_only`
      will be set to True.
    use_core_versions: Whether feature columns and loss are from the core (as
      opposed to contrib) version of tensorflow.
    output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
      (new interface).

  Returns:
    A `ModelFnOps` object, or an `EstimatorSpec` when `output_type` is
      ESTIMATOR_SPEC.
  Raises:
    ValueError: if inputs are not valid.
  """
  if not isinstance(features, dict):
    raise ValueError("features should be a dictionary of `Tensor`s. "
                     "Given type: {}".format(type(features)))

  if not dnn_feature_columns:
    raise ValueError("dnn_feature_columns must be specified")

  if dnn_to_tree_distillation_param:
    if not predict_with_tree_only:
      logging.warning("update predict_with_tree_only to True since distillation"
                      "is specified.")
      predict_with_tree_only = True

  # Build DNN Logits.
  dnn_parent_scope = "dnn"
  dnn_partitioner = dnn_input_layer_partitioner or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=config.num_ps_replicas, min_slice_size=64 << 20))

  if (output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC and
      not use_core_versions):
    raise ValueError("You must use core versions with Estimator Spec")

  with variable_scope.variable_scope(
      dnn_parent_scope,
      values=tuple(six.itervalues(features)),
      partitioner=dnn_partitioner):

    with variable_scope.variable_scope(
        "input_from_feature_columns",
        values=tuple(six.itervalues(features)),
        partitioner=dnn_partitioner) as input_layer_scope:
      if use_core_versions:
        input_layer = feature_column_lib.input_layer(
            features=features,
            feature_columns=dnn_feature_columns,
            weight_collections=[dnn_parent_scope])
      else:
        input_layer = layers.input_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=dnn_feature_columns,
            weight_collections=[dnn_parent_scope],
            scope=input_layer_scope)
    previous_layer = input_layer
    for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
      with variable_scope.variable_scope(
          "hiddenlayer_%d" % layer_id,
          values=(previous_layer,)) as hidden_layer_scope:
        net = layers.fully_connected(
            previous_layer,
            num_hidden_units,
            activation_fn=dnn_activation_fn,
            variables_collections=[dnn_parent_scope],
            scope=hidden_layer_scope)
        if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
          net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
      _add_hidden_layer_summary(net, hidden_layer_scope.name)
      previous_layer = net
    with variable_scope.variable_scope(
        "logits", values=(previous_layer,)) as logits_scope:
      dnn_logits = layers.fully_connected(
          previous_layer,
          head.logits_dimension,
          activation_fn=None,
          variables_collections=[dnn_parent_scope],
          scope=logits_scope)
    _add_hidden_layer_summary(dnn_logits, logits_scope.name)

    def _dnn_train_op_fn(loss):
      """Returns the op to optimize the loss."""
      return optimizers.optimize_loss(
          loss=loss,
          global_step=training_util.get_global_step(),
          learning_rate=_DNN_LEARNING_RATE,
          optimizer=_get_optimizer(dnn_optimizer),
          name=dnn_parent_scope,
          variables=ops.get_collection(
              ops.GraphKeys.TRAINABLE_VARIABLES, scope=dnn_parent_scope),
          # Empty summaries to prevent optimizers from logging training_loss.
          summaries=[])

  # Build Tree Logits.
  global_step = training_util.get_global_step()
  with ops.device(global_step.device):
    ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config="",  # Initialize an empty ensemble.
        name="ensemble_model")

  tree_features = features.copy()
  if dnn_input_layer_to_tree:
    tree_features["dnn_input_layer"] = input_layer
    tree_feature_columns.append(layers.real_valued_column("dnn_input_layer"))
  gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
      is_chief=config.is_chief,
      num_ps_replicas=config.num_ps_replicas,
      ensemble_handle=ensemble_handle,
      center_bias=tree_center_bias,
      examples_per_layer=tree_examples_per_layer,
      learner_config=tree_learner_config,
      feature_columns=tree_feature_columns,
      logits_dimension=head.logits_dimension,
      features=tree_features,
      use_core_columns=use_core_versions)

  with ops.name_scope("gbdt"):
    predictions_dict = gbdt_model.predict(mode)
    tree_logits = predictions_dict["predictions"]

    def _tree_train_op_fn(loss):
      """Returns the op to optimize the loss."""
      if dnn_to_tree_distillation_param:
        loss_weight, loss_fn = dnn_to_tree_distillation_param
        weight_tensor = head_lib._weight_tensor(  # pylint: disable=protected-access
            features, head.weight_column_name)
        dnn_logits_fixed = array_ops.stop_gradient(dnn_logits)

        if loss_fn is None:
          # Create a default loss_fn similar to the one previously used by
          # multi_class_head.
          n_classes = 2 if head.logits_dimension == 1 else head.logits_dimension
          loss_fn = distillation_loss.create_dnn_to_tree_cross_entropy_loss_fn(
              n_classes)

        dnn_to_tree_distillation_loss = loss_weight * loss_fn(
            dnn_logits_fixed, tree_logits, weight_tensor)
        summary.scalar("dnn_to_tree_distillation_loss",
                       dnn_to_tree_distillation_loss)
        loss += dnn_to_tree_distillation_loss

      update_op = gbdt_model.train(loss, predictions_dict, labels)
      with ops.control_dependencies(
          [update_op]), (ops.colocate_with(global_step)):
        update_op = state_ops.assign_add(global_step, 1).op
        return update_op

  if predict_with_tree_only:
    if mode == model_fn.ModeKeys.TRAIN or mode == model_fn.ModeKeys.INFER:
      tree_train_logits = tree_logits
    else:
      tree_train_logits = control_flow_ops.cond(
          global_step > dnn_steps_to_train,
          lambda: tree_logits,
          lambda: dnn_logits)
  else:
    tree_train_logits = dnn_logits + tree_logits

  def _no_train_op_fn(loss):
    """Returns a no-op."""
    del loss
    return control_flow_ops.no_op()

  if tree_center_bias:
    num_trees += 1
  finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()

  if output_type == model.ModelBuilderOutputType.MODEL_FN_OPS:
    if use_core_versions:
      model_fn_ops = head.create_estimator_spec(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_no_train_op_fn,
          logits=tree_train_logits)
      dnn_train_op = head.create_estimator_spec(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_dnn_train_op_fn,
          logits=dnn_logits)
      dnn_train_op = estimator_utils.estimator_spec_to_model_fn_ops(
          dnn_train_op).train_op

      tree_train_op = head.create_estimator_spec(
          features=tree_features,
          mode=mode,
          labels=labels,
          train_op_fn=_tree_train_op_fn,
          logits=tree_train_logits)
      tree_train_op = estimator_utils.estimator_spec_to_model_fn_ops(
          tree_train_op).train_op

      model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
          model_fn_ops)
    else:
      model_fn_ops = head.create_model_fn_ops(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_no_train_op_fn,
          logits=tree_train_logits)
      dnn_train_op = head.create_model_fn_ops(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_dnn_train_op_fn,
          logits=dnn_logits).train_op
      tree_train_op = head.create_model_fn_ops(
          features=tree_features,
          mode=mode,
          labels=labels,
          train_op_fn=_tree_train_op_fn,
          logits=tree_train_logits).train_op

    # Add the hooks
    model_fn_ops.training_hooks.extend([
        trainer_hooks.SwitchTrainOp(dnn_train_op, dnn_steps_to_train,
                                    tree_train_op),
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees)
    ])
    return model_fn_ops

  elif output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC:
    fusion_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_no_train_op_fn,
        logits=tree_train_logits)
    dnn_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_dnn_train_op_fn,
        logits=dnn_logits)
    tree_spec = head.create_estimator_spec(
        features=tree_features,
        mode=mode,
        labels=labels,
        train_op_fn=_tree_train_op_fn,
        logits=tree_train_logits)

    training_hooks = [
        trainer_hooks.SwitchTrainOp(dnn_spec.train_op, dnn_steps_to_train,
                                    tree_spec.train_op),
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees)
    ]
    fusion_spec = fusion_spec._replace(training_hooks=training_hooks +
                                       list(fusion_spec.training_hooks))
    return fusion_spec
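When dnn_to_tree_distillation_param is set, _tree_train_op_fn above augments the head loss with a weighted distillation term computed against the frozen DNN logits. A minimal sketch of that combination, assuming a user-supplied loss_fn with the (dnn_logits, tree_logits, weights) signature; names here are illustrative:

import tensorflow as tf

def distilled_tree_loss(head_loss, dnn_logits, tree_logits, weights,
                        loss_weight, loss_fn):
  # The DNN logits act as a fixed teacher signal: stop_gradient keeps the
  # distillation term from pushing gradients back into the DNN weights.
  dnn_logits_fixed = tf.stop_gradient(dnn_logits)
  distillation_loss = loss_weight * loss_fn(dnn_logits_fixed, tree_logits, weights)
  return head_loss + distillation_loss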
def _dnn_tree_combined_model_fn(
    features, labels, mode, head, dnn_hidden_units,
    dnn_feature_columns, tree_learner_config, num_trees,
    tree_examples_per_layer,
    config=None, dnn_optimizer="Adagrad",
    dnn_activation_fn=nn.relu, dnn_dropout=None,
    dnn_input_layer_partitioner=None,
    dnn_input_layer_to_tree=True, dnn_steps_to_train=10000,
    tree_feature_columns=None,
    tree_center_bias=True):
  """DNN and GBDT combined model_fn.

  Args:
    features: `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    head: A `Head` instance.
    dnn_hidden_units: List of hidden units per layer.
    dnn_feature_columns: An iterable containing all the feature columns
      used by the model's DNN.
    tree_learner_config: A config for the tree learner.
    num_trees: Number of trees to grow model to after training DNN.
    tree_examples_per_layer: Number of examples to accumulate before
      growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in
      training dataset if possible. It can also be a function that computes
      the number of examples based on the depth of the layer that's
      being built.
    config: `RunConfig` of the estimator.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN. If `None`, will use the Adagrad
      optimizer with default learning rate of 0.001.
    dnn_activation_fn: Activation function applied to each layer of the DNN.
      If `None`, will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability to drop out a given
      unit in the DNN.
    dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
      Defaults to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    dnn_input_layer_to_tree: Whether to provide the DNN's input layer
      as a feature to the tree.
    dnn_steps_to_train: Number of steps to train dnn for before switching
      to gbdt.
    tree_feature_columns: An iterable containing all the feature columns
      used by the model's boosted trees. If dnn_input_layer_to_tree is
      set to True, these features are in addition to dnn_feature_columns.
    tree_center_bias: Whether a separate tree should be created for
      first fitting the bias.

  Returns:
    A `ModelFnOps` object.
  Raises:
    ValueError: if inputs are not valid.
  """
  if not isinstance(features, dict):
    raise ValueError("features should be a dictionary of `Tensor`s. "
                     "Given type: {}".format(type(features)))

  if not dnn_feature_columns:
    raise ValueError("dnn_feature_columns must be specified")

  # Build DNN Logits.
  dnn_parent_scope = "dnn"
  dnn_partitioner = dnn_input_layer_partitioner or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=config.num_ps_replicas,
          min_slice_size=64 << 20))

  with variable_scope.variable_scope(
      dnn_parent_scope,
      values=tuple(six.itervalues(features)),
      partitioner=dnn_partitioner):

    with variable_scope.variable_scope(
        "input_from_feature_columns",
        values=tuple(six.itervalues(features)),
        partitioner=dnn_partitioner) as input_layer_scope:
      input_layer = layers.input_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=dnn_feature_columns,
          weight_collections=[dnn_parent_scope],
          scope=input_layer_scope)
    previous_layer = input_layer
    for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
      with variable_scope.variable_scope(
          "hiddenlayer_%d" % layer_id,
          values=(previous_layer,)) as hidden_layer_scope:
        net = layers.fully_connected(
            previous_layer,
            num_hidden_units,
            activation_fn=dnn_activation_fn,
            variables_collections=[dnn_parent_scope],
            scope=hidden_layer_scope)
        if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
          net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
      _add_hidden_layer_summary(net, hidden_layer_scope.name)
      previous_layer = net
    with variable_scope.variable_scope(
        "logits",
        values=(previous_layer,)) as logits_scope:
      dnn_logits = layers.fully_connected(
          previous_layer,
          head.logits_dimension,
          activation_fn=None,
          variables_collections=[dnn_parent_scope],
          scope=logits_scope)
    _add_hidden_layer_summary(dnn_logits, logits_scope.name)

    def _dnn_train_op_fn(loss):
      """Returns the op to optimize the loss."""
      return optimizers.optimize_loss(
          loss=loss,
          global_step=training_util.get_global_step(),
          learning_rate=_DNN_LEARNING_RATE,
          optimizer=_get_optimizer(dnn_optimizer),
          name=dnn_parent_scope,
          variables=ops.get_collection(
              ops.GraphKeys.TRAINABLE_VARIABLES,
              scope=dnn_parent_scope),
          # Empty summaries to prevent optimizers from logging training_loss.
          summaries=[])

  # Build Tree Logits.
  global_step = training_util.get_global_step()
  with ops.device(global_step.device):
    ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config="",  # Initialize an empty ensemble.
        name="ensemble_model")

  tree_features = features.copy()
  if dnn_input_layer_to_tree:
    tree_features["dnn_input_layer"] = input_layer
    tree_feature_columns.append(layers.real_valued_column("dnn_input_layer"))
  gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
      is_chief=config.is_chief,
      num_ps_replicas=config.num_ps_replicas,
      ensemble_handle=ensemble_handle,
      center_bias=tree_center_bias,
      examples_per_layer=tree_examples_per_layer,
      learner_config=tree_learner_config,
      feature_columns=tree_feature_columns,
      logits_dimension=head.logits_dimension,
      features=tree_features)

  with ops.name_scope("gbdt"):
    predictions_dict = gbdt_model.predict(mode)
    tree_logits = predictions_dict["predictions"]

    def _tree_train_op_fn(loss):
      """Returns the op to optimize the loss."""
      update_op = gbdt_model.train(loss, predictions_dict, labels)
      with ops.control_dependencies(
          [update_op]), (ops.colocate_with(global_step)):
        update_op = state_ops.assign_add(global_step, 1).op
        return update_op

  tree_train_logits = dnn_logits + tree_logits

  def _no_train_op_fn(loss):
    """Returns a no-op."""
    del loss
    return control_flow_ops.no_op()

  model_fn_ops = head.create_model_fn_ops(
      features=features,
      mode=mode,
      labels=labels,
      train_op_fn=_no_train_op_fn,
      logits=tree_train_logits)
  dnn_train_op = head.create_model_fn_ops(
      features=features,
      mode=mode,
      labels=labels,
      train_op_fn=_dnn_train_op_fn,
      logits=dnn_logits).train_op
  tree_train_op = head.create_model_fn_ops(
      features=tree_features,
      mode=mode,
      labels=labels,
      train_op_fn=_tree_train_op_fn,
      logits=tree_train_logits).train_op

  if tree_center_bias:
    num_trees += 1
  finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()

  model_fn_ops.training_hooks.extend([
      trainer_hooks.SwitchTrainOp(
          dnn_train_op, dnn_steps_to_train, tree_train_op),
      trainer_hooks.StopAfterNTrees(
          num_trees, attempted_trees, finalized_trees)])

  return model_fn_ops
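The SwitchTrainOp hook added above is what alternates the two train ops: it runs dnn_train_op for the first dnn_steps_to_train global steps and tree_train_op afterwards. A rough host-side sketch of that schedule (illustrative only, not the hook's implementation):

def pick_train_op(global_step_value, dnn_steps_to_train,
                  dnn_train_op, tree_train_op):
  # First train the DNN, then hand the optimization over to the trees.
  if global_step_value < dnn_steps_to_train:
    return dnn_train_op
  return tree_train_op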
Example #43
  def testTrainFnChiefScalingNumberOfExamples(self):
    """Tests the train function running on chief without bias centering."""
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      num_examples_fn = (
          lambda layer: math_ops.pow(math_ops.cast(2, dtypes.int64), layer) * 1)
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=num_examples_fn,
          learner_config=learner_config,
          features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 12,
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 1)

      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])

      # On second run, expect a trivial split to be chosen to basically
      # predict the average.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      self.assertAllClose(output.tree_weights, [0.1])
      self.assertEqual(stamp_token.eval(), 2)
      expected_tree = """
          nodes {
            dense_float_binary_split {
              threshold: 1.0
              left_id: 1
              right_id: 2
            }
            node_metadata {
              gain: 0
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.0
              }
            }
          }"""
      self.assertProtoEquals(expected_tree, output.trees[0])
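num_examples_fn above makes each deeper layer wait for twice as many examples before it is grown (2 ** layer). A plain-Python view of that schedule:

def examples_for_layer(layer):
  # Mirrors the lambda in the test: pow(2, layer) * 1 examples per layer.
  return 2 ** layer

print([examples_for_layer(depth) for depth in range(4)])  # [1, 2, 4, 8]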
Example #44
  def testTrainFnChiefWithBiasCentering(self):
    """Tests the train function running on chief with bias centering."""
    with self.test_session():
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 12,
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect bias to be centered.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      expected_tree = """
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }"""
      self.assertEqual(len(output.trees), 1)
      self.assertAllEqual(output.tree_weights, [1.0])
      self.assertProtoEquals(expected_tree, output.trees[0])
      self.assertEqual(stamp_token.eval(), 1)
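The 0.25 in expected_tree follows from the squared-loss gradients: with labels all 1.0 and predictions [0, 1, 0, 2], the mean gradient (prediction - label) is -0.25, and centering the bias applies the negative mean gradient. A quick check of that arithmetic, as a sketch rather than the op's implementation:

import numpy as np

labels = np.array([1.0, 1.0, 1.0, 1.0])
predictions = np.array([0.0, 1.0, 0.0, 2.0])

# Squared-loss gradient per example is (prediction - label); the centered bias
# is the negative mean gradient (a Newton step with unit hessian).
mean_gradient = np.mean(predictions - labels)
print(-mean_gradient)  # 0.25, matching the bias leaf in expected_tree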
Example #45
    def testRestore(self):
        # Calling self.test_session() without a graph specified results in
        # TensorFlowTestCase caching the session and returning the same one
        # every time. In this test, we need to create two different sessions
        # which is why we also create a graph and pass it to self.test_session()
        # to ensure no caching occurs under the hood.
        save_path = os.path.join(self.get_temp_dir(), "restore-test")
        with ops.Graph().as_default() as graph:
            with self.test_session(graph) as sess:
                tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig(
                )

                tree = tree_ensemble_config.trees.add()
                tree_ensemble_config.tree_metadata.add().is_finalized = True
                tree_ensemble_config.tree_weights.append(1.0)
                _append_to_leaf(tree.nodes.add().leaf, 0, -0.1)

                tree_ensemble_config2 = tree_config_pb2.DecisionTreeEnsembleConfig(
                )
                tree2 = tree_ensemble_config2.trees.add()
                tree_ensemble_config2.tree_weights.append(1.0)
                _append_to_leaf(tree2.nodes.add().leaf, 0, -1.0)

                tree_ensemble_config3 = tree_config_pb2.DecisionTreeEnsembleConfig(
                )
                tree3 = tree_ensemble_config3.trees.add()
                tree_ensemble_config3.tree_weights.append(1.0)
                _append_to_leaf(tree3.nodes.add().leaf, 0, -10.0)

                # Prepare learner config.
                learner_config = learner_pb2.LearnerConfig()
                learner_config.num_classes = 2

                tree_ensemble_handle = model_ops.tree_ensemble_variable(
                    stamp_token=3,
                    tree_ensemble_config=tree_ensemble_config.
                    SerializeToString(),
                    name="restore_tree")
                feature_usage_counts = variables.Variable(
                    initial_value=array_ops.zeros([1], dtypes.int64),
                    name="feature_usage_counts",
                    trainable=False)
                feature_gains = variables.Variable(
                    initial_value=array_ops.zeros([1], dtypes.float32),
                    name="feature_gains",
                    trainable=False)

                resources.initialize_resources(
                    resources.shared_resources()).run()
                variables.global_variables_initializer().run()
                my_saver = saver.Saver()

                with ops.control_dependencies([
                        ensemble_optimizer_ops.add_trees_to_ensemble(
                            tree_ensemble_handle,
                            tree_ensemble_config2.SerializeToString(),
                            feature_usage_counts, [0],
                            feature_gains, [0], [[]],
                            learning_rate=1)
                ]):
                    result, _, _ = prediction_ops.gradient_trees_prediction(
                        tree_ensemble_handle,
                        self._seed, [self._dense_float_tensor], [
                            self._sparse_float_indices1,
                            self._sparse_float_indices2
                        ], [
                            self._sparse_float_values1,
                            self._sparse_float_values2
                        ],
                        [self._sparse_float_shape1, self._sparse_float_shape2],
                        [self._sparse_int_indices1],
                        [self._sparse_int_values1], [self._sparse_int_shape1],
                        learner_config=learner_config.SerializeToString(),
                        apply_dropout=False,
                        apply_averaging=False,
                        center_bias=False,
                        reduce_dim=True)
                self.assertAllClose([[-1.1], [-1.1]], result.eval())
                # Save before adding other trees.
                val = my_saver.save(sess, save_path)
                self.assertEqual(save_path, val)

                # Add more trees after saving.
                with ops.control_dependencies([
                        ensemble_optimizer_ops.add_trees_to_ensemble(
                            tree_ensemble_handle,
                            tree_ensemble_config3.SerializeToString(),
                            feature_usage_counts, [0],
                            feature_gains, [0], [[]],
                            learning_rate=1)
                ]):
                    result, _, _ = prediction_ops.gradient_trees_prediction(
                        tree_ensemble_handle,
                        self._seed, [self._dense_float_tensor], [
                            self._sparse_float_indices1,
                            self._sparse_float_indices2
                        ], [
                            self._sparse_float_values1,
                            self._sparse_float_values2
                        ],
                        [self._sparse_float_shape1, self._sparse_float_shape2],
                        [self._sparse_int_indices1],
                        [self._sparse_int_values1], [self._sparse_int_shape1],
                        learner_config=learner_config.SerializeToString(),
                        apply_dropout=False,
                        apply_averaging=False,
                        center_bias=False,
                        reduce_dim=True)
                self.assertAllClose(result.eval(), [[-11.1], [-11.1]])

        # Start a second session.  In that session the parameter nodes
        # have not been initialized either.
        with ops.Graph().as_default() as graph:
            with self.test_session(graph) as sess:
                tree_ensemble_handle = model_ops.tree_ensemble_variable(
                    stamp_token=0,
                    tree_ensemble_config="",
                    name="restore_tree")
                my_saver = saver.Saver()
                my_saver.restore(sess, save_path)
                result, _, _ = prediction_ops.gradient_trees_prediction(
                    tree_ensemble_handle,
                    self._seed, [self._dense_float_tensor],
                    [self._sparse_float_indices1, self._sparse_float_indices2],
                    [self._sparse_float_values1, self._sparse_float_values2],
                    [self._sparse_float_shape1, self._sparse_float_shape2],
                    [self._sparse_int_indices1], [self._sparse_int_values1],
                    [self._sparse_int_shape1],
                    learner_config=learner_config.SerializeToString(),
                    apply_dropout=False,
                    apply_averaging=False,
                    center_bias=False,
                    reduce_dim=True)
                # Make sure we only have the first and second tree.
                # The third tree was added after the save.
                self.assertAllClose(result.eval(), [[-1.1], [-1.1]])
Example #46
  def testTrainFnNonChiefWithCentering(self):
    """Tests the train function running on worker with bias centering."""
    with self.test_session():
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=False,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # Regardless of how many times the train op is run, a non-chief worker
      # can only accumulate stats so the tree ensemble never changes.
      for _ in range(5):
        train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 0)
Example #47
def model_builder(features,
                  labels,
                  mode,
                  params,
                  config,
                  output_type=ModelBuilderOutputType.MODEL_FN_OPS):
  """Multi-machine batch gradient descent tree model.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * learner_config: A config for the learner.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * examples_per_layer: Number of examples to accumulate before growing a
          layer. It can also be a function that computes the number of examples
          based on the depth of the layer that's being built.
      * weight_column_name: The name of weight column.
      * center_bias: Whether a separate tree should be created for first fitting
          the bias.
      * override_global_step_value: If after the training is done, global step
        value must be reset to this value. This is particularly useful for hyper
        parameter tuning, which can't recognize early stopping due to the number
        of trees. If None, no override of global step will happen.
    config: `RunConfig` of the estimator.
    output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
      (new interface).

  Returns:
    A `ModelFnOps` object, or an `EstimatorSpec` when `output_type` is
      ESTIMATOR_SPEC.
  Raises:
    ValueError: if inputs are not valid.
  """
  head = params["head"]
  learner_config = params["learner_config"]
  examples_per_layer = params["examples_per_layer"]
  feature_columns = params["feature_columns"]
  weight_column_name = params["weight_column_name"]
  num_trees = params["num_trees"]
  use_core_libs = params["use_core_libs"]
  logits_modifier_function = params["logits_modifier_function"]
  output_leaf_index = params["output_leaf_index"]
  override_global_step_value = params.get("override_global_step_value", None)
  num_quantiles = params["num_quantiles"]

  if features is None:
    raise ValueError("At least one feature must be specified.")

  if config is None:
    raise ValueError("Missing estimator RunConfig.")
  if config.session_config is not None:
    session_config = config.session_config
    session_config.allow_soft_placement = True
  else:
    session_config = config_pb2.ConfigProto(allow_soft_placement=True)
  config = config.replace(session_config=session_config)

  center_bias = params["center_bias"]

  if isinstance(features, ops.Tensor):
    features = {features.name: features}

  # Make a shallow copy of features to ensure downstream usage
  # is unaffected by modifications in the model function.
  training_features = copy.copy(features)
  training_features.pop(weight_column_name, None)
  global_step = training_util.get_global_step()
  with ops.device(global_step.device):
    ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config="",  # Initialize an empty ensemble.
        name="ensemble_model")

  # Create GBDT model.
  gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
      is_chief=config.is_chief,
      num_ps_replicas=config.num_ps_replicas,
      ensemble_handle=ensemble_handle,
      center_bias=center_bias,
      examples_per_layer=examples_per_layer,
      learner_config=learner_config,
      feature_columns=feature_columns,
      logits_dimension=head.logits_dimension,
      features=training_features,
      use_core_columns=use_core_libs,
      output_leaf_index=output_leaf_index,
      num_quantiles=num_quantiles)
  with ops.name_scope("gbdt", "gbdt_optimizer"):
    predictions_dict = gbdt_model.predict(mode)
    logits = predictions_dict["predictions"]
    if logits_modifier_function:
      logits = logits_modifier_function(logits, features, mode)

    def _train_op_fn(loss):
      """Returns the op to optimize the loss."""
      update_op = gbdt_model.train(loss, predictions_dict, labels)
      with ops.control_dependencies(
          [update_op]), (ops.colocate_with(global_step)):
        update_op = state_ops.assign_add(global_step, 1).op
        return update_op

  create_estimator_spec_op = getattr(head, "create_estimator_spec", None)

  training_hooks = []
  if num_trees:
    if center_bias:
      num_trees += 1

    finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()
    training_hooks.append(
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees,
                                      override_global_step_value))

  if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
    if use_core_libs and callable(create_estimator_spec_op):
      model_fn_ops = head.create_estimator_spec(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_train_op_fn,
          logits=logits)
      model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
          model_fn_ops)
    else:
      model_fn_ops = head.create_model_fn_ops(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_train_op_fn,
          logits=logits)

    if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
      model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
          gbdt_batch.LEAF_INDEX]

    model_fn_ops.training_hooks.extend(training_hooks)
    return model_fn_ops
  elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
    assert callable(create_estimator_spec_op)
    estimator_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)

    if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
      estimator_spec.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
          gbdt_batch.LEAF_INDEX]

    estimator_spec = estimator_spec._replace(
        training_hooks=training_hooks + list(estimator_spec.training_hooks))
    return estimator_spec

  return model_fn_ops
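model_builder passes the raw tree predictions through an optional logits_modifier_function(logits, features, mode) hook before handing them to the head. A minimal sketch of such a modifier; the clipping itself is a hypothetical choice, and any callable with this signature fits:

import tensorflow as tf

def clip_logits_modifier(logits, features, mode):
  # Hypothetical modifier: clamp the tree logits to a fixed range.
  del features, mode  # unused in this sketch
  return tf.clip_by_value(logits, -5.0, 5.0)

# e.g. params["logits_modifier_function"] = clip_logits_modifier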
def _dnn_tree_combined_model_fn(
        features,
        labels,
        mode,
        head,
        dnn_hidden_units,
        dnn_feature_columns,
        tree_learner_config,
        num_trees,
        tree_examples_per_layer,
        config=None,
        dnn_optimizer="Adagrad",
        dnn_activation_fn=nn.relu,
        dnn_dropout=None,
        dnn_input_layer_partitioner=None,
        dnn_input_layer_to_tree=True,
        dnn_steps_to_train=10000,
        predict_with_tree_only=False,
        tree_feature_columns=None,
        tree_center_bias=False,
        dnn_to_tree_distillation_param=None,
        use_core_versions=False,
        output_type=model.ModelBuilderOutputType.MODEL_FN_OPS,
        override_global_step_value=None):
    """DNN and GBDT combined model_fn.

  Args:
    features: `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    head: A `Head` instance.
    dnn_hidden_units: List of hidden units per layer.
    dnn_feature_columns: An iterable containing all the feature columns
      used by the model's DNN.
    tree_learner_config: A config for the tree learner.
    num_trees: Number of trees to grow model to after training DNN.
    tree_examples_per_layer: Number of examples to accumulate before
      growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in
      training dataset if possible. It can also be a function that computes
      the number of examples based on the depth of the layer that's
      being built.
    config: `RunConfig` of the estimator.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN. If `None`, will use the Adagrad
      optimizer with default learning rate of 0.001.
    dnn_activation_fn: Activation function applied to each layer of the DNN.
      If `None`, will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability to drop out a given
      unit in the DNN.
    dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
      Defaults to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    dnn_input_layer_to_tree: Whether to provide the DNN's input layer
      as a feature to the tree.
    dnn_steps_to_train: Number of steps to train the DNN for before switching
      to the GBDT.
    predict_with_tree_only: Whether to use only the tree model output as the
      final prediction.
    tree_feature_columns: An iterable containing all the feature columns
      used by the model's boosted trees. If dnn_input_layer_to_tree is
      set to True, these features are in addition to dnn_feature_columns.
    tree_center_bias: Whether a separate tree should be created for
      first fitting the bias.
    dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
      float defines the weight of the distillation loss, and loss_fn computes
      the distillation loss from dnn_logits, tree_logits and a weight tensor.
      If the entire tuple is None, no distillation will be applied. If
      only the loss_fn is None, we will take the sigmoid/softmax cross entropy
      loss by default. When distillation is applied, `predict_with_tree_only`
      will be set to True.
    use_core_versions: Whether feature columns and loss are from the core (as
      opposed to contrib) version of tensorflow.
    output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
      (new interface).
    override_global_step_value: If set, the global step is reset to this value
      after training is done. This is particularly useful for hyperparameter
      tuning, which can't recognize early stopping due to the number of trees.
      If None, the global step is not overridden.

  Returns:
    A `ModelFnOps` or `EstimatorSpec` object, depending on `output_type`.
  Raises:
    ValueError: if inputs are not valid.
  """
    if not isinstance(features, dict):
        raise ValueError("features should be a dictionary of `Tensor`s. "
                         "Given type: {}".format(type(features)))

    if not dnn_feature_columns:
        raise ValueError("dnn_feature_columns must be specified")

    if dnn_to_tree_distillation_param:
        if not predict_with_tree_only:
            logging.warning(
                "update predict_with_tree_only to True since distillation"
                "is specified.")
            predict_with_tree_only = True

    # Build DNN Logits.
    dnn_parent_scope = "dnn"
    dnn_partitioner = dnn_input_layer_partitioner or (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=config.num_ps_replicas, min_slice_size=64 << 20))

    if (output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC
            and not use_core_versions):
        raise ValueError("You must use core versions with Estimator Spec")

    with variable_scope.variable_scope(dnn_parent_scope,
                                       values=tuple(six.itervalues(features)),
                                       partitioner=dnn_partitioner):

        with variable_scope.variable_scope(
                "input_from_feature_columns",
                values=tuple(six.itervalues(features)),
                partitioner=dnn_partitioner) as input_layer_scope:
            if use_core_versions:
                input_layer = feature_column_lib.input_layer(
                    features=features,
                    feature_columns=dnn_feature_columns,
                    weight_collections=[dnn_parent_scope])
            else:
                input_layer = layers.input_from_feature_columns(
                    columns_to_tensors=features,
                    feature_columns=dnn_feature_columns,
                    weight_collections=[dnn_parent_scope],
                    scope=input_layer_scope)
        previous_layer = input_layer
        for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
            with variable_scope.variable_scope(
                    "hiddenlayer_%d" % layer_id,
                    values=(previous_layer, )) as hidden_layer_scope:
                net = layers.fully_connected(
                    previous_layer,
                    num_hidden_units,
                    activation_fn=dnn_activation_fn,
                    variables_collections=[dnn_parent_scope],
                    scope=hidden_layer_scope)
                if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
                    net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
            _add_hidden_layer_summary(net, hidden_layer_scope.name)
            previous_layer = net
        with variable_scope.variable_scope(
                "logits", values=(previous_layer, )) as logits_scope:
            dnn_logits = layers.fully_connected(
                previous_layer,
                head.logits_dimension,
                activation_fn=None,
                variables_collections=[dnn_parent_scope],
                scope=logits_scope)
        _add_hidden_layer_summary(dnn_logits, logits_scope.name)

        def _dnn_train_op_fn(loss):
            """Returns the op to optimize the loss."""
            return optimizers.optimize_loss(
                loss=loss,
                global_step=training_util.get_global_step(),
                learning_rate=_DNN_LEARNING_RATE,
                optimizer=_get_optimizer(dnn_optimizer),
                name=dnn_parent_scope,
                variables=ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
                                             scope=dnn_parent_scope),
                # Empty summaries to prevent optimizers from logging training_loss.
                summaries=[])

    # Build Tree Logits.
    global_step = training_util.get_global_step()
    with ops.device(global_step.device):
        ensemble_handle = model_ops.tree_ensemble_variable(
            stamp_token=0,
            tree_ensemble_config="",  # Initialize an empty ensemble.
            name="ensemble_model")

    tree_features = features.copy()
    if dnn_input_layer_to_tree:
        tree_features["dnn_input_layer"] = input_layer
        tree_feature_columns.append(
            layers.real_valued_column("dnn_input_layer"))
    gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
        is_chief=config.is_chief,
        num_ps_replicas=config.num_ps_replicas,
        ensemble_handle=ensemble_handle,
        center_bias=tree_center_bias,
        examples_per_layer=tree_examples_per_layer,
        learner_config=tree_learner_config,
        feature_columns=tree_feature_columns,
        logits_dimension=head.logits_dimension,
        features=tree_features,
        use_core_columns=use_core_versions)

    with ops.name_scope("gbdt"):
        predictions_dict = gbdt_model.predict(mode)
        tree_logits = predictions_dict["predictions"]

        def _tree_train_op_fn(loss):
            """Returns the op to optimize the loss."""
            if dnn_to_tree_distillation_param:
                loss_weight, loss_fn = dnn_to_tree_distillation_param
                weight_tensor = head_lib._weight_tensor(  # pylint: disable=protected-access
                    features, head.weight_column_name)
                dnn_logits_fixed = array_ops.stop_gradient(dnn_logits)

                if loss_fn is None:
                    # Create a default loss_fn that mirrors the loss_fn of the
                    # multi_class_head previously used here.
                    n_classes = 2 if head.logits_dimension == 1 else head.logits_dimension
                    loss_fn = distillation_loss.create_dnn_to_tree_cross_entropy_loss_fn(
                        n_classes)

                dnn_to_tree_distillation_loss = loss_weight * loss_fn(
                    dnn_logits_fixed, tree_logits, weight_tensor)
                summary.scalar("dnn_to_tree_distillation_loss",
                               dnn_to_tree_distillation_loss)
                loss += dnn_to_tree_distillation_loss

            update_op = gbdt_model.train(loss, predictions_dict, labels)
            with ops.control_dependencies(
                [update_op]), (ops.colocate_with(global_step)):
                update_op = state_ops.assign_add(global_step, 1).op
                return update_op

    if predict_with_tree_only:
        if mode == model_fn.ModeKeys.TRAIN or mode == model_fn.ModeKeys.INFER:
            tree_train_logits = tree_logits
        else:
            tree_train_logits = control_flow_ops.cond(
                global_step > dnn_steps_to_train, lambda: tree_logits,
                lambda: dnn_logits)
    else:
        tree_train_logits = dnn_logits + tree_logits

    def _no_train_op_fn(loss):
        """Returns a no-op."""
        del loss
        return control_flow_ops.no_op()

    if tree_center_bias:
        num_trees += 1
    finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()

    if output_type == model.ModelBuilderOutputType.MODEL_FN_OPS:
        if use_core_versions:
            model_fn_ops = head.create_estimator_spec(
                features=features,
                mode=mode,
                labels=labels,
                train_op_fn=_no_train_op_fn,
                logits=tree_train_logits)
            dnn_train_op = head.create_estimator_spec(
                features=features,
                mode=mode,
                labels=labels,
                train_op_fn=_dnn_train_op_fn,
                logits=dnn_logits)
            dnn_train_op = estimator_utils.estimator_spec_to_model_fn_ops(
                dnn_train_op).train_op

            tree_train_op = head.create_estimator_spec(
                features=tree_features,
                mode=mode,
                labels=labels,
                train_op_fn=_tree_train_op_fn,
                logits=tree_train_logits)
            tree_train_op = estimator_utils.estimator_spec_to_model_fn_ops(
                tree_train_op).train_op

            model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
                model_fn_ops)
        else:
            model_fn_ops = head.create_model_fn_ops(
                features=features,
                mode=mode,
                labels=labels,
                train_op_fn=_no_train_op_fn,
                logits=tree_train_logits)
            dnn_train_op = head.create_model_fn_ops(
                features=features,
                mode=mode,
                labels=labels,
                train_op_fn=_dnn_train_op_fn,
                logits=dnn_logits).train_op
            tree_train_op = head.create_model_fn_ops(
                features=tree_features,
                mode=mode,
                labels=labels,
                train_op_fn=_tree_train_op_fn,
                logits=tree_train_logits).train_op

        # Add the hooks
        model_fn_ops.training_hooks.extend([
            trainer_hooks.SwitchTrainOp(dnn_train_op, dnn_steps_to_train,
                                        tree_train_op),
            trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                          finalized_trees,
                                          override_global_step_value)
        ])
        return model_fn_ops

    elif output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC:
        fusion_spec = head.create_estimator_spec(features=features,
                                                 mode=mode,
                                                 labels=labels,
                                                 train_op_fn=_no_train_op_fn,
                                                 logits=tree_train_logits)
        dnn_spec = head.create_estimator_spec(features=features,
                                              mode=mode,
                                              labels=labels,
                                              train_op_fn=_dnn_train_op_fn,
                                              logits=dnn_logits)
        tree_spec = head.create_estimator_spec(features=tree_features,
                                               mode=mode,
                                               labels=labels,
                                               train_op_fn=_tree_train_op_fn,
                                               logits=tree_train_logits)

        training_hooks = [
            trainer_hooks.SwitchTrainOp(dnn_spec.train_op, dnn_steps_to_train,
                                        tree_spec.train_op),
            trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                          finalized_trees,
                                          override_global_step_value)
        ]
        fusion_spec = fusion_spec._replace(training_hooks=training_hooks +
                                           list(fusion_spec.training_hooks))
        return fusion_spec
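
As the docstring above notes, `dnn_to_tree_distillation_param` is a
`(weight, loss_fn)` tuple and `loss_fn` is called as
`loss_fn(dnn_logits, tree_logits, weight_tensor)`. The sketch below shows one
way such a tuple could be assembled; the squared-error loss is an invented
illustration, not the library default, and `(0.1, None)` would instead fall
back to the built-in sigmoid/softmax cross-entropy distillation loss.

import tensorflow.compat.v1 as tf


def my_distillation_loss(dnn_logits, tree_logits, weight_tensor):
  """Hypothetical distillation loss: weighted squared error between logits."""
  per_example = tf.reduce_mean(
      tf.squared_difference(dnn_logits, tree_logits), axis=-1)
  if weight_tensor is not None:
    per_example = per_example * tf.squeeze(weight_tensor)
  return tf.reduce_mean(per_example)


# Weight the distillation term by 0.1; (0.1, None) selects the default loss.
dnn_to_tree_distillation_param = (0.1, my_distillation_loss)
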
Example #49
0
File: model.py Project: zw18/tensorflow
def model_builder(features,
                  labels,
                  mode,
                  params,
                  config,
                  output_type=ModelBuilderOutputType.MODEL_FN_OPS):
    """Multi-machine batch gradient descent tree model.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * learner_config: A config for the learner.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * examples_per_layer: Number of examples to accumulate before growing a
          layer. It can also be a function that computes the number of examples
          based on the depth of the layer that's being built.
      * weight_column_name: The name of weight column.
      * center_bias: Whether a separate tree should be created for first fitting
          the bias.
    config: `RunConfig` of the estimator.
    output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
      (new interface).

  Returns:
    A `ModelFnOps` or `EstimatorSpec` object, depending on `output_type`.
  Raises:
    ValueError: if inputs are not valid.
  """
    head = params["head"]
    learner_config = params["learner_config"]
    examples_per_layer = params["examples_per_layer"]
    feature_columns = params["feature_columns"]
    weight_column_name = params["weight_column_name"]
    num_trees = params["num_trees"]
    use_core_libs = params["use_core_libs"]
    logits_modifier_function = params["logits_modifier_function"]
    output_leaf_index = params["output_leaf_index"]

    if features is None:
        raise ValueError("At least one feature must be specified.")

    if config is None:
        raise ValueError("Missing estimator RunConfig.")

    center_bias = params["center_bias"]

    if isinstance(features, ops.Tensor):
        features = {features.name: features}

    # Make a shallow copy of features to ensure downstream usage
    # is unaffected by modifications in the model function.
    training_features = copy.copy(features)
    training_features.pop(weight_column_name, None)
    global_step = training_util.get_global_step()
    with ops.device(global_step.device):
        ensemble_handle = model_ops.tree_ensemble_variable(
            stamp_token=0,
            tree_ensemble_config="",  # Initialize an empty ensemble.
            name="ensemble_model")

    # Create GBDT model.
    gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
        is_chief=config.is_chief,
        num_ps_replicas=config.num_ps_replicas,
        ensemble_handle=ensemble_handle,
        center_bias=center_bias,
        examples_per_layer=examples_per_layer,
        learner_config=learner_config,
        feature_columns=feature_columns,
        logits_dimension=head.logits_dimension,
        features=training_features,
        use_core_columns=use_core_libs,
        output_leaf_index=output_leaf_index)
    with ops.name_scope("gbdt", "gbdt_optimizer"):
        predictions_dict = gbdt_model.predict(mode)
        logits = predictions_dict["predictions"]
        if logits_modifier_function:
            logits = logits_modifier_function(logits, features, mode)

        def _train_op_fn(loss):
            """Returns the op to optimize the loss."""
            update_op = gbdt_model.train(loss, predictions_dict, labels)
            with ops.control_dependencies(
                [update_op]), (ops.colocate_with(global_step)):
                update_op = state_ops.assign_add(global_step, 1).op
                return update_op

    create_estimator_spec_op = getattr(head, "create_estimator_spec", None)

    training_hooks = []
    if num_trees:
        if center_bias:
            num_trees += 1

        finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor(
        )
        training_hooks.append(
            trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                          finalized_trees))

    if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
        if use_core_libs and callable(create_estimator_spec_op):
            model_fn_ops = head.create_estimator_spec(features=features,
                                                      mode=mode,
                                                      labels=labels,
                                                      train_op_fn=_train_op_fn,
                                                      logits=logits)
            model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
                model_fn_ops)
        else:
            model_fn_ops = head.create_model_fn_ops(features=features,
                                                    mode=mode,
                                                    labels=labels,
                                                    train_op_fn=_train_op_fn,
                                                    logits=logits)

        if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
            model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
                gbdt_batch.LEAF_INDEX]

        model_fn_ops.training_hooks.extend(training_hooks)
        return model_fn_ops
    elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
        assert callable(create_estimator_spec_op)
        estimator_spec = head.create_estimator_spec(features=features,
                                                    mode=mode,
                                                    labels=labels,
                                                    train_op_fn=_train_op_fn,
                                                    logits=logits)

        estimator_spec = estimator_spec._replace(
            training_hooks=training_hooks +
            list(estimator_spec.training_hooks))
        return estimator_spec

    return model_fn_ops
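
Every key read at the top of `model_builder` must be present in `params`. The
dict below is an illustrative sketch with placeholder values; the `head`,
`learner_config`, and `feature_columns` objects are assumed to be constructed
elsewhere.

params = {
    "head": head,                        # a Head instance
    "learner_config": learner_config,    # learner_pb2.LearnerConfig proto
    "examples_per_layer": 1000,          # or a function of the layer depth
    "feature_columns": feature_columns,
    "weight_column_name": None,
    "num_trees": 100,
    "use_core_libs": False,
    "logits_modifier_function": None,
    "output_leaf_index": False,
    "center_bias": True,
}
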
  def testWithExistingEnsembleAndDropout(self):
    with self.test_session():
      tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 10 trees with some weights.
      for i in range(0, 10):
        tree = tree_ensemble.trees.add()
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble.tree_weights.append(i + 1)
        meta = tree_ensemble.tree_metadata.add()
        meta.num_tree_weight_updates = 1
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble.SerializeToString(),
          name="existing")
      # Create non-zero feature importance.
      feature_usage_counts = variables.Variable(
          initial_value=np.array([2, 3], np.int64),
          name="feature_usage_counts",
          trainable=False)
      feature_gains = variables.Variable(
          initial_value=np.array([0.0, 0.3], np.float32),
          name="feature_gains",
          trainable=False)

      resources.initialize_resources(resources.shared_resources()).run()
      variables.initialize_all_variables().run()

      dropped = [1, 6, 8]
      dropped_original_weights = [2.0, 7.0, 9.0]

      output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      with ops.control_dependencies([
          ensemble_optimizer_ops.add_trees_to_ensemble(
              tree_ensemble_handle,
              self._ensemble_to_add.SerializeToString(),
              feature_usage_counts, [1, 2],
              feature_gains, [0.5, 0.3], [dropped, dropped_original_weights],
              learning_rate=0.1)
      ]):
        output_ensemble.ParseFromString(
            model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())

      # Output.
      self.assertEqual(11, len(output_ensemble.trees))
      self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[10])
      self.assertAllClose(4.5, output_ensemble.tree_weights[10])

      self.assertAllClose([1., 1.5, 3., 4., 5., 6., 5.25, 8., 6.75, 10., 4.5],
                          output_ensemble.tree_weights)

      self.assertEqual(1,
                       output_ensemble.tree_metadata[0].num_tree_weight_updates)
      self.assertEqual(2,
                       output_ensemble.tree_metadata[1].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[2].num_tree_weight_updates)

      self.assertEqual(1,
                       output_ensemble.tree_metadata[3].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[4].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[5].num_tree_weight_updates)
      self.assertEqual(2,
                       output_ensemble.tree_metadata[6].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[7].num_tree_weight_updates)
      self.assertEqual(2,
                       output_ensemble.tree_metadata[8].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[9].num_tree_weight_updates)
      self.assertEqual(
          1, output_ensemble.tree_metadata[10].num_tree_weight_updates)
      self.assertAllEqual([3, 5], feature_usage_counts.eval())
      self.assertArrayNear([0.05, 0.33], feature_gains.eval(), 1e-6)
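
The expected values in this test appear consistent with DART-style dropout
rescaling: each of the k dropped trees keeps k/(k+1) of its original weight,
the newly added tree receives sum(dropped weights)/(k+1), and the feature
statistics are accumulated with the 0.1 learning rate applied to the gains. A
quick sanity check of that arithmetic (my own reconstruction, not part of the
test):

dropped_original_weights = [2.0, 7.0, 9.0]
k = len(dropped_original_weights)                               # 3 dropped trees
rescaled = [w * k / (k + 1) for w in dropped_original_weights]  # [1.5, 5.25, 6.75]
new_tree_weight = sum(dropped_original_weights) / (k + 1)       # 4.5
feature_usage_counts = [2 + 1, 3 + 2]                           # [3, 5]
feature_gains = [0.0 + 0.1 * 0.5, 0.3 + 0.1 * 0.3]              # [0.05, 0.33]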